|
@@ -968,3 +968,63 @@ int dma_supported(struct device *dev, u64 mask)
|
|
|
{
|
|
|
if (mask < (u64)arm_dma_limit)
|
|
|
return 0;
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL(dma_supported);
|
|
|
+
|
|
|
+int arm_dma_set_mask(struct device *dev, u64 dma_mask)
|
|
|
+{
|
|
|
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
|
|
|
+ return -EIO;
|
|
|
+
|
|
|
+ *dev->dma_mask = dma_mask;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
/* Number of dma-debug tracking entries preallocated at boot. */
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

/*
 * One-shot initialisation of the DMA debugging facility, registered
 * below as an fs_initcall.
 */
static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
|
|
|
+
|
|
|
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
|
|
|
+
|
|
|
+/* IOMMU */
|
|
|
+
|
|
|
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
|
|
|
+ size_t size)
|
|
|
+{
|
|
|
+ unsigned int order = get_order(size);
|
|
|
+ unsigned int align = 0;
|
|
|
+ unsigned int count, start;
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
|
|
|
+ (1 << mapping->order) - 1) >> mapping->order;
|
|
|
+
|
|
|
+ if (order > mapping->order)
|
|
|
+ align = (1 << (order - mapping->order)) - 1;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&mapping->lock, flags);
|
|
|
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
|
|
|
+ count, align);
|
|
|
+ if (start > mapping->bits) {
|
|
|
+ spin_unlock_irqrestore(&mapping->lock, flags);
|
|
|
+ return DMA_ERROR_CODE;
|
|
|
+ }
|
|
|
+
|
|
|
+ bitmap_set(mapping->bitmap, start, count);
|
|
|
+ spin_unlock_irqrestore(&mapping->lock, flags);
|
|
|
+
|
|
|
+ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
|
|
|
+}
|
|
|
+
|
|
|
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
|
|
|
+ dma_addr_t addr, size_t size)
|
|
|
+{
|
|
|
+ unsigned int start = (addr - mapping->base) >>
|
|
|
+ (mapping->order + PAGE_SHIFT);
|