@@ -1028,3 +1028,185 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 {
 	unsigned int start = (addr - mapping->base) >>
 			     (mapping->order + PAGE_SHIFT);
+	unsigned int count = ((size >> PAGE_SHIFT) +
+			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	bitmap_clear(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+					  gfp_t gfp, struct dma_attrs *attrs)
+{
+	struct page **pages;
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i = 0;
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+	if (!pages)
+		return NULL;
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+	{
+		unsigned long order = get_order(size);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (!page)
+			goto error;
+
+		__dma_clear_buffer(page, size);
+
+		for (i = 0; i < count; i++)
+			pages[i] = page + i;
+
+		return pages;
+	}
+
+	while (count) {
+		int j, order = __fls(count);
+
+		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+		while (!pages[i] && order)
+			pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+		if (!pages[i])
+			goto error;
+
+		if (order) {
+			split_page(pages[i], order);
+			j = 1 << order;
+			while (--j)
+				pages[i + j] = pages[i] + j;
+		}
+
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
+		i += 1 << order;
+		count -= 1 << order;
+	}
+
+	return pages;
+error:
+	while (i--)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+			       size_t size, struct dma_attrs *attrs)
+{
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i;
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		dma_release_from_contiguous(dev, pages[0], count);
+	} else {
+		for (i = 0; i < count; i++)
+			if (pages[i])
+				__free_pages(pages[i], 0);
+	}
+
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return 0;
+}
+
+/*
+ * Create a CPU mapping for a specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+		    const void *caller)
+{
+	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct vm_struct *area;
+	unsigned long p;
+
+	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+				  caller);
+	if (!area)
+		return NULL;
+
+	area->pages = pages;
+	area->nr_pages = nr_pages;
+	p = (unsigned long)area->addr;
+
+	for (i = 0; i < nr_pages; i++) {
+		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
+		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
+			goto err;
+		p += PAGE_SIZE;
+	}
+	return area->addr;
+err:
+	unmap_kernel_range((unsigned long)area->addr, size);
+	vunmap(area->addr);
+	return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	dma_addr_t dma_addr, iova;
+	int i, ret = DMA_ERROR_CODE;
+
+	dma_addr = __alloc_iova(mapping, size);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	iova = dma_addr;
+	for (i = 0; i < count; ) {
+		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+		phys_addr_t phys = page_to_phys(pages[i]);
+		unsigned int len, j;
+
+		for (j = i + 1; j < count; j++, next_pfn++)
+			if (page_to_pfn(pages[j]) != next_pfn)
+				break;
+
+		len = (j - i) << PAGE_SHIFT;
+		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		if (ret < 0)
+			goto fail;
+		iova += len;
+		i = j;
+	}
+	return dma_addr;
+fail:
+	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+	__free_iova(mapping, dma_addr, size);
+	return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+	/*
+	 * add optional in-page offset from iova to size and align
+	 * result to page size
+	 */
+	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, size);
+	__free_iova(mapping, iova, size);
+	return 0;