@@ -1210,3 +1210,192 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 	iommu_unmap(mapping->domain, iova, size);
 	__free_iova(mapping, iova, size);
 	return 0;
+}
+
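+/*
+ * Look up the struct page array backing an address handed out from the
+ * atomic DMA pool, by indexing pool->pages with the page offset of addr
+ * inside the pool's virtual area.
+ */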
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct dma_pool *pool = &atomic_pool;
+	struct page **pages = pool->pages;
+	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+	return pages + offs;
+}
+
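+/*
+ * Resolve a CPU address from any of the IOMMU allocation paths back to
+ * its page array: atomic pool allocations, DMA_ATTR_NO_KERNEL_MAPPING
+ * allocations (where cpu_addr already is the page array), and buffers
+ * remapped into vmalloc space with VM_ARM_DMA_CONSISTENT set.
+ */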
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+{
+	struct vm_struct *area;
+
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+		return cpu_addr;
+
+	area = find_vm_area(cpu_addr);
+	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
+		return area->pages;
+	return NULL;
+}
+
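+/*
+ * Atomic allocation path: carve the buffer out of the preallocated
+ * atomic pool (so no sleeping allocator calls are made) and map it
+ * into the device's IO address space.
+ */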
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle)
+{
+	struct page *page;
+	void *addr;
+
+	addr = __alloc_from_pool(size, &page);
+	if (!addr)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, &page, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__free_from_pool(addr, size);
+	return NULL;
+}
+
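+/*
+ * Undo __iommu_alloc_atomic(): tear down the IO mapping and give the
+ * memory back to the atomic pool.
+ */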
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(page_address(pages[0]), size);
+}
+
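+/*
+ * Coherent allocation entry point for IOMMU-attached devices: atomic
+ * requests are served from the atomic pool; everything else gets a page
+ * array from __iommu_alloc_buffer(), a contiguous IO mapping from
+ * __iommu_create_mapping() and, unless DMA_ATTR_NO_KERNEL_MAPPING is
+ * set, a kernel mapping via __iommu_alloc_remap().
+ */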
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	struct page **pages;
+	void *addr = NULL;
+
+	*handle = DMA_ERROR_CODE;
+	size = PAGE_ALIGN(size);
+
+	if (gfp & GFP_ATOMIC)
+		return __iommu_alloc_atomic(dev, size, handle);
+
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+	if (!pages)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, pages, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_buffer;
+
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+		return pages;
+
+	addr = __iommu_alloc_remap(pages, size, gfp, prot,
+				   __builtin_return_address(0));
+	if (!addr)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+	__iommu_free_buffer(dev, pages, size, attrs);
+	return NULL;
+}
+
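+/*
+ * mmap() support: insert the buffer's pages one by one into the user
+ * vma with vm_insert_page(), using the page protection derived from
+ * the DMA attributes.
+ */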
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		    struct dma_attrs *attrs)
+{
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+	if (!pages)
+		return -ENXIO;
+
+	do {
+		int ret = vm_insert_page(vma, uaddr, *pages++);
+		if (ret) {
+			pr_err("Remapping memory failed: %d\n", ret);
+			return ret;
+		}
+		uaddr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+			  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+	size = PAGE_ALIGN(size);
+
+	if (!pages) {
+		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+		return;
+	}
+
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, pages, handle, size);
+		return;
+	}
+
+	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+		unmap_kernel_range((unsigned long)cpu_addr, size);
+		vunmap(cpu_addr);
+	}
+
+	__iommu_remove_mapping(dev, handle, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
+}
+
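+/*
+ * Export the buffer as a scatterlist (used e.g. via dma_get_sgtable()):
+ * simply wrap the page array with sg_alloc_table_from_pages().
+ */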
+static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t dma_addr,
+				 size_t size, struct dma_attrs *attrs)
+{
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+	if (!pages)
+		return -ENXIO;
+
+	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
+					 GFP_KERNEL);
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+			  size_t size, dma_addr_t *handle,
+			  enum dma_data_direction dir, struct dma_attrs *attrs,
+			  bool is_coherent)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova, iova_base;
+	int ret = 0;
+	unsigned int count;
+	struct scatterlist *s;
+
+	size = PAGE_ALIGN(size);
+	*handle = DMA_ERROR_CODE;
+
+	iova_base = iova = __alloc_iova(mapping, size);
+	if (iova == DMA_ERROR_CODE)
+		return -ENOMEM;
+
+	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+		phys_addr_t phys = page_to_phys(sg_page(s));
+		unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+		if (!is_coherent &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))