|
@@ -1563,3 +1563,148 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
|
|
|
|
|
|
for_each_sg(sg, s, nents, i)
|
|
|
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
|
|
|
+
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * arm_iommu_sync_sg_for_device
|
|
|
+ * @dev: valid struct device pointer
|
|
|
+ * @sg: list of buffers
|
|
|
+ * @nents: number of buffers to map (returned from dma_map_sg)
|
|
|
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
|
|
|
+ *
+ * Hands the scatterlist back to the device: performs CPU cache
+ * maintenance on each entry via __dma_page_cpu_to_dev() so that data
+ * written by the CPU is visible to the device before DMA starts.
+ */
|
|
|
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
|
|
|
+			int nents, enum dma_data_direction dir)
|
|
|
+{
|
|
|
+	struct scatterlist *s;
|
|
|
+	int i;
|
|
|
+
|
|
|
+	for_each_sg(sg, s, nents, i)
|
|
|
+		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
|
|
|
+}
|
|
|
+
|
|
|
+
|
|
|
+/**
|
|
|
+ * arm_coherent_iommu_map_page
|
|
|
+ * @dev: valid struct device pointer
|
|
|
+ * @page: page that buffer resides in
|
|
|
+ * @offset: offset into page for start of buffer
|
|
|
+ * @size: size of buffer to map
|
|
|
+ * @dir: DMA transfer direction
|
|
|
+ *
|
|
|
+ * Coherent IOMMU aware version of arm_dma_map_page()
+ *
+ * Allocates a page-aligned IOVA range covering @offset + @size, maps it
+ * to the page's physical address, and returns the DMA address of the
+ * buffer start (IOVA + @offset).  No CPU cache maintenance is done here.
+ * Returns DMA_ERROR_CODE on IOVA exhaustion or iommu_map() failure.
|
|
|
+ */
|
|
|
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
|
|
|
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
|
|
|
+	     struct dma_attrs *attrs)
|
|
|
+{
|
|
|
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
|
|
|
+	dma_addr_t dma_addr;
|
|
|
+	int ret, len = PAGE_ALIGN(size + offset);
|
|
|
+
|
|
|
+	dma_addr = __alloc_iova(mapping, len);
|
|
|
+	if (dma_addr == DMA_ERROR_CODE)
|
|
|
+		return dma_addr;
|
|
|
+
|
|
|
+	/* NOTE(review): prot flags are 0 here — confirm the IOMMU driver
+	 * treats 0 as read/write, or whether IOMMU_READ|IOMMU_WRITE
+	 * (possibly derived from @dir) should be passed instead. */
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
|
|
|
+	if (ret < 0)
|
|
|
+		goto fail;
|
|
|
+
|
|
|
+	return dma_addr + offset;
|
|
|
+fail:
|
|
|
+	__free_iova(mapping, dma_addr, len);
|
|
|
+	return DMA_ERROR_CODE;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * arm_iommu_map_page
|
|
|
+ * @dev: valid struct device pointer
|
|
|
+ * @page: page that buffer resides in
|
|
|
+ * @offset: offset into page for start of buffer
|
|
|
+ * @size: size of buffer to map
|
|
|
+ * @dir: DMA transfer direction
|
|
|
+ *
|
|
|
+ * IOMMU aware version of arm_dma_map_page()
+ *
+ * Same as arm_coherent_iommu_map_page(), but additionally performs CPU
+ * cache maintenance first (unless the caller set DMA_ATTR_SKIP_CPU_SYNC).
|
|
|
+ */
|
|
|
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
|
|
|
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
|
|
|
+	     struct dma_attrs *attrs)
|
|
|
+{
|
|
|
+	/* Caller may opt out of cache maintenance when it manages it itself. */
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
|
|
|
+		__dma_page_cpu_to_dev(page, offset, size, dir);
|
|
|
+
|
|
|
+	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * arm_coherent_iommu_unmap_page
|
|
|
+ * @dev: valid struct device pointer
|
|
|
+ * @handle: DMA address of buffer
|
|
|
+ * @size: size of buffer (same as passed to dma_map_page)
|
|
|
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
|
|
|
+ *
|
|
|
+ * Coherent IOMMU aware version of arm_dma_unmap_page()
+ *
+ * Tears down the IOMMU mapping and releases the page-aligned IOVA range
+ * that arm_coherent_iommu_map_page() allocated.  A zero IOVA (handle
+ * within the first page) is treated as "nothing mapped" and ignored.
+ * No CPU cache maintenance is performed.
|
|
|
+ */
|
|
|
+static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
|
|
|
+		size_t size, enum dma_data_direction dir,
|
|
|
+		struct dma_attrs *attrs)
|
|
|
+{
|
|
|
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
|
|
|
+	dma_addr_t iova = handle & PAGE_MASK;
|
|
|
+	int offset = handle & ~PAGE_MASK;
|
|
|
+	/* Recompute the same page-aligned length used at map time. */
+	int len = PAGE_ALIGN(size + offset);
|
|
|
+
|
|
|
+	if (!iova)
|
|
|
+		return;
|
|
|
+
|
|
|
+	iommu_unmap(mapping->domain, iova, len);
|
|
|
+	__free_iova(mapping, iova, len);
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * arm_iommu_unmap_page
|
|
|
+ * @dev: valid struct device pointer
|
|
|
+ * @handle: DMA address of buffer
|
|
|
+ * @size: size of buffer (same as passed to dma_map_page)
|
|
|
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
|
|
|
+ *
|
|
|
+ * IOMMU aware version of arm_dma_unmap_page()
+ *
+ * Performs CPU cache maintenance for the unmapped buffer (unless
+ * DMA_ATTR_SKIP_CPU_SYNC was set), then tears down the IOMMU mapping
+ * and frees the IOVA range.
|
|
|
+ */
|
|
|
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
|
|
|
+		size_t size, enum dma_data_direction dir,
|
|
|
+		struct dma_attrs *attrs)
|
|
|
+{
|
|
|
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
|
|
|
+	dma_addr_t iova = handle & PAGE_MASK;
|
|
|
+	/* NOTE(review): the iova->phys lookup runs before the !iova guard
+	 * below, so iommu_iova_to_phys() may be called with iova == 0 —
+	 * confirm that is harmless for the IOMMU drivers in use. */
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
|
|
|
+	int offset = handle & ~PAGE_MASK;
|
|
|
+	int len = PAGE_ALIGN(size + offset);
|
|
|
+
|
|
|
+	if (!iova)
|
|
|
+		return;
|
|
|
+
|
|
|
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
|
|
|
+		__dma_page_dev_to_cpu(page, offset, size, dir);
|
|
|
+
|
|
|
+	iommu_unmap(mapping->domain, iova, len);
|
|
|
+	__free_iova(mapping, iova, len);
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Sync a single mapped buffer for CPU access: resolves the IOVA back to
+ * its struct page via the IOMMU domain and performs dev->cpu cache
+ * maintenance on it.  A zero IOVA is ignored (nothing was mapped).
+ */
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
|
|
|
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
|
|
+{
|
|
|
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
|
|
|
+	dma_addr_t iova = handle & PAGE_MASK;
|
|
|
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
|
|
|
+	unsigned int offset = handle & ~PAGE_MASK;
|
|
|
+
|
|
|
+	if (!iova)
|
|
|
+		return;
|
|
|
+
|
|
|
+	__dma_page_dev_to_cpu(page, offset, size, dir);
|
|
|
+}
|
|
|
+
|
|
|
+static void arm_iommu_sync_single_for_device(struct device *dev,
|
|
|
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
|
|
+{
|
|
|
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
|