@@ -1399,3 +1399,167 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		if (!is_coherent &&
 			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		if (ret < 0)
+			goto fail;
+		count += len >> PAGE_SHIFT;
+		iova += len;
+	}
+	*handle = iova_base;
+
+	return 0;
+fail:
+	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+	__free_iova(mapping, iova_base, size);
+	return ret;
+}
+
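+/*
+ * __iommu_map_sg() walks the scatterlist and merges consecutive entries into
+ * the largest runs that __map_sg_chunk() can map as a single IOVA-contiguous
+ * chunk.  A run is flushed whenever the next entry does not start at a page
+ * boundary, the accumulated size is not a multiple of PAGE_SIZE, or adding
+ * the entry would exceed dma_get_max_seg_size(dev).  Each flushed run becomes
+ * one DMA segment in the resulting list.
+ */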
+static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		     enum dma_data_direction dir, struct dma_attrs *attrs,
+		     bool is_coherent)
+{
+	struct scatterlist *s = sg, *dma = sg, *start = sg;
+	int i, count = 0;
+	unsigned int offset = s->offset;
+	unsigned int size = s->offset + s->length;
+	unsigned int max = dma_get_max_seg_size(dev);
+
+	for (i = 1; i < nents; i++) {
+		s = sg_next(s);
+
+		s->dma_address = DMA_ERROR_CODE;
+		s->dma_length = 0;
+
+		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+			    dir, attrs, is_coherent) < 0)
+				goto bad_mapping;
+
+			dma->dma_address += offset;
+			dma->dma_length = size - offset;
+
+			size = offset = s->offset;
+			start = s;
+			dma = sg_next(dma);
+			count += 1;
+		}
+		size += s->length;
+	}
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+		is_coherent) < 0)
+		goto bad_mapping;
+
+	dma->dma_address += offset;
+	dma->dma_length = size - offset;
+
+	return count+1;
+
+bad_mapping:
+	for_each_sg(sg, s, count, i)
+		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+	return 0;
+}
+
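+/*
+ * The coherent and streaming map_sg entry points below share __iommu_map_sg();
+ * the is_coherent flag only decides whether __map_sg_chunk() performs CPU
+ * cache maintenance (__dma_page_cpu_to_dev()) before the pages are handed to
+ * the device.  The IOMMU mapping itself is identical in both cases.
+ */
+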
+/**
+ * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of i/o coherent buffers described by scatterlist in streaming
+ * mode for DMA. The scatter gather list elements are merged together (if
+ * possible) and tagged with the appropriate dma address and length. They are
+ * obtained via sg_dma_{address,length}.
+ */
+int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+}
+
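+/*
+ * Typical use (illustrative sketch): a driver whose device has been attached
+ * to an IOMMU mapping with arm_iommu_attach_device() just calls the generic
+ * DMA API, which dispatches here through the device's dma_map_ops:
+ *
+ *	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ *	if (mapped == 0)
+ *		return -ENOMEM;
+ *	for_each_sg(sgl, s, mapped, i)
+ *		program_device(sg_dma_address(s), sg_dma_len(s));
+ *	...
+ *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ *
+ * program_device() stands in for whatever descriptor setup the driver does;
+ * dma_unmap_sg() must be passed the original nents, not the returned count.
+ */
+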
+static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
+		bool is_coherent)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		if (sg_dma_len(s))
+			__iommu_remove_mapping(dev, sg_dma_address(s),
+					       sg_dma_len(s));
+		if (!is_coherent &&
+		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+			__dma_page_dev_to_cpu(sg_page(s), s->offset,
+					      s->length, dir);
+	}
+}
+
+/**
+ * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+}
+
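+/*
+ * arm_iommu_sync_sg_for_cpu() below performs only per-element CPU cache
+ * maintenance (__dma_page_dev_to_cpu()); the IOMMU mapping set up at
+ * dma_map_sg() time is left in place.
+ */
+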
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);