@@ -802,3 +802,169 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				op(vaddr, len, dir);
 				kunmap_high(page);
 			} else if (cache_is_vipt()) {
+				/* unmapped pages might still be cached */
+				vaddr = kmap_atomic(page);
+				op(vaddr + offset, len, dir);
+				kunmap_atomic(vaddr);
+			}
+		} else {
+			vaddr = page_address(page) + offset;
+			op(vaddr, len, dir);
+		}
+		offset = 0;
+		pfn++;
+		left -= len;
+	} while (left);
+}
+
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	unsigned long paddr;
+
+	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+
+	paddr = page_to_phys(page) + off;
+	if (dir == DMA_FROM_DEVICE) {
+		outer_inv_range(paddr, paddr + size);
+	} else {
+		outer_clean_range(paddr, paddr + size);
+	}
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
+}
+
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	unsigned long paddr = page_to_phys(page) + off;
+
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
+	if (dir != DMA_TO_DEVICE)
+		outer_inv_range(paddr, paddr + size);
+
+	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
+}
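
The note above is worth underlining: drivers never call these helpers directly; they go through the generic dma_sync_* interface, which on ARM ends up here. Below is a minimal, illustrative sketch of that driver-side handshake, not code from this patch; the function name example_rx and the buf/len buffer are hypothetical.

#include <linux/dma-mapping.h>

/* Illustrative sketch only: example_rx, buf and len are hypothetical. */
static void example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* ... start device DMA into "handle" and wait for completion ... */

	/* Hand the buffer back to the CPU before reading the data. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU examines buf ... */

	/* Return ownership to the device before the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... once the buffer is no longer needed ... */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}

In this file, the for_device half of that handshake is what __dma_page_cpu_to_dev() implements and the for_cpu half is what __dma_page_dev_to_cpu() implements.
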
+
+/**
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the dma_map_single interface.
+ * Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length.  They are obtained via
+ * sg_dma_{address,length}.
+ *
+ * Device ownership issues as mentioned for dma_map_single are the same
+ * here.
+ */
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i, j;
+
+	for_each_sg(sg, s, nents, i) {
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		s->dma_length = s->length;
+#endif
+		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+						s->length, dir, attrs);
+		if (dma_mapping_error(dev, s->dma_address))
+			goto bad_mapping;
+	}
+	return nents;
+
+ bad_mapping:
+	for_each_sg(sg, s, i, j)
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
+	return 0;
+}
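
As the kernel-doc above says, callers retrieve each element's bus address and length with sg_dma_{address,length}() after mapping. A hypothetical driver-side caller might look like the sketch below; example_queue_sg and program_descriptor are stand-ins, not part of this patch.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical hardware hook; a real driver would write its DMA descriptors here. */
static void program_descriptor(dma_addr_t addr, unsigned int len)
{
}

static int example_queue_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	/* Walk only the "count" entries that were actually mapped. */
	for_each_sg(sgl, s, count, i)
		program_descriptor(sg_dma_address(s), sg_dma_len(s));

	/* ... after the transfer completes, unmap with the original nents ... */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}

Per the generic DMA API rules, the descriptor loop uses the count returned by dma_map_sg(), while dma_unmap_sg() is called with the original nents.
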
+
+/**
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
+}
+
+/**
+ * arm_dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+					 dir);
+}
+
+/**
+ * arm_dma_sync_sg_for_device
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+					    dir);
+}
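
When a driver keeps a scatterlist mapped across several transfers, it bounces ownership with the dma_sync_sg_* pair that dispatches to the two functions above. A minimal, hypothetical sketch follows; example_reuse_sg is an assumed name, and sgl/nents are assumed to have been mapped earlier with dma_map_sg().

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative only: sgl/nents were previously mapped with dma_map_sg(). */
static void example_reuse_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	/* Claim the pages for the CPU so the just-received data is visible. */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

	/* ... CPU reads or updates the buffers behind sgl ... */

	/* Hand them back to the device before restarting DMA. */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
}
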
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+int dma_supported(struct device *dev, u64 mask)
+{
+	if (mask < (u64)arm_dma_limit)
+		return 0;