@@ -266,3 +266,84 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 {
 	/*
 	 * No need to do anything since the CPU isn't supposed to
+	 * touch this memory after we flushed it at mapping- or
+	 * sync-for-device time.
+	 */
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+			   size_t size, enum dma_data_direction direction)
+{
+	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+	/* just sync everything, that's all the pci API can do */
+	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+	/* just sync everything, that's all the pci API can do */
+	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
+}
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as dma_sync_single_for_* but for a scatter-gather list,
+ * same rules and usage.
+ */
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		    int nents, enum dma_data_direction direction)
+{
+	/*
+	 * No need to do anything since the CPU isn't supposed to
+	 * touch this memory after we flushed it at mapping- or
+	 * sync-for-device time.
+	 */
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		       int nents, enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; i++) {
+		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction);
+	}
+}
+
+/* Now for the API extensions over the pci_ one */
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+#endif /* __ASM_AVR32_DMA_MAPPING_H */
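
For readers unfamiliar with the streaming DMA API these hooks implement: a driver
maps a buffer, lets the device transfer into it, syncs for the CPU before reading,
and syncs back for the device before reusing the buffer. Below is a minimal,
hypothetical driver-side sketch (not part of the patch; my_rx_once, my_dev and
BUF_SIZE are illustrative names) showing that calling sequence, and why
dma_sync_single_for_cpu() can be a no-op on AVR32: the flush already happened at
map or sync-for-device time.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define BUF_SIZE 4096	/* hypothetical buffer size */

static int my_rx_once(struct device *my_dev)
{
	void *buf = kmalloc(BUF_SIZE, GFP_KERNEL);
	dma_addr_t handle;

	if (!buf)
		return -ENOMEM;

	/* dma_map_single() performs the initial cache maintenance. */
	handle = dma_map_single(my_dev, buf, BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(my_dev, handle)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... program the device to DMA into 'handle', wait for completion ... */

	/*
	 * Required by the portable API before the CPU looks at the data;
	 * a no-op on AVR32, as the comment in this header explains.
	 */
	dma_sync_single_for_cpu(my_dev, handle, BUF_SIZE, DMA_FROM_DEVICE);

	/* ... CPU may read 'buf' here ... */

	dma_unmap_single(my_dev, handle, BUF_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
	return 0;
}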
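The scatter-gather variants follow the same ownership rules, applied per list
entry. A similarly hypothetical sketch (my_resubmit is an invented name; the list
is assumed to have been mapped earlier with dma_map_sg()) of handing buffers back
to the device for another transfer:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Reuse an already-mapped scatterlist for another device-bound transfer. */
static void my_resubmit(struct device *my_dev, struct scatterlist *sg,
			int nents)
{
	/*
	 * CPU is done writing the buffers; dma_sync_sg_for_device()
	 * walks the list and calls dma_cache_sync() on each entry,
	 * exactly as the loop in this header does.
	 */
	dma_sync_sg_for_device(my_dev, sg, nents, DMA_TO_DEVICE);

	/* ... hand the descriptor list back to the hardware ... */
}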