@@ -0,0 +1,185 @@
+/*
+ * linux/arch/arm/mm/dma-mapping.c
+ *
+ * Copyright (C) 2000-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * DMA uncached mapping support.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+
+#include <asm/memory.h>
+#include <asm/highmem.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mach/arch.h>
+#include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system_info.h>
+#include <asm/dma-contiguous.h>
+
+#include "mm.h"
+
+/*
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ */
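Illustration (hypothetical driver code, not part of the patch): the ownership
transitions described above map onto an ordinary streaming DMA sequence. The
CPU prepares a kmalloc'd buffer, dma_map_single() cleans the cache and hands
the buffer to the device, and dma_unmap_single() gives it back to the CPU.

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Hypothetical helper, shown only to illustrate buffer ownership. */
    static int example_send(struct device *dev, const void *data, size_t len)
    {
            dma_addr_t addr;
            void *buf = kmalloc(len, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            memcpy(buf, data, len);         /* CPU owns the buffer */

            addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addr)) {
                    kfree(buf);
                    return -ENOMEM;
            }
            /* Device owns the buffer: the map cleaned the cache for it. */

            /* ... program the device with 'addr' and wait for completion ... */

            dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
            /* CPU owns the buffer again. */
            kfree(buf);
            return 0;
    }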
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
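A hedged caller-side sketch for the routine above (hypothetical code, not part
of the patch): dma_map_page() reaches arm_dma_map_page() through the ops table
further down, cleans the cache over the mapped bytes, and returns the
device-visible address of the page plus the offset.

    #include <linux/dma-mapping.h>

    /* Hypothetical caller: map 512 bytes starting 64 bytes into 'page'. */
    static dma_addr_t example_map_part(struct device *dev, struct page *page)
    {
            dma_addr_t handle;

            handle = dma_map_page(dev, page, 64, 512, DMA_TO_DEVICE);
            /* On success, handle is the bus address of 'page' plus 64;
             * the caller must still check dma_mapping_error(dev, handle). */
            return handle;
    }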
+
+static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation. The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+				      handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+}
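The two sync hooks above support partial, temporary ownership transfers on a
mapping that stays live across many transfers. A hedged sketch (hypothetical
driver code, not part of the patch):

    #include <linux/dma-mapping.h>

    /* Hypothetical RX-style usage of a long-lived streaming mapping. */
    static void example_poll_rx(struct device *dev, dma_addr_t handle,
                                void *cpu_addr, size_t len)
    {
            /* Take the buffer back from the device; on ARM this ends up in
             * arm_dma_sync_single_for_cpu() and invalidates stale lines. */
            dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

            /* ... the CPU may now read what the device wrote into cpu_addr ... */

            /* Hand the buffer back to the device for the next transfer. */
            dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
    }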
+
+struct dma_map_ops arm_dma_ops = {
+	.alloc			= arm_dma_alloc,
+	.free			= arm_dma_free,
+	.mmap			= arm_dma_mmap,
+	.get_sgtable		= arm_dma_get_sgtable,
+	.map_page		= arm_dma_map_page,
+	.unmap_page		= arm_dma_unmap_page,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
+	.sync_single_for_device	= arm_dma_sync_single_for_device,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
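For context, a simplified sketch of how this table is consumed (editorial
illustration, not part of the patch; the real inline helpers live in the
dma-mapping headers and also handle attributes and debug hooks): the generic
DMA API looks up the device's dma_map_ops and dispatches through it, so a
device using arm_dma_ops reaches arm_dma_map_page() above.

    #include <linux/dma-mapping.h>

    /* Simplified dispatch sketch, not the kernel's actual inline. */
    static inline dma_addr_t sketch_dma_map_page(struct device *dev,
                    struct page *page, unsigned long offset, size_t size,
                    enum dma_data_direction dir)
    {
            struct dma_map_ops *ops = get_dma_ops(dev); /* &arm_dma_ops here */

            return ops->map_page(dev, page, offset, size, dir, NULL);
    }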
+
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+	dma_addr_t handle, struct dma_attrs *attrs);
+
+struct dma_map_ops arm_coherent_dma_ops = {
+	.alloc			= arm_coherent_dma_alloc,
+	.free			= arm_coherent_dma_free,
+	.mmap			= arm_dma_mmap,
+	.get_sgtable		= arm_dma_get_sgtable,
+	.map_page		= arm_coherent_dma_map_page,
+	.map_sg			= arm_dma_map_sg,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_coherent_dma_ops);
+
+static u64 get_coherent_dma_mask(struct device *dev)
+{
+	u64 mask = (u64)arm_dma_limit;
+
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			return 0;
+		}
+
+		if ((~mask) & (u64)arm_dma_limit) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (u64)arm_dma_limit);
+			return 0;
+		}
+	}
+
+	return mask;
+}
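A worked example of the second check, using hypothetical values: with
arm_dma_limit = 0x0fffffff (a DMA zone topping out at 256 MiB) and
dev->coherent_dma_mask = DMA_BIT_MASK(24) = 0x00ffffff, the test evaluates
(~0x00ffffff) & 0x0fffffff = 0x0f000000, which is non-zero, so the device
cannot address the whole DMA zone; the helper warns and returns 0, and the
allocation paths that call it fail. A mask of 0x0fffffff or wider clears the
check and is returned unchanged.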