@@ -183,3 +183,97 @@ static u64 get_coherent_dma_mask(struct device *dev)
 	return mask;
 }
+
+static void __dma_clear_buffer(struct page *page, size_t size)
+{
+	void *ptr;
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
+	ptr = page_address(page);
+	if (ptr) {
+		memset(ptr, 0, size);
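+		/*
+		 * Flush the zeroed data out of the CPU caches: the inner
+		 * cache is maintained by virtual address, the outer (e.g.
+		 * L2) cache by physical address.
+		 */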
+		dmac_flush_range(ptr, ptr + size);
+		outer_flush_range(__pa(ptr), __pa(ptr) + size);
+	}
+}
+
+/*
+ * Allocate a DMA buffer for 'dev' of size 'size' using the
+ * specified gfp mask. Note that 'size' must be page aligned.
+ */
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+	unsigned long order = get_order(size);
+	struct page *page, *p, *e;
+
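+	/* get_order() rounds the request up to a power-of-two number of pages */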
+	page = alloc_pages(gfp, order);
+	if (!page)
+		return NULL;
+
+	/*
+	 * Now split the huge page and free the excess pages
+	 */
+	split_page(page, order);
+	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
+		__free_page(p);
+
+	__dma_clear_buffer(page, size);
+
+	return page;
+}
+
+/*
+ * Free a DMA buffer. 'size' must be page aligned.
+ */
+static void __dma_free_buffer(struct page *page, size_t size)
+{
+	struct page *e = page + (size >> PAGE_SHIFT);
+
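+	/* The buffer was split into order-0 pages, so free them one by one */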
+	while (page < e) {
+		__free_page(page);
+		page++;
+	}
+}
+
+#ifdef CONFIG_MMU
+#ifdef CONFIG_HUGETLB_PAGE
+#error ARM Coherent DMA allocator does not (yet) support huge TLB
+#endif
+
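+/* Forward declarations: both allocators are defined later in this file */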
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+				     pgprot_t prot, struct page **ret_page);
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+				  pgprot_t prot, struct page **ret_page,
+				  const void *caller);
+
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+	const void *caller)
+{
+	struct vm_struct *area;
+	unsigned long addr;
+
+	/*
+	 * DMA allocation can be mapped to user space, so let's
+	 * set VM_USERMAP flags too.
+	 */
+	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+				  caller);
+	if (!area)
+		return NULL;
+	addr = (unsigned long)area->addr;
+	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
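+	/* Map the buffer's pages into the new vm area with the requested pgprot */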
+	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
+		vunmap((void *)addr);
+		return NULL;
+	}
+	return (void *)addr;
+}
+
+static void __dma_free_remap(void *cpu_addr, size_t size)
+{
+	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;