@@ -408,3 +408,53 @@ void __init dma_contiguous_remap(void)
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
 		if (start >= end)
+			continue;
+
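+		/*
+		 * MT_MEMORY_DMA_READY is mapped with individual PTEs rather
+		 * than sections, so the attributes of pages in this region
+		 * can later be changed by __dma_remap().
+		 */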
+		map.pfn = __phys_to_pfn(start);
+		map.virtual = __phys_to_virt(start);
+		map.length = end - start;
+		map.type = MT_MEMORY_DMA_READY;
+
+		/*
+		 * Clear previous low-memory mapping
+		 */
+		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+		     addr += PMD_SIZE)
+			pmd_clear(pmd_off_k(addr));
+
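+		/* Install the replacement page-granular mapping. */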
+		iotable_init(&map, 1);
+	}
+}
+
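+/*
+ * apply_to_page_range() callback: rewrite a single kernel PTE with the
+ * page protection passed in via @data.
+ */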
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	struct page *page = virt_to_page(addr);
+	pgprot_t prot = *(pgprot_t *)data;
+
+	set_pte_ext(pte, mk_pte(page, prot), 0);
+	return 0;
+}
+
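+/*
+ * Update the attributes of an existing kernel linear mapping of @size
+ * bytes at @page, then flush any stale TLB entries for that range.
+ */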
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+	unsigned long start = (unsigned long) page_address(page);
+	unsigned long end = start + size;
+
+	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+	dsb();
+	flush_tlb_kernel_range(start, end);
+}
+
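+/*
+ * Allocate a buffer with __dma_alloc_buffer() and map it into vmalloc
+ * space with the requested attributes via __dma_alloc_remap() (both
+ * helpers are defined elsewhere in this file, outside this hunk).
+ */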
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+				  pgprot_t prot, struct page **ret_page,
+				  const void *caller)
+{
+	struct page *page;
+	void *ptr;
+	page = __dma_alloc_buffer(dev, size, gfp);
+	if (!page)
+		return NULL;
+
+	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+	if (!ptr) {