@@ -458,3 +458,195 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
 	if (!ptr) {
+		__dma_free_buffer(page, size);
+		return NULL;
+	}
+
+	*ret_page = page;
+	return ptr;
+}
+
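+/*
+ * Allocate from the pre-mapped atomic pool; used below in __dma_alloc()
+ * for requests that are not allowed to sleep (no __GFP_WAIT).
+ */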
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+	struct dma_pool *pool = &atomic_pool;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned int pageno;
+	unsigned long flags;
+	void *ptr = NULL;
+	unsigned long align_mask;
+
+	if (!pool->vaddr) {
+		WARN(1, "coherent pool not initialised!\n");
+		return NULL;
+	}
+
+	/*
+	 * Align the region allocation - allocations from pool are rather
+	 * small, so align them to their order in pages, minimum is a page
+	 * size. This helps reduce fragmentation of the DMA space.
+	 */
+ align_mask = (1 << get_order(size)) - 1;
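+	/* e.g. a 16 KiB request is order 2, so align_mask = 3 and the search starts on a 4-page boundary */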
+
+	spin_lock_irqsave(&pool->lock, flags);
+	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
+					    0, count, align_mask);
+	if (pageno < pool->nr_pages) {
+		bitmap_set(pool->bitmap, pageno, count);
+		ptr = pool->vaddr + PAGE_SIZE * pageno;
+		*ret_page = pool->pages[pageno];
+	} else {
+		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+			    "Please increase it with coherent_pool= kernel parameter!\n",
+			    (unsigned)pool->size / 1024);
+	}
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	return ptr;
+}
+
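+/* Return true if [start, start + size) lies entirely inside the atomic pool. */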
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	struct dma_pool *pool = &atomic_pool;
+	void *end = start + size;
+	void *pool_start = pool->vaddr;
+	void *pool_end = pool->vaddr + pool->size;
+
+	if (start < pool_start || start >= pool_end)
+		return false;
+
+	if (end <= pool_end)
+		return true;
+
+	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+	     start, end - 1, pool_start, pool_end - 1);
+
+	return false;
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+	struct dma_pool *pool = &atomic_pool;
+	unsigned long pageno, count;
+	unsigned long flags;
+
+	if (!__in_atomic_pool(start, size))
+		return 0;
+
+	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
+	count = size >> PAGE_SHIFT;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	bitmap_clear(pool->bitmap, pageno, count);
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	return 1;
+}
+
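+/*
+ * Allocate from the CMA region, clear the buffer for the device and remap
+ * it with the requested page protection.
+ */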
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+				     pgprot_t prot, struct page **ret_page)
+{
+	unsigned long order = get_order(size);
+	size_t count = size >> PAGE_SHIFT;
+	struct page *page;
+
+	page = dma_alloc_from_contiguous(dev, count, order);
+	if (!page)
+		return NULL;
+
+	__dma_clear_buffer(page, size);
+	__dma_remap(page, size, prot);
+
+	*ret_page = page;
+	return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+				   size_t size)
+{
+	__dma_remap(page, size, pgprot_kernel);
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
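+/*
+ * Choose the page protection for the mapping: write-combine when
+ * DMA_ATTR_WRITE_COMBINE is set, dmacoherent otherwise.
+ */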
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
+			    pgprot_writecombine(prot) :
+			    pgprot_dmacoherent(prot);
+	return prot;
+}
+
+#define nommu() 0
+
+#else /* !CONFIG_MMU */
+
+#define nommu() 1
+
+#define __get_dma_pgprot(attrs, prot) __pgprot(0)
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+#define __alloc_from_pool(size, ret_page) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __free_from_pool(cpu_addr, size) 0
+#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __dma_free_remap(cpu_addr, size) do { } while (0)
+
+#endif /* CONFIG_MMU */
+
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+				   struct page **ret_page)
+{
+	struct page *page;
+	page = __dma_alloc_buffer(dev, size, gfp);
+	if (!page)
+		return NULL;
+
+	*ret_page = page;
+	return page_address(page);
+}
+
+
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+{
+	u64 mask = get_coherent_dma_mask(dev);
+	struct page *page = NULL;
+	void *addr;
+
+#ifdef CONFIG_DMA_API_DEBUG
+	u64 limit = (mask + 1) & ~mask;
+	if (limit && size >= limit) {
+		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+			size, mask);
+		return NULL;
+	}
+#endif
+
+	if (!mask)
+		return NULL;
+
+	if (mask < 0xffffffffULL)
+		gfp |= GFP_DMA;
+
+	/*
+	 * Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them. The real problem is that this flag probably
+	 * should be 0 on ARM as it is not supported on this
+	 * platform; see CONFIG_HUGETLBFS.
+	 */
+	gfp &= ~(__GFP_COMP);
+
+	*handle = DMA_ERROR_CODE;
+	size = PAGE_ALIGN(size);
+
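+	/*
+	 * Pick an allocation path: a plain buffer for coherent devices or
+	 * !CONFIG_MMU, the atomic pool when sleeping is not allowed, a
+	 * remapped buffer when CMA is disabled, and CMA otherwise.
+	 */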
+	if (is_coherent || nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (!(gfp & __GFP_WAIT))
+		addr = __alloc_from_pool(size, &page);
+	else if (!IS_ENABLED(CONFIG_CMA))
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+	else
+		addr = __alloc_from_contiguous(dev, size, prot, &page);
+
+	if (addr)
+		*handle = pfn_to_dma(dev, page_to_pfn(page));