dataMonitoring.c

/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
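/*
 * Illustrative sketch (driver-side view, not something this file defines):
 * the ownership hand-off described above typically brackets device access
 * like this; start_device_transfer() and xfer_done are hypothetical helpers,
 * the map/unmap calls are the real API.
 *
 *      dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *      start_device_transfer(dev, dma, len);   (device now owns the buffer)
 *      wait_for_completion(&xfer_done);
 *      dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 *      (ownership is back with the CPU; it may read/write the page again)
 */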
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size, enum dma_data_direction dir,
             struct dma_attrs *attrs)
{
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                                      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
        __dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
        __dma_page_cpu_to_dev(page, offset, size, dir);
}
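/*
 * Illustrative sketch: these hooks back dma_sync_single_for_cpu() and
 * dma_sync_single_for_device(), which a driver uses to look at part of a
 * streaming buffer without unmapping it (ring_dma and desc_len below are
 * hypothetical):
 *
 *      dma_sync_single_for_cpu(dev, ring_dma, desc_len, DMA_FROM_DEVICE);
 *      (CPU may now safely read the descriptor the device wrote)
 *      dma_sync_single_for_device(dev, ring_dma, desc_len, DMA_FROM_DEVICE);
 *      (device may DMA into that region again)
 */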
struct dma_map_ops arm_dma_ops = {
        .alloc                  = arm_dma_alloc,
        .free                   = arm_dma_free,
        .mmap                   = arm_dma_mmap,
        .get_sgtable            = arm_dma_get_sgtable,
        .map_page               = arm_dma_map_page,
        .unmap_page             = arm_dma_unmap_page,
        .map_sg                 = arm_dma_map_sg,
        .unmap_sg               = arm_dma_unmap_sg,
        .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
        .sync_single_for_device = arm_dma_sync_single_for_device,
        .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arm_dma_sync_sg_for_device,
        .set_dma_mask           = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
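/*
 * These ops are what the generic DMA API dispatches to on this kernel
 * generation: dma_map_page() and friends go through
 * get_dma_ops(dev)->map_page() and so on, so a non-coherent ARM device
 * ends up in arm_dma_map_page() above, while a device switched to
 * arm_coherent_dma_ops skips the cache maintenance entirely.
 */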
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
                                  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
        .alloc                  = arm_coherent_dma_alloc,
        .free                   = arm_coherent_dma_free,
        .mmap                   = arm_dma_mmap,
        .get_sgtable            = arm_dma_get_sgtable,
        .map_page               = arm_coherent_dma_map_page,
        .map_sg                 = arm_dma_map_sg,
        .set_dma_mask           = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static u64 get_coherent_dma_mask(struct device *dev)
{
        u64 mask = (u64)arm_dma_limit;

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        return 0;
                }

                if ((~mask) & (u64)arm_dma_limit) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (u64)arm_dma_limit);
                        return 0;
                }
        }

        return mask;
}
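/*
 * Usage sketch: the coherent mask checked above is declared by the driver
 * before it allocates, e.g.
 *
 *      if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *              return -EIO;
 *
 * A mask that does not cover arm_dma_limit is rejected because a GFP_DMA
 * allocation may land anywhere below that limit and so cannot be
 * guaranteed to satisfy a narrower mask.
 */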
static void __dma_clear_buffer(struct page *page, size_t size)
{
        void *ptr;
        /*
         * Ensure that the allocated pages are zeroed, and that any data
         * lurking in the kernel direct-mapped region is invalidated.
         */
        ptr = page_address(page);
        if (ptr) {
                memset(ptr, 0, size);
                dmac_flush_range(ptr, ptr + size);
                outer_flush_range(__pa(ptr), __pa(ptr) + size);
        }
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
        unsigned long order = get_order(size);
        struct page *page, *p, *e;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * Now split the huge page and free the excess pages
         */
        split_page(page, order);
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                __free_page(p);

        __dma_clear_buffer(page, size);

        return page;
}
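/*
 * Worked example (4 KiB pages): a 20 KiB request gives get_order() == 3,
 * so alloc_pages() returns a 32 KiB (8-page) block.  split_page() turns it
 * into eight independent pages, the loop frees the trailing three, and the
 * caller keeps exactly the five pages it asked for.
 */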
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
        struct page *e = page + (size >> PAGE_SHIFT);

        while (page < e) {
                __free_page(page);
                page++;
        }
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                  pgprot_t prot, struct page **ret_page,
                                  const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
        const void *caller)
{
        struct vm_struct *area;
        unsigned long addr;

        /*
         * DMA allocation can be mapped to user space, so let's
         * set VM_USERMAP flags too.
         */
        area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
                                  caller);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        area->phys_addr = __pfn_to_phys(page_to_pfn(page));

        if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
                vunmap((void *)addr);
                return NULL;
        }
        return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
        unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
        struct vm_struct *area = find_vm_area(cpu_addr);
        if (!area || (area->flags & flags) != flags) {
                WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
                return;
        }
        unmap_kernel_range((unsigned long)cpu_addr, size);
        vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K

struct dma_pool {
        size_t size;
        spinlock_t lock;
        unsigned long *bitmap;
        unsigned long nr_pages;
        void *vaddr;
        struct page **pages;
};

static struct dma_pool atomic_pool = {
        .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
        atomic_pool.size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
        /*
         * Catch any attempt to set the pool size too late.
         */
        BUG_ON(atomic_pool.vaddr);

        /*
         * Set architecture specific coherent pool size only if
         * it has not been changed by kernel command line parameter.
         */
        if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
                atomic_pool.size = size;
}
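/*
 * Usage sketch: the atomic pool defaults to 256 KiB and can be resized
 * from the kernel command line, e.g.
 *
 *      coherent_pool=1M
 *
 * or by a platform calling init_dma_coherent_pool_size() before the pool
 * is allocated.  The DEFAULT_DMA_COHERENT_POOL_SIZE check above means a
 * command-line override always wins over the platform default.
 */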
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
        struct dma_pool *pool = &atomic_pool;
        pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
        gfp_t gfp = GFP_KERNEL | GFP_DMA;
        unsigned long nr_pages = pool->size >> PAGE_SHIFT;
        unsigned long *bitmap;
        struct page *page;
        struct page **pages;
        void *ptr;
        int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

        bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!bitmap)
                goto no_bitmap;

        pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto no_pages;

        if (IS_ENABLED(CONFIG_CMA))
                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
        else
                ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
                                           NULL);
        if (ptr) {
                int i;

                for (i = 0; i < nr_pages; i++)
                        pages[i] = page + i;

                spin_lock_init(&pool->lock);
                pool->vaddr = ptr;
                pool->pages = pages;
                pool->bitmap = bitmap;
                pool->nr_pages = nr_pages;
                pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
                        (unsigned)pool->size / 1024);
                return 0;
        }

        kfree(pages);
no_pages:
        kfree(bitmap);
no_bitmap:
        pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
               (unsigned)pool->size / 1024);
        return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
        phys_addr_t base;
        unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
        dma_mmu_remap[dma_mmu_remap_num].base = base;
        dma_mmu_remap[dma_mmu_remap_num].size = size;
        dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
        int i;
        for (i = 0; i < dma_mmu_remap_num; i++) {
                phys_addr_t start = dma_mmu_remap[i].base;
                phys_addr_t end = start + dma_mmu_remap[i].size;
                struct map_desc map;
                unsigned long addr;

                if (end > arm_lowmem_limit)
                        end = arm_lowmem_limit;
                if (start >= end)
                        continue;

                map.pfn = __phys_to_pfn(start);
                map.virtual = __phys_to_virt(start);
                map.length = end - start;
                map.type = MT_MEMORY_DMA_READY;

                /*
                 * Clear previous low-memory mapping
                 */
                for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                     addr += PMD_SIZE)
                        pmd_clear(pmd_off_k(addr));

                iotable_init(&map, 1);
        }
}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
                            void *data)
{
        struct page *page = virt_to_page(addr);
        pgprot_t prot = *(pgprot_t *)data;

        set_pte_ext(pte, mk_pte(page, prot), 0);
        return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
        unsigned long start = (unsigned long) page_address(page);
        unsigned long end = start + size;

        apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
        dsb();
        flush_tlb_kernel_range(start, end);
}
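/*
 * Design note: pages handed out by CMA stay in the cacheable kernel linear
 * mapping, so __dma_remap() rewrites their linear-map PTEs with the
 * uncached/writecombine protection before they are used as coherent
 * memory.  This avoids mapping the same physical page with mismatched
 * memory attributes, which the ARM architecture does not allow;
 * __free_from_contiguous() below restores pgprot_kernel before the pages
 * are returned to CMA.
 */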
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                  pgprot_t prot, struct page **ret_page,
                                  const void *caller)
{
        struct page *page;
        void *ptr;
        page = __dma_alloc_buffer(dev, size, gfp);
        if (!page)
                return NULL;

        ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
        if (!ptr) {
                __dma_free_buffer(page, size);
                return NULL;
        }

        *ret_page = page;
        return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
        struct dma_pool *pool = &atomic_pool;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned int pageno;
        unsigned long flags;
        void *ptr = NULL;
        unsigned long align_mask;

        if (!pool->vaddr) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        /*
         * Align the region allocation - allocations from pool are rather
         * small, so align them to their order in pages, minimum is a page
         * size. This helps reduce fragmentation of the DMA space.
         */
        align_mask = (1 << get_order(size)) - 1;

        spin_lock_irqsave(&pool->lock, flags);
        pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
                                            0, count, align_mask);
        if (pageno < pool->nr_pages) {
                bitmap_set(pool->bitmap, pageno, count);
                ptr = pool->vaddr + PAGE_SIZE * pageno;
                *ret_page = pool->pages[pageno];
        } else {
                pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
                            "Please increase it with coherent_pool= kernel parameter!\n",
                            (unsigned)pool->size / 1024);
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        return ptr;
}
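/*
 * Worked example (4 KiB pages): a 6 KiB request gives count == 2 and
 * get_order() == 1, so align_mask == 1 and the bitmap search only accepts
 * even page indices, i.e. the region starts on an 8 KiB boundary within
 * the pool.  A sub-page request gets align_mask == 0 and may start on any
 * free page.
 */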
static bool __in_atomic_pool(void *start, size_t size)
{
        struct dma_pool *pool = &atomic_pool;
        void *end = start + size;
        void *pool_start = pool->vaddr;
        void *pool_end = pool->vaddr + pool->size;

        if (start < pool_start || start >= pool_end)
                return false;

        if (end <= pool_end)
                return true;

        WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
             start, end - 1, pool_start, pool_end - 1);

        return false;
}

static int __free_from_pool(void *start, size_t size)
{
        struct dma_pool *pool = &atomic_pool;
        unsigned long pageno, count;
        unsigned long flags;

        if (!__in_atomic_pool(start, size))
                return 0;

        pageno = (start - pool->vaddr) >> PAGE_SHIFT;
        count = size >> PAGE_SHIFT;

        spin_lock_irqsave(&pool->lock, flags);
        bitmap_clear(pool->bitmap, pageno, count);
        spin_unlock_irqrestore(&pool->lock, flags);

        return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page)
{
        unsigned long order = get_order(size);
        size_t count = size >> PAGE_SHIFT;
        struct page *page;

        page = dma_alloc_from_contiguous(dev, count, order);
        if (!page)
                return NULL;

        __dma_clear_buffer(page, size);
        __dma_remap(page, size, prot);

        *ret_page = page;
        return page_address(page);
}

static void __free_from_contiguous(struct device *dev, struct page *page,
                                   size_t size)
{
        __dma_remap(page, size, pgprot_kernel);
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
        prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
                            pgprot_writecombine(prot) :
                            pgprot_dmacoherent(prot);
        return prot;
}

#define nommu() 0

#else   /* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)                           __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)      NULL
#define __alloc_from_pool(size, ret_page)                       NULL
#define __alloc_from_contiguous(dev, size, prot, ret)           NULL
#define __free_from_pool(cpu_addr, size)                        0
#define __free_from_contiguous(dev, page, size)                 do { } while (0)
#define __dma_free_remap(cpu_addr, size)                        do { } while (0)

#endif  /* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
                                   struct page **ret_page)
{
        struct page *page;
        page = __dma_alloc_buffer(dev, size, gfp);
        if (!page)
                return NULL;

        *ret_page = page;
        return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
        u64 mask = get_coherent_dma_mask(dev);
        struct page *page = NULL;
        void *addr;

#ifdef CONFIG_DMA_API_DEBUG
        u64 limit = (mask + 1) & ~mask;
        if (limit && size >= limit) {
                dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
                         size, mask);
                return NULL;
        }
#endif

        if (!mask)
                return NULL;

        if (mask < 0xffffffffULL)
                gfp |= GFP_DMA;

        /*
         * Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
         * handle them.  The real problem is that this flag probably
         * should be 0 on ARM as it is not supported on this
         * platform; see CONFIG_HUGETLBFS.
         */
        gfp &= ~(__GFP_COMP);

        *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);

        if (is_coherent || nommu())
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
        else if (!(gfp & __GFP_WAIT))
                addr = __alloc_from_pool(size, &page);
        else if (!IS_ENABLED(CONFIG_CMA))
                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
        else
                addr = __alloc_from_contiguous(dev, size, prot, &page);

        if (addr)
                *handle = pfn_to_dma(dev, page_to_pfn(page));