basicAlgorithmEncapsulation.h

/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
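
/*
 * Illustration (added for this text, not part of the original header):
 * the I/D-coherency entries above are what code-patching paths end up
 * calling. A hedged sketch, assuming a hypothetical kernel buffer
 * code_buf of code_len bytes that has just been written and must become
 * visible to the I-cache before it is executed:
 *
 *	unsigned long start = (unsigned long)code_buf;
 *	__cpuc_coherent_kern_range(start, start + code_len);
 *
 * Both addresses are virtual, and per the convention above the range is
 * [start, end): start inclusive, end exclusive.
 */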
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};
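
/*
 * Illustration (added for this text): each supported cache type supplies
 * one such table. In the kernel tree the tables are assembled from
 * assembly macros (arch/arm/mm/proc-macros.S); the C sketch below only
 * shows their shape, using hypothetical v7-style entry points declared
 * here purely so the sketch is self-contained.
 */
extern void v7_example_flush_icache_all(void);
extern void v7_example_flush_kern_cache_all(void);
/* ... the remaining entry points would be declared likewise ... */

static const struct cpu_cache_fns v7_cache_fns_sketch = {
	.flush_icache_all = v7_example_flush_icache_all,
	.flush_kern_all   = v7_example_flush_kern_cache_all,
	/* ... the remaining members are wired the same way ... */
};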
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range
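
/*
 * Example (added for this text): under MULTI_CACHE a call site such as
 *
 *	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 *
 * expands, by plain macro substitution, to an indirect call through the
 * table:
 *
 *	cpu_cache.flush_kern_dcache_area(page_address(page), PAGE_SIZE);
 *
 * so a single kernel image can drive several cache implementations,
 * chosen when the table is filled in at boot. Call sites are identical
 * in both configurations.
 */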
#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
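
/*
 * Standalone sketch (added for this text; user-space C, not kernel
 * code) of the same encapsulation pattern: one compile-time switch
 * selects either an indirect call through an ops table (the MULTI_CACHE
 * case) or a direct call to a single implementation. All names below
 * are hypothetical; build with -DMULTI_IMPL to exercise the table path.
 */
#include <stdio.h>
#include <stddef.h>

/* Function-pointer table, analogous to struct cpu_cache_fns. */
struct cache_ops {
	void (*flush_dcache_area)(void *addr, size_t size);
};

/* Stand-in for one concrete cache implementation. */
static void demo_flush_dcache_area(void *addr, size_t size)
{
	printf("flushing %zu bytes at %p\n", size, addr);
}

#ifdef MULTI_IMPL
/* Runtime dispatch: the table would be filled in once the CPU type is
 * identified; here it is initialised statically for the demo. */
static struct cache_ops cache = {
	.flush_dcache_area = demo_flush_dcache_area,
};
#define flush_dcache_area	cache.flush_dcache_area
#else
/* Single implementation: the call binds directly, no indirection. */
#define flush_dcache_area	demo_flush_dcache_area
#endif

int main(void)
{
	char buf[64];

	/* The call site is identical in both configurations, which is
	 * exactly the property the kernel header above relies on. */
	flush_dcache_area(buf, sizeof(buf));
	return 0;
}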