
/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-aurora-l2.h"

#define CACHE_LINE_SIZE         32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;       /* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
        void (*setup)(const struct device_node *, u32 *, u32 *);
        void (*save)(void);
        struct outer_cache_fns outer_cache;
};

static bool of_init = false;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
        /* wait for cache operation by line or way to complete */
        while (readl_relaxed(reg) & mask)
                cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
        /* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait      cache_wait_way
#endif
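
/*
 * Issue a cache sync: write to the sync register and wait for any
 * outstanding background operation to finish.  sync_reg_offset defaults
 * to L2X0_CACHE_SYNC and is kept in a variable so controller variants
 * can point it at a different register.
 */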
static inline void cache_sync(void)
{
        void __iomem *base = l2x0_base;

        writel_relaxed(0, base + sync_reg_offset);
        cache_wait(base + L2X0_CACHE_SYNC, 1);
}
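
/*
 * Single-line maintenance by physical address.  Each helper waits for
 * the previous line operation to retire before posting the next one.
 */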
static inline void l2x0_clean_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
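
/*
 * The PL310 errata workarounds below toggle the debug control register
 * around certain maintenance operations.  debug_writel() goes through
 * outer_cache.set_debug so a platform can override the accessor; when
 * neither erratum is configured it compiles away to nothing.
 */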
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
        if (outer_cache.set_debug)
                outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
        writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug NULL
#endif
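
/*
 * Flush (clean + invalidate) one line by PA.  With the 588369 erratum
 * workaround enabled, the combined Clean&Invalidate-by-PA operation is
 * avoided and replaced by a separate clean followed by an invalidate;
 * callers wrap this in debug_writel(0x03)/debug_writel(0x00) as the
 * workaround requires.
 */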
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;

        /* Clean by PA followed by Invalidate by PA */
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
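
/* outer_cache sync hook: cache_sync() under the l2x0 lock */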
static void l2x0_cache_sync(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
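
/*
 * Whole-cache maintenance by way mask.  __l2x0_flush_all() expects
 * l2x0_lock to be held by the caller; l2x0_flush_all(), l2x0_clean_all()
 * and l2x0_inv_all() take the lock themselves.
 */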
static void __l2x0_flush_all(void)
{
        debug_writel(0x03);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
        cache_sync();
        debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
        unsigned long flags;

        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
        unsigned long flags;

        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
        unsigned long flags;

        /* invalidate all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        /* Invalidating when L2 is enabled is a nono */
        BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
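
/*
 * Invalidate a physical address range.  Partial lines at either end are
 * flushed (clean + invalidate) rather than invalidated so that unrelated
 * dirty data sharing the line is not lost.  The range is processed in
 * 4K blocks, dropping and retaking the lock between blocks to bound
 * interrupt latency.
 */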
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        if (start & (CACHE_LINE_SIZE - 1)) {
                start &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(start);
                debug_writel(0x00);
                start += CACHE_LINE_SIZE;
        }

        if (end & (CACHE_LINE_SIZE - 1)) {
                end &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(end);
                debug_writel(0x00);
        }

        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_inv_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
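
/*
 * Clean a physical address range.  If the range covers the whole cache
 * it is cheaper to clean by way, so fall back to l2x0_clean_all().
 */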
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_clean_all();
                return;
        }

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_clean_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
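
/*
 * Clean and invalidate a physical address range.  Ranges at least as
 * large as the cache fall back to l2x0_flush_all(); otherwise lines are
 * flushed in 4K blocks, with the errata debug sequence wrapped around
 * each block.
 */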
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_flush_all();
                return;
        }

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                debug_writel(0x03);
                while (start < blk_end) {
                        l2x0_flush_line(start);
                        start += CACHE_LINE_SIZE;
                }
                debug_writel(0x00);

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
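
/*
 * Flush everything and turn the controller off.  The dsb() ensures the
 * write disabling the controller has completed before the lock is
 * released.
 */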
static void l2x0_disable(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        writel_relaxed(0, l2x0_base + L2X0_CTRL);
        dsb();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
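
/*
 * Clear the data and instruction lockdown registers so that all ways
 * are available for allocation.  The number of lockdown register pairs
 * depends on the controller: 8 on PL310, 4 on Aurora, 1 on L210 and
 * anything unrecognised.
 */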
static void l2x0_unlock(u32 cache_id)
{
        int lockregs;
        int i;

        switch (cache_id) {
        case L2X0_CACHE_ID_PART_L310:
                lockregs = 8;
                break;
        case AURORA_CACHE_ID:
                lockregs = 4;
                break;
        default:
                /* L210 and unknown types */
                lockregs = 1;
                break;
        }

        for (i = 0; i < lockregs; i++) {
                writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
                               i * L2X0_LOCKDOWN_STRIDE);
                writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
                               i * L2X0_LOCKDOWN_STRIDE);
        }
}
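
/*
 * Probe the controller, adjust the auxiliary control register and
 * register the outer_cache operations.  The current AUX_CTRL value is
 * masked with aux_mask and ORed with aux_val, so passing (0, ~0UL)
 * leaves the hardware configuration untouched.  A platform would
 * typically call this from its early init code, for example
 * (illustrative address only):
 *
 *      void __iomem *l2 = ioremap(PLAT_L2CC_PHYS_BASE, SZ_4K);
 *      if (l2)
 *              l2x0_init(l2, 0, ~0UL);
 */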
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
        u32 aux;
        u32 cache_id;
        u32 way_size = 0;
        int ways;
        int way_size_shift = L2X0_WAY_SIZE_SHIFT;
        const char *type;

        l2x0_base = base;