
#ifndef _M68K_DMA_H
#define _M68K_DMA_H 1

#ifdef CONFIG_COLDFIRE
/*
 * ColdFire DMA Model:
 * ColdFire DMA supports two forms of DMA: Single and Dual address. Single
 * address mode emits a source address, and expects that the device will
 * either pick up the data (DMA READ) or source data (DMA WRITE). This implies
 * that the device will place data on the correct byte(s) of the data bus, as
 * the memory transactions are always 32 bits. This implies that only 32-bit
 * devices will find single-address-mode transfers useful. Dual address DMA
 * mode performs two cycles: source read and destination write. ColdFire will
 * align the data so that the device always gets the correct bytes, and thus
 * this mode is useful for 8- and 16-bit devices. This is the mode that is
 * supported below.
 *
 * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
 *
 * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
 *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
 *
 * APR/18/2002 : added proper support for MCF5272 DMA controller.
 *               Arthur Shipkowski (art@videon-central.com)
 */
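/*
 * For example (illustrative, based on the definitions below): DMA_MODE_READ
 * selects a dual-address, 8-bit, I/O-to-memory transfer, while
 * DMA_MODE_READ_SINGLE selects the corresponding single-address-mode variant
 * (available on the non-MCF5272 controllers only).
 */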
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfdma.h>

/*
 * Set number of channels of DMA on ColdFire for different implementations.
 */
#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
    defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
    defined(CONFIG_M528x) || defined(CONFIG_M525x)
#define MAX_M68K_DMA_CHANNELS 4
#elif defined(CONFIG_M5272)
#define MAX_M68K_DMA_CHANNELS 1
#elif defined(CONFIG_M532x)
#define MAX_M68K_DMA_CHANNELS 0
#else
#define MAX_M68K_DMA_CHANNELS 2
#endif
extern unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
extern unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];

#if !defined(CONFIG_M5272)

#define DMA_MODE_WRITE_BIT  0x01        /* Memory/IO to IO/Memory select */
#define DMA_MODE_WORD_BIT   0x02        /* 8 or 16 bit transfers */
#define DMA_MODE_LONG_BIT   0x04        /* or 32 bit transfers */
#define DMA_MODE_SINGLE_BIT 0x08        /* single-address-mode */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ 0
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE 1
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD 2
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD 3
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG 4
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG 5
/* I/O to memory, 8 bits, single-address-mode */
#define DMA_MODE_READ_SINGLE 8
/* memory to I/O, 8 bits, single-address-mode */
#define DMA_MODE_WRITE_SINGLE 9
/* I/O to memory, 16 bits, single-address-mode */
#define DMA_MODE_READ_WORD_SINGLE 10
/* memory to I/O, 16 bits, single-address-mode */
#define DMA_MODE_WRITE_WORD_SINGLE 11
/* I/O to memory, 32 bits, single-address-mode */
#define DMA_MODE_READ_LONG_SINGLE 12
/* memory to I/O, 32 bits, single-address-mode */
#define DMA_MODE_WRITE_LONG_SINGLE 13
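/*
 * For example (illustrative, not part of the original header): each mode
 * value above is simply an OR of the bit definitions, e.g.
 *
 *      DMA_MODE_WRITE_WORD        == DMA_MODE_WRITE_BIT | DMA_MODE_WORD_BIT
 *      DMA_MODE_WRITE_LONG_SINGLE == DMA_MODE_WRITE_BIT | DMA_MODE_LONG_BIT |
 *                                    DMA_MODE_SINGLE_BIT
 */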
#else /* CONFIG_M5272 is defined */

/* Source static-address mode */
#define DMA_MODE_SRC_SA_BIT 0x01
/* Two bits to select between all four sizes */
#define DMA_MODE_SSIZE_MASK 0x06
/* Offset to shift bits in */
#define DMA_MODE_SSIZE_OFF 0x01
/* Destination static-address mode */
#define DMA_MODE_DES_SA_BIT 0x10
/* Two bits to select between all four sizes */
#define DMA_MODE_DSIZE_MASK 0x60
/* Offset to shift bits in */
#define DMA_MODE_DSIZE_OFF 0x05
/* Size modifiers */
#define DMA_MODE_SIZE_LONG 0x00
#define DMA_MODE_SIZE_BYTE 0x01
#define DMA_MODE_SIZE_WORD 0x02
#define DMA_MODE_SIZE_LINE 0x03

/*
 * Aliases to help speed quick ports; these may be suboptimal, however. They
 * do not include the SINGLE mode modifiers since the MCF5272 does not have a
 * mode where the device is in control of its addressing.
 */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE ((DMA_MODE_SIZE_BYTE << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_BYTE << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD ((DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
/* I/O to memory, 32 bits, mode */
#define DMA_MODE_READ_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_SRC_SA_BIT)
/* memory to I/O, 32 bits, mode */
#define DMA_MODE_WRITE_LONG ((DMA_MODE_SIZE_LONG << DMA_MODE_DSIZE_OFF) | (DMA_MODE_SIZE_LONG << DMA_MODE_SSIZE_OFF) | DMA_MODE_DES_SA_BIT)
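/*
 * For example (illustrative, not part of the original header): a custom mode
 * word for a memory-to-memory copy using 16-bit transfers with both addresses
 * incrementing would set neither static-address bit:
 *
 *      (DMA_MODE_SIZE_WORD << DMA_MODE_DSIZE_OFF) |
 *      (DMA_MODE_SIZE_WORD << DMA_MODE_SSIZE_OFF)
 */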
#endif /* !defined(CONFIG_M5272) */

#if !defined(CONFIG_M5272)

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
        volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
        printk("enable_dma(dmanr=%d)\n", dmanr);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
}

static __inline__ void disable_dma(unsigned int dmanr)
{
        volatile unsigned short *dmawp;
        volatile unsigned char *dmapb;

#ifdef DMA_DEBUG
        printk("disable_dma(dmanr=%d)\n", dmanr);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        dmapb = (unsigned char *) dma_base_addr[dmanr];

        /* Turn off external requests, and stop any DMA in progress */
        dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
        dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
}
/*
 * Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 *
 * This is a NOP for ColdFire. Provide a stub for compatibility.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
        volatile unsigned char *dmabp;
        volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
        printk("set_dma_mode(dmanr=%d,mode=%d)\n", dmanr, mode);
#endif

        dmabp = (unsigned char *) dma_base_addr[dmanr];
        dmawp = (unsigned short *) dma_base_addr[dmanr];

        /* Clear config errors */
        dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;

        /* Set command register */
        dmawp[MCFDMA_DCR] =
                MCFDMA_DCR_INT |        /* Enable completion irq */
                MCFDMA_DCR_CS |         /* Force one xfer per request */
                MCFDMA_DCR_AA |         /* Enable auto alignment */
                /* single-address-mode */
                ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
                /* sets s_rw (-> r/w) high if memory to I/O */
                ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
                /* memory to I/O or I/O to memory */
                ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
                /* 32 bit, 16 bit or 8 bit transfers */
                ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
                        ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
                                MCFDMA_DCR_SSIZE_BYTE)) |
                ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
                        ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
                                MCFDMA_DCR_DSIZE_BYTE));

#ifdef DEBUG_DMA
        printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
               dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
               (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
#endif
}
/* Set transfer address for specific DMA channel */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
        volatile unsigned short *dmawp;
        volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
        printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        dmalp = (unsigned int *) dma_base_addr[dmanr];

        /* Determine which address registers are used for memory/device accesses */
        if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
                /* Source incrementing, must be memory */
                dmalp[MCFDMA_SAR] = a;
                /* Set dest address, must be device */
                dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
        } else {
                /* Destination incrementing, must be memory */
                dmalp[MCFDMA_DAR] = a;
                /* Set source address, must be device */
                dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
        }

#ifdef DEBUG_DMA
        printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
               __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
               (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
               (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
#endif
}
/*
 * Specific to ColdFire - sets the device address.
 * Should be called after the mode set call, and before the set DMA address
 * call. (See the usage sketch following get_dma_residue() below.)
 */
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
{
#ifdef DMA_DEBUG
        printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
#endif

        dma_device_address[dmanr] = a;
}
/*
 * Note: "count" represents _bytes_. The byte count register is written as a
 * 16-bit value here, so counts above 65535 will be truncated.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
        volatile unsigned short *dmawp;

#ifdef DMA_DEBUG
        printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        dmawp[MCFDMA_BCR] = (unsigned short)count;
}
/*
 * Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
        volatile unsigned short *dmawp;
        unsigned short count;

#ifdef DMA_DEBUG
        printk("get_dma_residue(dmanr=%d)\n", dmanr);
#endif

        dmawp = (unsigned short *) dma_base_addr[dmanr];
        count = dmawp[MCFDMA_BCR];
        return((int) count);
}
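/*
 * Usage sketch (illustrative only, not part of the original header): the call
 * order described above for a dual-address, 16-bit, I/O-to-memory transfer.
 * The function name, channel number, device register address and buffer are
 * hypothetical placeholders supplied by the caller.
 */
#if 0
static void example_dma_read_word(unsigned int chan, unsigned int devaddr,
                                  void *buf, unsigned int nbytes)
{
        set_dma_mode(chan, DMA_MODE_READ_WORD); /* mode first */
        set_dma_device_addr(chan, devaddr);     /* then the device address */
        set_dma_addr(chan, (unsigned int) buf); /* then the memory address */
        set_dma_count(chan, nbytes);            /* byte count */
        enable_dma(chan);                       /* accept external requests */

        /* ... wait for the completion interrupt ... */

        /* get_dma_residue(chan) should now return 0 */
        disable_dma(chan);
}
#endif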
#else /* CONFIG_M5272 is defined */

/*
 * The MCF5272 DMA controller is very different from the controller described
 * above in terms of register mapping. For instance, with the exception of the
 * 16-bit interrupt register (IRQ#85, for reference), all of the registers are
 * 32-bit.
 *
 * The big difference, however, is the lack of device-requested DMA. All modes
 * are dual address transfer, and there is no 'device' setup or direction bit.
 * You can DMA between a device and memory, between memory and memory, or even
 * between two devices directly, with any combination of incrementing and
 * non-incrementing addresses you choose. This makes the distinction between
 * the 'device address' set up by set_dma_device_addr() and the address set by
 * set_dma_addr() somewhat artificial.
 *
 * Therefore, there are two options. One is to use set_dma_addr() and
 * set_dma_device_addr(), which will act exactly as above: the code looks to
 * see if the source is set to autoincrement, and if so the source uses the
 * set_dma_addr() value and the destination the set_dma_device_addr() value.
 * Otherwise the source is set to the set_dma_device_addr() value and the
 * destination gets the set_dma_addr() value.
 *
 * The other is to use the provided set_dma_src_addr() and set_dma_dest_addr()
 * functions and make it explicit. Depending on what you're doing, one of
 * these two should work for you, but don't mix them in the same transfer
 * setup.
 */
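/*
 * Usage sketch (illustrative only, not part of the original header): the
 * explicit form described above, naming both ends of the transfer directly.
 * The function name, channel number and addresses are hypothetical
 * placeholders, and set_dma_src_addr()/set_dma_dest_addr() are assumed to
 * take (channel, address) like the other helpers; mode and count setup are
 * omitted here.
 */
#if 0
static void example_mcf5272_addrs(unsigned int chan, unsigned int src,
                                  unsigned int dst)
{
        set_dma_src_addr(chan, src);    /* source end (device or memory) */
        set_dma_dest_addr(chan, dst);   /* destination end (device or memory) */
        enable_dma(chan);               /* enable the channel */
}
#endif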
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
        volatile unsigned int *dmalp;

#ifdef DMA_DEBUG
        printk("enable_dma(dmanr=%d)\n", dmanr);
#endif