|
@@ -182,3 +182,143 @@ static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
|
|
|
/* Clear config errors */
|
|
|
dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
|
|
|
|
|
|
+ /* Set command register */
|
|
|
+ dmawp[MCFDMA_DCR] =
|
|
|
+ MCFDMA_DCR_INT | /* Enable completion irq */
|
|
|
+ MCFDMA_DCR_CS | /* Force one xfer per request */
|
|
|
+ MCFDMA_DCR_AA | /* Enable auto alignment */
|
|
|
+ /* single-address-mode */
|
|
|
+ ((mode & DMA_MODE_SINGLE_BIT) ? MCFDMA_DCR_SAA : 0) |
|
|
|
+ /* sets s_rw (-> r/w) high if Memory to I/O */
|
|
|
+ ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_S_RW : 0) |
|
|
|
+ /* Memory to I/O or I/O to Memory */
|
|
|
+ ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
|
|
|
+ /* 32 bit, 16 bit or 8 bit transfers */
|
|
|
+ ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_SSIZE_WORD :
|
|
|
+ ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_SSIZE_LONG :
|
|
|
+ MCFDMA_DCR_SSIZE_BYTE)) |
|
|
|
+ ((mode & DMA_MODE_WORD_BIT) ? MCFDMA_DCR_DSIZE_WORD :
|
|
|
+ ((mode & DMA_MODE_LONG_BIT) ? MCFDMA_DCR_DSIZE_LONG :
|
|
|
+ MCFDMA_DCR_DSIZE_BYTE));
|
|
|
+
|
|
|
+#ifdef DEBUG_DMA
|
|
|
+ printk("%s(%d): dmanr=%d DSR[%x]=%x DCR[%x]=%x\n", __FILE__, __LINE__,
|
|
|
+ dmanr, (int) &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
|
|
|
+ (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR]);
|
|
|
+#endif
|
|
|
+}
|
|
|
+
|
|
|
+/* Set transfer address for specific DMA channel */
|
|
|
+static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
|
|
|
+{
|
|
|
+ volatile unsigned short *dmawp;
|
|
|
+ volatile unsigned int *dmalp;
|
|
|
+
|
|
|
+#ifdef DMA_DEBUG
|
|
|
+ printk("set_dma_addr(dmanr=%d,a=%x)\n", dmanr, a);
|
|
|
+#endif
|
|
|
+
|
|
|
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
|
|
|
+ dmalp = (unsigned int *) dma_base_addr[dmanr];
|
|
|
+
|
|
|
+ /* Determine which address registers are used for memory/device accesses */
|
|
|
+ if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
|
|
|
+ /* Source incrementing, must be memory */
|
|
|
+ dmalp[MCFDMA_SAR] = a;
|
|
|
+ /* Set dest address, must be device */
|
|
|
+ dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
|
|
|
+ } else {
|
|
|
+ /* Destination incrementing, must be memory */
|
|
|
+ dmalp[MCFDMA_DAR] = a;
|
|
|
+ /* Set source address, must be device */
|
|
|
+ dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
|
|
|
+ }
|
|
|
+
|
|
|
+#ifdef DEBUG_DMA
|
|
|
+ printk("%s(%d): dmanr=%d DCR[%x]=%x SAR[%x]=%08x DAR[%x]=%08x\n",
|
|
|
+ __FILE__, __LINE__, dmanr, (int) &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
|
|
|
+ (int) &dmalp[MCFDMA_SAR], dmalp[MCFDMA_SAR],
|
|
|
+ (int) &dmalp[MCFDMA_DAR], dmalp[MCFDMA_DAR]);
|
|
|
+#endif
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Specific for Coldfire - sets device address.
|
|
|
+ * Should be called after the mode set call, and before set DMA address.
|
|
|
+ */
|
|
|
+static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a)
|
|
|
+{
|
|
|
+#ifdef DMA_DEBUG
|
|
|
+ printk("set_dma_device_addr(dmanr=%d,a=%x)\n", dmanr, a);
|
|
|
+#endif
|
|
|
+
|
|
|
+ dma_device_address[dmanr] = a;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * NOTE 2: "count" represents _bytes_.
|
|
|
+ */
|
|
|
+static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
|
|
|
+{
|
|
|
+ volatile unsigned short *dmawp;
|
|
|
+
|
|
|
+#ifdef DMA_DEBUG
|
|
|
+ printk("set_dma_count(dmanr=%d,count=%d)\n", dmanr, count);
|
|
|
+#endif
|
|
|
+
|
|
|
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
|
|
|
+ dmawp[MCFDMA_BCR] = (unsigned short)count;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Get DMA residue count. After a DMA transfer, this
|
|
|
+ * should return zero. Reading this while a DMA transfer is
|
|
|
+ * still in progress will return unpredictable results.
|
|
|
+ * Otherwise, it returns the number of _bytes_ left to transfer.
|
|
|
+ */
|
|
|
+static __inline__ int get_dma_residue(unsigned int dmanr)
|
|
|
+{
|
|
|
+ volatile unsigned short *dmawp;
|
|
|
+ unsigned short count;
|
|
|
+
|
|
|
+#ifdef DMA_DEBUG
|
|
|
+ printk("get_dma_residue(dmanr=%d)\n", dmanr);
|
|
|
+#endif
|
|
|
+
|
|
|
+ dmawp = (unsigned short *) dma_base_addr[dmanr];
|
|
|
+ count = dmawp[MCFDMA_BCR];
|
|
|
+ return((int) count);
|
|
|
+}
|
|
|
+#else /* CONFIG_M5272 is defined */
|
|
|
+
|
|
|
+/*
|
|
|
+ * The MCF5272 DMA controller is very different than the controller defined above
|
|
|
+ * in terms of register mapping. For instance, with the exception of the 16-bit
|
|
|
+ * interrupt register (IRQ#85, for reference), all of the registers are 32-bit.
|
|
|
+ *
|
|
|
+ * The big difference, however, is the lack of device-requested DMA. All modes
|
|
|
+ * are dual address transfer, and there is no 'device' setup or direction bit.
|
|
|
+ * You can DMA between a device and memory, between memory and memory, or even between
|
|
|
+ * two devices directly, with any combination of incrementing and non-incrementing
|
|
|
+ * addresses you choose. This puts a crimp in distinguishing between the 'device
|
|
|
+ * address' set up by set_dma_device_addr.
|
|
|
+ *
|
|
|
+ * Therefore, there are two options. One is to use set_dma_addr and set_dma_device_addr,
|
|
|
+ * which will act exactly as above -- it will look to see if the source is set to
|
|
|
+ * autoincrement, and if so it will make the source use the set_dma_addr value and the
|
|
|
+ * destination the set_dma_device_addr value. Otherwise the source will be set to the
|
|
|
+ * set_dma_device_addr value and the destination will get the set_dma_addr value.
|
|
|
+ *
|
|
|
+ * The other is to use the provided set_dma_src_addr and set_dma_dest_addr functions
|
|
|
+ * and make it explicit. Depending on what you're doing, one of these two should work
|
|
|
+ * for you, but don't mix them in the same transfer setup.
|
|
|
+ */
|
|
|
+
|
|
|
+/* enable/disable a specific DMA channel */
|
|
|
+static __inline__ void enable_dma(unsigned int dmanr)
|
|
|
+{
|
|
|
+ volatile unsigned int *dmalp;
|
|
|
+
|
|
|
+#ifdef DMA_DEBUG
|
|
|
+ printk("enable_dma(dmanr=%d)\n", dmanr);
|
|
|
+#endif
|