@@ -435,3 +435,178 @@ static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
 		u32 shift = abs(dmod) >> 1;
 		size_t ycnt = cnt >> (16 - shift);
 		cnt = 1 << (16 - shift);
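+		/*
+		 * Each 2D "row" is exactly 64KiB regardless of element size:
+		 * e.g. 16-bit elements (dmod = 2) give shift = 1, so a row is
+		 * 2^15 elements * 2 bytes.  ycnt is then the number of rows,
+		 * and since the bulk split in dma_memcpy_nocache() only hands
+		 * us multiples of 64KiB here, the shift drops nothing.
+		 */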
+		bfin_write_MDMA_D_Y_COUNT(ycnt);
+		bfin_write_MDMA_S_Y_COUNT(ycnt);
+		bfin_write_MDMA_D_Y_MODIFY(dmod);
+		bfin_write_MDMA_S_Y_MODIFY(smod);
+	}
+
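+	/* Program the destination channel first.  The IRQ_STATUS bits are
+	 * write-1-to-clear, so writing DMA_DONE | DMA_ERR here discards any
+	 * stale completion/error state before the channels are enabled.
+	 */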
+	bfin_write_MDMA_D_START_ADDR(daddr);
+	bfin_write_MDMA_D_X_COUNT(cnt);
+	bfin_write_MDMA_D_X_MODIFY(dmod);
+	bfin_write_MDMA_D_IRQ_STATUS(DMA_DONE | DMA_ERR);
+
+	bfin_write_MDMA_S_START_ADDR(saddr);
+	bfin_write_MDMA_S_X_COUNT(cnt);
+	bfin_write_MDMA_S_X_MODIFY(smod);
+	bfin_write_MDMA_S_IRQ_STATUS(DMA_DONE | DMA_ERR);
+
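+	/* Enable the source channel before the destination one.  WNR marks
+	 * the destination as the memory-write side of the pair, and DI_EN_Y
+	 * vs. DI_EN_X picks whether the done status is raised after the
+	 * whole 2D frame or after the single 1D row.
+	 */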
+	bfin_write_MDMA_S_CONFIG(DMAEN | conf);
+	if (conf & DMA2D)
+		bfin_write_MDMA_D_CONFIG(WNR | DI_EN_Y | DMAEN | conf);
+	else
+		bfin_write_MDMA_D_CONFIG(WNR | DI_EN_X | DMAEN | conf);
+
+	spin_unlock_irqrestore(&mdma_lock, flags);
+
+	SSYNC();
+
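+	/* Poll for completion.  The lock was dropped above, so a later
+	 * caller may observe DMA_DONE first and tear the channels down
+	 * itself; a cleared S_CONFIG means exactly that, and there is
+	 * nothing left for us to do.
+	 */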
+	while (!(bfin_read_MDMA_D_IRQ_STATUS() & DMA_DONE))
+		if (bfin_read_MDMA_S_CONFIG())
+			continue;
+		else
+			return;
+
+	bfin_write_MDMA_D_IRQ_STATUS(DMA_DONE | DMA_ERR);
+
+	bfin_write_MDMA_S_CONFIG(0);
+	bfin_write_MDMA_D_CONFIG(0);
+}
+
+/**
+ * _dma_memcpy - translate C memcpy settings into MDMA settings
+ *
+ * Handle all the high-level steps before we touch the MDMA registers:
+ * pick the copy direction, tweak the sizes, and format the addresses.
+ */
+static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+	u32 conf, shift;
+	s16 mod;
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
+
+	if (size == 0)
+		return NULL;
+
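+	/* Pick the widest element that the alignment of both addresses and
+	 * the total length allow.  For example dst = 0x1000, src = 0x2002,
+	 * size = 6 fails the 32-bit test on src, so the copy runs as three
+	 * 16-bit elements (WDSIZE_16, shift = 1).
+	 */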
+	if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
+		conf = WDSIZE_32;
+		shift = 2;
+	} else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
+		conf = WDSIZE_16;
+		shift = 1;
+	} else {
+		conf = WDSIZE_8;
+		shift = 0;
+	}
+
+	/* If the two memory regions have a chance of overlapping, make
+	 * sure the memcpy still works as expected.  Do this by having the
+	 * copy run backwards instead.
+	 */
+	mod = 1 << shift;
+	if (src < dst) {
+		mod *= -1;
+		dst += size + mod;
+		src += size + mod;
+	}
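+	/* e.g. a byte copy (shift = 0) turns mod into -1 and points dst/src
+	 * at the last byte of each region, so when src < dst the descending
+	 * copy reads every overlapping byte before it is overwritten.
+	 */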
+	size >>= shift;
+
+#ifndef DMA_MMR_SIZE_32
+	if (size > 0x10000)
+		conf |= DMA2D;
+#endif
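+	/* On parts without 32-bit DMA MMRs, X_COUNT is only 16 bits wide,
+	 * capping a 1D transfer at 65536 elements; anything larger is
+	 * folded into a 2D transfer of 64KiB rows by __dma_memcpy().
+	 */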
+
+	__dma_memcpy(dst, mod, src, mod, size, conf);
+
+	return pdst;
+}
+
+/**
+ * dma_memcpy - DMA memcpy under spin lock
+ *
+ * Arguments are not checked before starting the DMA memcpy.  The transfer
+ * is broken up into two pieces: the first is a multiple of 64k and the
+ * second is the remainder, smaller than 64k.
+ */
+void *dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
+
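+	/* Keep the transfer coherent with the data cache: flush the source
+	 * so the DMA engine sees the latest data, and invalidate the
+	 * destination so the CPU re-reads what the engine wrote.
+	 */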
+	if (bfin_addr_dcacheable(src))
+		blackfin_dcache_flush_range(src, src + size);
+
+	if (bfin_addr_dcacheable(dst))
+		blackfin_dcache_invalidate_range(dst, dst + size);
+
+	return dma_memcpy_nocache(pdst, psrc, size);
+}
+EXPORT_SYMBOL(dma_memcpy);
+
+/**
+ * dma_memcpy_nocache - DMA memcpy under spin lock
+ * - No cache flush/invalidate
+ *
+ * Arguments are not checked before starting the DMA memcpy.  The transfer
+ * is broken up into two pieces: the first is a multiple of 64k and the
+ * second is the remainder, smaller than 64k.
+ */
+void *dma_memcpy_nocache(void *pdst, const void *psrc, size_t size)
+{
+#ifdef DMA_MMR_SIZE_32
+	_dma_memcpy(pdst, psrc, size);
+#else
+	size_t bulk, rest;
+
+	bulk = size & ~0xffff;
+	rest = size - bulk;
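+	/* e.g. size = 0x12345: bulk = 0x10000 is copied first (and is the
+	 * only piece that can ever need 2D mode), then rest = 0x2345
+	 * finishes the tail.
+	 */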
+	if (bulk)
+		_dma_memcpy(pdst, psrc, bulk);
+	_dma_memcpy(pdst + bulk, psrc + bulk, rest);
+#endif
+	return pdst;
+}
+EXPORT_SYMBOL(dma_memcpy_nocache);
+
+/**
+ * safe_dma_memcpy - DMA memcpy w/argument checking
+ *
+ * Verify arguments are safe before heading to dma_memcpy().
+ */
+void *safe_dma_memcpy(void *dst, const void *src, size_t size)
+{
+	if (!access_ok(VERIFY_WRITE, dst, size))
+		return NULL;
+	if (!access_ok(VERIFY_READ, src, size))
+		return NULL;
+	return dma_memcpy(dst, src, size);
+}
+EXPORT_SYMBOL(safe_dma_memcpy);
+
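+/* For device I/O the peripheral side keeps a fixed address (modify 0,
+ * typically a FIFO data register) while the memory buffer strides by the
+ * element size.  Cache maintenance mirrors dma_memcpy(): flush before
+ * writing out to the device, invalidate before reading in from it.
+ */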
+static void _dma_out(unsigned long addr, unsigned long buf, unsigned DMA_MMR_SIZE_TYPE len,
+		     u16 size, u16 dma_size)
+{
+	blackfin_dcache_flush_range(buf, buf + len * size);
+	__dma_memcpy(addr, 0, buf, size, len, dma_size);
+}
+
+static void _dma_in(unsigned long addr, unsigned long buf, unsigned DMA_MMR_SIZE_TYPE len,
+		    u16 size, u16 dma_size)
+{
+	blackfin_dcache_invalidate_range(buf, buf + len * size);
+	__dma_memcpy(buf, size, addr, 0, len, dma_size);
+}
+
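+/* Each MAKE_DMA_IO() expansion below defines and exports one accessor;
+ * for instance MAKE_DMA_IO(out, b, 1, 8, const) produces:
+ *
+ *	void dma_outsb(unsigned long addr, const void *buf,
+ *		       unsigned DMA_MMR_SIZE_TYPE len)
+ *	{
+ *		_dma_out(addr, (unsigned long)buf, len, 1, WDSIZE_8);
+ *	}
+ */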
+#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
+void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned DMA_MMR_SIZE_TYPE len) \
+{ \
+	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
+} \
+EXPORT_SYMBOL(dma_##io##s##bwl)
+MAKE_DMA_IO(out, b, 1, 8, const);
+MAKE_DMA_IO(in, b, 1, 8, );
+MAKE_DMA_IO(out, w, 2, 16, const);
+MAKE_DMA_IO(in, w, 2, 16, );
+MAKE_DMA_IO(out, l, 4, 32, const);
+MAKE_DMA_IO(in, l, 4, 32, );