@@ -334,3 +334,87 @@ void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
 	 * Should be reverted after this issue is root caused.
 	 */
 	while (!(DMA_MMR_READ(&dst_ch->irq_status) & DMA_DONE))
+		continue;
+#endif
+}
+
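+/* Companion to early_dma_memcpy() above: wait for any transfers it queued
+ * to finish, ack the status bits, and reset the channels for reuse.
+ */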
+void __init early_dma_memcpy_done(void)
+{
+	early_shadow_stamp();
+
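+	/* A non-zero source CONFIG means that MDMA stream was started, so
+	 * spin until its destination channel raises DMA_DONE.
+	 */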
+	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
+	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
+		continue;
+
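+	/* The DONE/ERR flags are write-1-to-clear. */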
+	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	/*
+	 * Now that DMA is done, we would normally flush cache, but
+	 * i/d cache isn't running this early, so we don't bother,
+	 * and just clear out the DMA channel for next time
+	 */
+	bfin_write_MDMA_S0_CONFIG(0);
+	bfin_write_MDMA_S1_CONFIG(0);
+	bfin_write_MDMA_D0_CONFIG(0);
+	bfin_write_MDMA_D1_CONFIG(0);
+
+	__builtin_bfin_ssync();
+}
+
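+/* Map the generic MDMA_S/MDMA_D accessors onto a concrete stream:
+ * memory stream 3 on BF60x parts that provide it, stream 0 everywhere
+ * else.
+ */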
+#if defined(CH_MEM_STREAM3_SRC) && defined(CONFIG_BF60x)
+#define bfin_read_MDMA_S_CONFIG bfin_read_MDMA_S3_CONFIG
+#define bfin_write_MDMA_S_CONFIG bfin_write_MDMA_S3_CONFIG
+#define bfin_write_MDMA_S_START_ADDR bfin_write_MDMA_S3_START_ADDR
+#define bfin_write_MDMA_S_IRQ_STATUS bfin_write_MDMA_S3_IRQ_STATUS
+#define bfin_write_MDMA_S_X_COUNT bfin_write_MDMA_S3_X_COUNT
+#define bfin_write_MDMA_S_X_MODIFY bfin_write_MDMA_S3_X_MODIFY
+#define bfin_write_MDMA_S_Y_COUNT bfin_write_MDMA_S3_Y_COUNT
+#define bfin_write_MDMA_S_Y_MODIFY bfin_write_MDMA_S3_Y_MODIFY
+#define bfin_write_MDMA_D_CONFIG bfin_write_MDMA_D3_CONFIG
+#define bfin_write_MDMA_D_START_ADDR bfin_write_MDMA_D3_START_ADDR
+#define bfin_read_MDMA_D_IRQ_STATUS bfin_read_MDMA_D3_IRQ_STATUS
+#define bfin_write_MDMA_D_IRQ_STATUS bfin_write_MDMA_D3_IRQ_STATUS
+#define bfin_write_MDMA_D_X_COUNT bfin_write_MDMA_D3_X_COUNT
+#define bfin_write_MDMA_D_X_MODIFY bfin_write_MDMA_D3_X_MODIFY
+#define bfin_write_MDMA_D_Y_COUNT bfin_write_MDMA_D3_Y_COUNT
+#define bfin_write_MDMA_D_Y_MODIFY bfin_write_MDMA_D3_Y_MODIFY
+#else
+#define bfin_read_MDMA_S_CONFIG bfin_read_MDMA_S0_CONFIG
+#define bfin_write_MDMA_S_CONFIG bfin_write_MDMA_S0_CONFIG
+#define bfin_write_MDMA_S_START_ADDR bfin_write_MDMA_S0_START_ADDR
+#define bfin_write_MDMA_S_IRQ_STATUS bfin_write_MDMA_S0_IRQ_STATUS
+#define bfin_write_MDMA_S_X_COUNT bfin_write_MDMA_S0_X_COUNT
+#define bfin_write_MDMA_S_X_MODIFY bfin_write_MDMA_S0_X_MODIFY
+#define bfin_write_MDMA_S_Y_COUNT bfin_write_MDMA_S0_Y_COUNT
+#define bfin_write_MDMA_S_Y_MODIFY bfin_write_MDMA_S0_Y_MODIFY
+#define bfin_write_MDMA_D_CONFIG bfin_write_MDMA_D0_CONFIG
+#define bfin_write_MDMA_D_START_ADDR bfin_write_MDMA_D0_START_ADDR
+#define bfin_read_MDMA_D_IRQ_STATUS bfin_read_MDMA_D0_IRQ_STATUS
+#define bfin_write_MDMA_D_IRQ_STATUS bfin_write_MDMA_D0_IRQ_STATUS
+#define bfin_write_MDMA_D_X_COUNT bfin_write_MDMA_D0_X_COUNT
+#define bfin_write_MDMA_D_X_MODIFY bfin_write_MDMA_D0_X_MODIFY
+#define bfin_write_MDMA_D_Y_COUNT bfin_write_MDMA_D0_Y_COUNT
+#define bfin_write_MDMA_D_Y_MODIFY bfin_write_MDMA_D0_Y_MODIFY
+#endif
+
+/**
+ * __dma_memcpy - program the MDMA registers
+ *
+ * Actually program MDMA0 and wait for the transfer to finish. Disable IRQs
+ * while programming registers so that everything is fully configured. Wait
+ * for DMA to finish with IRQs enabled. If interrupted, the initial DMA_DONE
+ * check will make sure we don't clobber any existing transfer.
+ */
+static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
+{
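+	/* A single static lock serializes all callers, since there is only
+	 * one MDMA stream to program.
+	 */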
+	static DEFINE_SPINLOCK(mdma_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdma_lock, flags);
+
+	/* Force a sync in case a previous config reset on this channel
+	 * occurred. This is needed so subsequent writes to DMA registers
+	 * are not spuriously lost/corrupted. Do it under irq lock and
+	 * without the anomaly version (because we are atomic already).
+	 */
+	__builtin_bfin_ssync();