
waterDataDiscreteRateMining / standardDeviationMemoryDefinition.c

姚强 committed on 2020-10-28 (4 years ago)
parent commit: e0b31c0f5a

+ 126 - 0
waterDataDiscreteRateMining/dataSharedMemory/standardDeviationMemoryDefinition.c

@@ -154,3 +154,129 @@ int request_dma(unsigned int channel, const char *device_id)
 }
 EXPORT_SYMBOL(request_dma);
 
+int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
+{
+	int ret;
+	unsigned int irq;
+
+	BUG_ON(channel >= MAX_DMA_CHANNELS || !callback ||
+			!atomic_read(&dma_ch[channel].chan_status));
+
+	irq = channel2irq(channel);
+	ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data);
+	if (ret)
+		return ret;
+
+	dma_ch[channel].irq = irq;
+	dma_ch[channel].data = data;
+
+	return 0;
+}
+EXPORT_SYMBOL(set_dma_callback);
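+
+/*
+ * Illustrative usage sketch: CH_EXAMPLE, my_dma_handler, and my_dev are
+ * hypothetical names.  A driver first claims a channel with request_dma()
+ * and then installs its completion handler:
+ *
+ *	static irqreturn_t my_dma_handler(int irq, void *dev_id)
+ *	{
+ *		clear_dma_irqstat(CH_EXAMPLE);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	if (!request_dma(CH_EXAMPLE, "my_device"))
+ *		set_dma_callback(CH_EXAMPLE, my_dma_handler, my_dev);
+ */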
+
+/**
+ *	clear_dma_buffer - clear DMA FIFOs for the specified channel
+ *
+ * Set the Buffer Clear bit in the Configuration register of the specified
+ * DMA channel. This stops any descriptor-based DMA operation.
+ */
+static void clear_dma_buffer(unsigned int channel)
+{
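+	/* Pulse the Buffer Clear bit: the SSYNC() ensures the set has
+	 * reached the DMA MMR before the bit is cleared again.
+	 */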
+	dma_ch[channel].regs->cfg |= RESTART;
+	SSYNC();
+	dma_ch[channel].regs->cfg &= ~RESTART;
+}
+
+void free_dma(unsigned int channel)
+{
+	pr_debug("freedma() : BEGIN\n");
+	BUG_ON(channel >= MAX_DMA_CHANNELS ||
+			!atomic_read(&dma_ch[channel].chan_status));
+
+	/* Halt the DMA */
+	disable_dma(channel);
+	clear_dma_buffer(channel);
+
+	if (dma_ch[channel].irq)
+		free_irq(dma_ch[channel].irq, dma_ch[channel].data);
+
+	/* Clear the DMA Variable in the Channel */
+	atomic_set(&dma_ch[channel].chan_status, 0);
+
+	pr_debug("freedma() : END\n");
+}
+EXPORT_SYMBOL(free_dma);
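+
+/*
+ * Illustrative teardown sketch (CH_EXAMPLE is hypothetical): free_dma()
+ * halts the channel, clears its FIFO, releases the callback IRQ if one
+ * was installed, and marks the channel free again, so a driver's remove
+ * path needs nothing beyond:
+ *
+ *	free_dma(CH_EXAMPLE);
+ */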
+
+#ifdef CONFIG_PM
+# ifndef MAX_DMA_SUSPEND_CHANNELS
+#  define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
+# endif
+# ifndef CONFIG_BF60x
+int blackfin_dma_suspend(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+		if (dma_ch[i].regs->cfg & DMAEN) {
+			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
+			return -EBUSY;
+		}
+		if (i < MAX_DMA_SUSPEND_CHANNELS)
+			dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
+	}
+
+#if ANOMALY_05000480
+	bfin_write_DMAC_TC_PER(0x0);
+#endif
+	return 0;
+}
+
+void blackfin_dma_resume(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+		dma_ch[i].regs->cfg = 0;
+		if (i < MAX_DMA_SUSPEND_CHANNELS)
+			dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
+	}
+#if ANOMALY_05000480
+	bfin_write_DMAC_TC_PER(0x0111);
+#endif
+}
+# else
+int blackfin_dma_suspend(void)
+{
+	return 0;
+}
+
+void blackfin_dma_resume(void)
+{
+}
+# endif
+#endif
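+
+/*
+ * Illustrative sketch of the expected calling sequence (the platform PM
+ * caller here is hypothetical): suspend is refused while any channel
+ * still has DMAEN set, and resume rewrites the saved peripheral maps:
+ *
+ *	ret = blackfin_dma_suspend();
+ *	if (ret)
+ *		return ret;
+ *	... enter the sleep state ...
+ *	blackfin_dma_resume();
+ */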
+
+/**
+ *	blackfin_dma_early_init - minimal DMA init
+ *
+ * Set up a few DMA registers so we can safely do DMA transfers early in
+ * the kernel boot process.  Really this just means using dma_memcpy().
+ */
+void __init blackfin_dma_early_init(void)
+{
+	early_shadow_stamp();
+	bfin_write_MDMA_S0_CONFIG(0);
+	bfin_write_MDMA_S1_CONFIG(0);
+}
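+
+/*
+ * Illustrative sketch (dst_buf, src_buf, and len are hypothetical): with
+ * the MDMA configs cleared by blackfin_dma_early_init(), early boot code
+ * can copy memory via early_dma_memcpy(); all three arguments must be
+ * 4-byte aligned:
+ *
+ *	early_dma_memcpy(dst_buf, src_buf, len);
+ */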
+
+void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
+	struct dma_register *dst_ch, *src_ch;
+
+	early_shadow_stamp();
+
+	/* We assume that everything is 4-byte aligned, so include
+	 * a basic sanity check
+	 */