
waterHeterogeneousDataSynchronization basicAlgorithmEncapsulation.h 邵敏 commit at 2020-12-28

邵敏 4 years ago
Commit 1a5a5d3ad2

+ 186 - 0
waterHeterogeneousDataSynchronization/dataCalculation/basicAlgorithmEncapsulation.h

@@ -161,3 +161,189 @@ struct dma_register {
 };
 
 struct dma_channel {
+	const char *device_id;
+	atomic_t chan_status;
+	volatile struct dma_register *regs;
+	struct dmasg *sg;		/* large mode descriptor */
+	unsigned int irq;
+	void *data;
+#ifdef CONFIG_PM
+	unsigned short saved_peripheral_map;
+#endif
+};
+
+#ifdef CONFIG_PM
+int blackfin_dma_suspend(void);
+void blackfin_dma_resume(void);
+#endif
+
+/*******************************************************************************
+*	DMA APIs
+*******************************************************************************/
+extern struct dma_channel dma_ch[MAX_DMA_CHANNELS];
+extern struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS];
+extern int channel2irq(unsigned int channel);
+
+static inline void set_dma_start_addr(unsigned int channel, unsigned long addr)
+{
+	dma_ch[channel].regs->start_addr = addr;
+}
+static inline void set_dma_next_desc_addr(unsigned int channel, void *addr)
+{
+	dma_ch[channel].regs->next_desc_ptr = addr;
+}
+static inline void set_dma_curr_desc_addr(unsigned int channel, void *addr)
+{
+	dma_ch[channel].regs->curr_desc_ptr = addr;
+}
+static inline void set_dma_x_count(unsigned int channel, unsigned DMA_MMR_SIZE_TYPE x_count)
+{
+	dma_ch[channel].regs->x_count = x_count;
+}
+static inline void set_dma_y_count(unsigned int channel, unsigned DMA_MMR_SIZE_TYPE y_count)
+{
+	dma_ch[channel].regs->y_count = y_count;
+}
+static inline void set_dma_x_modify(unsigned int channel, DMA_MMR_SIZE_TYPE x_modify)
+{
+	dma_ch[channel].regs->x_modify = x_modify;
+}
+static inline void set_dma_y_modify(unsigned int channel, DMA_MMR_SIZE_TYPE y_modify)
+{
+	dma_ch[channel].regs->y_modify = y_modify;
+}
+static inline void set_dma_config(unsigned int channel, unsigned DMA_MMR_SIZE_TYPE config)
+{
+	dma_ch[channel].regs->cfg = config;
+}
+static inline void set_dma_curr_addr(unsigned int channel, unsigned long addr)
+{
+	dma_ch[channel].regs->curr_addr_ptr = addr;
+}
+
+#ifdef CONFIG_BF60x
+static inline unsigned long
+set_bfin_dma_config2(char direction, char flow_mode, char intr_mode,
+		     char dma_mode, char mem_width, char syncmode, char peri_width)
+{
+	unsigned long config = 0;
+
+	switch (intr_mode) {
+	case INTR_ON_BUF:
+		if (dma_mode == DIMENSION_2D)
+			config = DI_EN_Y;
+		else
+			config = DI_EN_X;
+		break;
+	case INTR_ON_ROW:
+		config = DI_EN_X;
+		break;
+	case INTR_ON_PERI:
+		config = DI_EN_P;
+		break;
+	}
+
+	return config | (direction << 1) | (mem_width << 8) | (dma_mode << 26) |
+		(flow_mode << 12) | (syncmode << 2) | (peri_width << 4);
+}
+#endif
+
+static inline unsigned DMA_MMR_SIZE_TYPE
+set_bfin_dma_config(char direction, char flow_mode,
+		    char intr_mode, char dma_mode, char mem_width, char syncmode)
+{
+#ifdef CONFIG_BF60x
+	return set_bfin_dma_config2(direction, flow_mode, intr_mode, dma_mode,
+		mem_width, syncmode, mem_width);
+#else
+	return (direction << 1) | (mem_width << 2) | (dma_mode << 4) |
+		(intr_mode << 6) | (flow_mode << 12) | (syncmode << 5);
+#endif
+}
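
As a usage sketch: set_bfin_dma_config() packs the transfer attributes into a single value for the channel's CONFIG register. The macro names below (DIR_READ, FLOW_STOP, INTR_DISABLE, DIMENSION_LINEAR, DATA_SIZE_16, DMA_SYNC_RESTART) and the variable channel are assumptions, presumed defined in the earlier, unshown part of this header as in the Blackfin DMA headers this code derives from:

	/* Hypothetical: a linear (1D), 16-bit read from memory that stops
	 * at the end of the buffer and raises no interrupt. */
	unsigned DMA_MMR_SIZE_TYPE cfg =
		set_bfin_dma_config(DIR_READ, FLOW_STOP, INTR_DISABLE,
				    DIMENSION_LINEAR, DATA_SIZE_16,
				    DMA_SYNC_RESTART);
	set_dma_config(channel, cfg);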
+
+static inline unsigned DMA_MMR_SIZE_TYPE get_dma_curr_irqstat(unsigned int channel)
+{
+	return dma_ch[channel].regs->irq_status;
+}
+static inline unsigned DMA_MMR_SIZE_TYPE get_dma_curr_xcount(unsigned int channel)
+{
+	return dma_ch[channel].regs->curr_x_count;
+}
+static inline unsigned DMA_MMR_SIZE_TYPE get_dma_curr_ycount(unsigned int channel)
+{
+	return dma_ch[channel].regs->curr_y_count;
+}
+static inline void *get_dma_next_desc_ptr(unsigned int channel)
+{
+	return dma_ch[channel].regs->next_desc_ptr;
+}
+static inline void *get_dma_curr_desc_ptr(unsigned int channel)
+{
+	return dma_ch[channel].regs->curr_desc_ptr;
+}
+static inline unsigned DMA_MMR_SIZE_TYPE get_dma_config(unsigned int channel)
+{
+	return dma_ch[channel].regs->cfg;
+}
+static inline unsigned long get_dma_curr_addr(unsigned int channel)
+{
+	return dma_ch[channel].regs->curr_addr_ptr;
+}
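
The getters read the channel's registers back, which makes simple completion polling possible. A minimal sketch, assuming DMA_DONE is the completion bit used by clear_dma_irqstat() below:

	/* Hypothetical busy-wait until the channel reports completion. */
	while (!(get_dma_curr_irqstat(channel) & DMA_DONE))
		cpu_relax();
	clear_dma_irqstat(channel);	/* acknowledge before reuse */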
+
+static inline void set_dma_sg(unsigned int channel, struct dmasg *sg, int ndsize)
+{
+	/* Make sure the internal data buffers in the core are drained
+	 * so that the DMA descriptors are completely written when the
+	 * DMA engine goes to fetch them below.
+	 */
+	SSYNC();
+
+	dma_ch[channel].regs->next_desc_ptr = sg;
+	dma_ch[channel].regs->cfg =
+		(dma_ch[channel].regs->cfg & ~NDSIZE) |
+		((ndsize << NDSIZE_OFFSET) & NDSIZE);
+}
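
For chained transfers, each struct dmasg is a large-mode descriptor that the engine fetches on its own. A hedged sketch, assuming the Blackfin-style dmasg layout (next_desc_addr, start_addr, cfg, x_count, x_modify, ...) declared in the unshown part of this header; buf_a and cfg are stand-ins:

	/* Hypothetical two-element chain; ndsize is the number of 16-bit
	 * words fetched per descriptor (9 for a full large-mode one). */
	static struct dmasg desc[2];

	desc[0].start_addr     = (unsigned long)buf_a;
	desc[0].x_count        = 256;
	desc[0].x_modify       = 1;		/* byte stride */
	desc[0].cfg            = cfg;		/* from set_bfin_dma_config() */
	desc[0].next_desc_addr = &desc[1];
	/* ... desc[1] filled likewise, ending the chain with FLOW_STOP ... */

	set_dma_sg(channel, desc, 9);
	enable_dma(channel);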
+
+static inline int dma_channel_active(unsigned int channel)
+{
+	return atomic_read(&dma_ch[channel].chan_status);
+}
+
+static inline void disable_dma(unsigned int channel)
+{
+	dma_ch[channel].regs->cfg &= ~DMAEN;
+	SSYNC();
+}
+static inline void enable_dma(unsigned int channel)
+{
+	dma_ch[channel].regs->curr_x_count = 0;
+	dma_ch[channel].regs->curr_y_count = 0;
+	dma_ch[channel].regs->cfg |= DMAEN;
+}
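
Putting the helpers together, a one-shot 1D transfer is programmed with the setters above and then enabled. A minimal sketch, assuming the channel was already claimed (request_dma() belongs to this API but sits outside this hunk) and that buf points to a DMA-safe buffer:

	set_dma_start_addr(channel, (unsigned long)buf);
	set_dma_x_count(channel, len);	/* number of elements */
	set_dma_x_modify(channel, 1);	/* 8-bit stride */
	set_dma_config(channel,
		set_bfin_dma_config(DIR_WRITE, FLOW_STOP, INTR_ON_BUF,
				    DIMENSION_LINEAR, DATA_SIZE_8,
				    DMA_SYNC_RESTART));
	enable_dma(channel);
	/* ... wait for DMA_DONE or the channel interrupt ... */
	disable_dma(channel);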
+int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data);
+
+static inline void dma_disable_irq(unsigned int channel)
+{
+	disable_irq(dma_ch[channel].irq);
+}
+static inline void dma_disable_irq_nosync(unsigned int channel)
+{
+	disable_irq_nosync(dma_ch[channel].irq);
+}
+static inline void dma_enable_irq(unsigned int channel)
+{
+	enable_irq(dma_ch[channel].irq);
+}
+static inline void clear_dma_irqstat(unsigned int channel)
+{
+	dma_ch[channel].regs->irq_status = DMA_DONE | DMA_ERR | DMA_PIRQ;
+}
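
set_dma_callback() above installs an irq_handler_t on the channel's interrupt, and clear_dma_irqstat() acknowledges the status bits, so a completion handler typically pairs the two. A hedged sketch (my_dma_done is a hypothetical name):

	/* Hypothetical completion handler registered via set_dma_callback(). */
	static irqreturn_t my_dma_done(int irq, void *data)
	{
		unsigned int channel = (unsigned int)(unsigned long)data;

		clear_dma_irqstat(channel);	/* ack DMA_DONE/DMA_ERR/DMA_PIRQ */
		disable_dma(channel);
		return IRQ_HANDLED;
	}

	/* At setup time: */
	set_dma_callback(channel, my_dma_done, (void *)(unsigned long)channel);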
+
+void *dma_memcpy(void *dest, const void *src, size_t count);
+void *dma_memcpy_nocache(void *dest, const void *src, size_t count);
+void *safe_dma_memcpy(void *dest, const void *src, size_t count);
+void blackfin_dma_early_init(void);
+void early_dma_memcpy(void *dest, const void *src, size_t count);
+void early_dma_memcpy_done(void);
+
+#endif
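
The memcpy-style helpers at the end wrap a memory-to-memory DMA channel behind the familiar memcpy() contract, with the early_* variants reserved for use before the IRQ subsystem is up. A hedged usage sketch; the signature suggests, but this hunk does not confirm, that the destination pointer is returned:

	/* Hypothetical bulk copy offloaded to the DMA engine. */
	void *ret = dma_memcpy(dst, src, nbytes);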