@@ -267,3 +267,159 @@ static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
 		return __raw_readl(AAU_ADAR(chan));
 	default:
 		BUG();
+	}
+	return 0;
+}
+
+static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
+						u32 next_desc_addr)
+{
+	int id = chan->device->id;
+
+	switch (id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		__raw_writel(next_desc_addr, DMA_NDAR(chan));
+		break;
+	case AAU_ID:
+		__raw_writel(next_desc_addr, AAU_ANDAR(chan));
+		break;
+	}
+
+}
+
+#define IOP_ADMA_STATUS_BUSY (1 << 10)
+#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
+#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
+#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
+
+static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
+{
+	u32 status = __raw_readl(DMA_CSR(chan));
+	return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
+}
+
+static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
+					int num_slots)
+{
+	/* num_slots will only ever be 1, 2, 4, or 8 */
+	return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
+{
+	*slots_per_op = 1;
+	return 1;
+}
+
+/* to do: support large (i.e. > hw max) buffer sizes */
+static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
+{
+	*slots_per_op = 1;
+	return 1;
+}
+
+static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
+					int *slots_per_op)
+{
+	static const char slot_count_table[] = {
+						1, 1, 1, 1, /* 01 - 04 */
+						2, 2, 2, 2, /* 05 - 08 */
+						4, 4, 4, 4, /* 09 - 12 */
+						4, 4, 4, 4, /* 13 - 16 */
+						8, 8, 8, 8, /* 17 - 20 */
+						8, 8, 8, 8, /* 21 - 24 */
+						8, 8, 8, 8, /* 25 - 28 */
+						8, 8, 8, 8, /* 29 - 32 */
+					      };
+	*slots_per_op = slot_count_table[src_cnt - 1];
+	return *slots_per_op;
+}
+
+static inline int
+iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
+{
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return iop_chan_memcpy_slot_count(0, slots_per_op);
+	case AAU_ID:
+		return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
+					int *slots_per_op)
+{
+	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+	if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
+		return slot_cnt;
+
+	len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+	while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
+		len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
+		slot_cnt += *slots_per_op;
+	}
+
+	slot_cnt += *slots_per_op;
+
+	return slot_cnt;
+}
+
+/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
+ * descriptors
+ */
+static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
+					int *slots_per_op)
+{
+	int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
+
+	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
+		return slot_cnt;
+
+	len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+	while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
+		len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
+		slot_cnt += *slots_per_op;
+	}
+
+	slot_cnt += *slots_per_op;
+
+	return slot_cnt;
+}
+
+static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
+{
+	return 0;
+}
+
+static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan)
+{
+	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
+
+	switch (chan->device->id) {
+	case DMA0_ID:
+	case DMA1_ID:
+		return hw_desc.dma->dest_addr;
+	case AAU_ID:
+		return hw_desc.aau->dest_addr;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
+					struct iop_adma_chan *chan)
+{
+	BUG();
+	return 0;
+}
+