@@ -481,3 +481,143 @@ static inline void writes##bwlq(volatile void __iomem *mem, \
 	const volatile type *__addr = addr; \
 	\
 	while (count--) { \
+		__mem_write##bwlq(*__addr, mem); \
+		__addr++; \
+	} \
+} \
+	\
+static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
+			       unsigned int count) \
+{ \
+	volatile type *__addr = addr; \
+	\
+	while (count--) { \
+		*__addr = __mem_read##bwlq(mem); \
+		__addr++; \
+	} \
+}
+
+#define __BUILD_IOPORT_STRING(bwlq, type) \
+	\
+static inline void outs##bwlq(unsigned long port, const void *addr, \
+			      unsigned int count) \
+{ \
+	const volatile type *__addr = addr; \
+	\
+	while (count--) { \
+		__mem_out##bwlq(*__addr, port); \
+		__addr++; \
+	} \
+} \
+	\
+static inline void ins##bwlq(unsigned long port, void *addr, \
+			     unsigned int count) \
+{ \
+	volatile type *__addr = addr; \
+	\
+	while (count--) { \
+		*__addr = __mem_in##bwlq(port); \
+		__addr++; \
+	} \
+}
+
+#define BUILDSTRING(bwlq, type) \
+	\
+__BUILD_MEMORY_STRING(bwlq, type) \
+__BUILD_IOPORT_STRING(bwlq, type)
+
+BUILDSTRING(b, u8)
+BUILDSTRING(w, u16)
+BUILDSTRING(l, u32)
+#ifdef CONFIG_64BIT
+BUILDSTRING(q, u64)
+#endif
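BUILDSTRING(w, u16), for example, expands to readsw()/writesw() and insw()/outsw(), each of which moves a whole buffer through a single device register or port. A minimal sketch of how a driver might use the generated readsw() to drain a device FIFO; the register offset and the FIFO semantics are illustrative assumptions, not part of this header:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical: drain 64 halfwords from a 16-bit RX FIFO register.
 * DEMO_FIFO_OFF is a made-up offset; 'base' comes from ioremap(). */
#define DEMO_FIFO_OFF 0x10

static void demo_drain_fifo(void __iomem *base, u16 *buf)
{
	/* readsw() reads the same MMIO location 64 times, storing the
	 * results to consecutive elements of buf. */
	readsw(base + DEMO_FIFO_OFF, buf, 64);
}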
+
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define mmiowb() wmb()
+#else
+/* Depends on MIPS II instruction set */
+#define mmiowb() asm volatile ("sync" ::: "memory")
+#endif
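The classic use of mmiowb() is to order a posted MMIO write against a following spin_unlock(), so that another CPU acquiring the lock cannot reach the device before the first CPU's write has left the write buffers. A hedged sketch; the device structure and doorbell offset are invented for illustration:

#include <linux/io.h>
#include <linux/spinlock.h>

struct demo_dev {		/* hypothetical driver state */
	spinlock_t lock;
	void __iomem *regs;
};

static void demo_ring_doorbell(struct demo_dev *d)
{
	spin_lock(&d->lock);
	writel(1, d->regs + 0x40);	/* 0x40: illustrative doorbell offset */
	mmiowb();			/* order the MMIO write before the unlock */
	spin_unlock(&d->lock);
}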
+
+static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
+{
+	memset((void __force *) addr, val, count);
+}
+static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
+{
+	memcpy(dst, (void __force *) src, count);
+}
+static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
+{
+	memcpy((void __force *) dst, src, count);
+}
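These helpers move data between ordinary kernel memory and ioremap()ed regions without forcing drivers to sprinkle __force casts around. A small sketch, assuming a hypothetical device with a 256-byte shared-memory window:

#include <linux/io.h>
#include <linux/string.h>

#define DEMO_WIN_SIZE 256	/* made-up window size */

static void demo_use_window(void __iomem *win, void *snapshot)
{
	memset_io(win, 0, DEMO_WIN_SIZE);		/* clear device window */
	memcpy_toio(win, snapshot, DEMO_WIN_SIZE);	/* kernel -> device */
	memcpy_fromio(snapshot, win, DEMO_WIN_SIZE);	/* device -> kernel */
}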
+
+/*
+ * The caches on some architectures aren't dma-coherent, so coherence
+ * has to be handled in software.  There are three types of operations
+ * that can be applied to dma buffers.
+ *
+ * - dma_cache_wback_inv(start, size) makes caches and memory coherent
+ *   by writing the content of the caches back to memory, if necessary.
+ *   The function also invalidates the affected part of the caches as
+ *   necessary before DMA transfers from outside to memory.
+ * - dma_cache_wback(start, size) likewise makes caches and memory
+ *   coherent by writing the content of the caches back to memory, if
+ *   necessary; implementations may also invalidate the affected cache
+ *   lines, as dma_cache_wback_inv() does.
+ * - dma_cache_inv(start, size) invalidates the affected parts of the
+ *   caches.  Dirty lines of the caches may be written back or simply
+ *   be discarded.  This operation is necessary before dma operations
+ *   to the memory.
+ *
+ * This API used to be exported; it now is for arch code internal use only.
+ */
+#ifdef CONFIG_DMA_NONCOHERENT
+
+extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
+extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
+extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
+
+#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
+#define dma_cache_wback(start, size) _dma_cache_wback(start, size)
+#define dma_cache_inv(start, size) _dma_cache_inv(start, size)
+
+#else /* Sane hardware */
+
+#define dma_cache_wback_inv(start, size) \
+	do { (void) (start); (void) (size); } while (0)
+#define dma_cache_wback(start, size) \
+	do { (void) (start); (void) (size); } while (0)
+#define dma_cache_inv(start, size) \
+	do { (void) (start); (void) (size); } while (0)
+
+#endif /* CONFIG_DMA_NONCOHERENT */
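On a noncoherent configuration the pattern, per the comment above, is writeback before the device reads memory and invalidate before the device writes it. A sketch of that pattern; these hooks are arch-internal, and drivers are expected to reach them only through the generic DMA API:

/* Hypothetical buffer about to be handed to a DMA engine. */
static void demo_dma_prepare(unsigned long buf, unsigned long size, int to_device)
{
	if (to_device)
		dma_cache_wback(buf, size);	/* flush CPU writes to memory */
	else
		dma_cache_inv(buf, size);	/* drop stale lines before the device writes */
}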
+
+/*
+ * Read a 32-bit register that requires a 64-bit read cycle on the bus.
+ * Avoid interrupt mucking; just adjust the address for 4-byte access.
+ * Assume the addresses are 8-byte aligned.
+ */
+#ifdef __MIPSEB__
+#define __CSR_32_ADJUST 4
+#else
+#define __CSR_32_ADJUST 0
+#endif
+
+#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
+#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
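On a big-endian 64-bit bus, the significant 32-bit half of each 8-byte register slot sits 4 bytes in, which is exactly what __CSR_32_ADJUST compensates for. A sketch of reading such a register; the base address and offset are invented:

#include <linux/types.h>

/* Hypothetical: read a status CSR from a register block whose 32-bit
 * registers each occupy an 8-byte-aligned, 8-byte bus slot. */
#define DEMO_CSR_BASE	0xbf000000UL	/* made-up KSEG1 address */
#define DEMO_STATUS	0x08		/* made-up register offset */

static inline u32 demo_read_status(void)
{
	return csr_in32(DEMO_CSR_BASE + DEMO_STATUS);
}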
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
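For context, this hook is what the /dev/mem read and write paths use to turn a physical address into something the kernel can dereference. A simplified sketch of that call pattern, with range checks and error handling omitted:

/* Assumes 'phys' is a valid, mapped physical address and 'count'
 * bytes fit in the kernel buffer; both are illustrative. */
static void demo_read_phys(unsigned long phys, void *kbuf, size_t count)
{
	void *ptr = xlate_dev_mem_ptr(phys);	/* phys -> kernel virtual */

	memcpy(kbuf, ptr, count);
}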
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#endif /* _ASM_IO_H */