@@ -256,3 +256,146 @@ static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
 {
  return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
+
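+/* uncached, serialised mapping: same attributes as plain ioremap() above */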
+static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
+}
+
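+/* mapping with write-through caching */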
+static inline void __iomem *ioremap_writethrough(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
+}
+
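+/* fully cached mapping */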
+static inline void __iomem *ioremap_fullcache(unsigned long physaddr, unsigned long size)
+{
+ return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
+}
+
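+/* write-combining is not available, so fall back to an uncached mapping */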
+#define ioremap_wc ioremap_nocache
+
+extern void iounmap(void volatile __iomem *addr);
+
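+/* the I/O port space is memory-mapped, so the port number itself is the cookie */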
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return (void __iomem *) port;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
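+/* drain the CPU write buffers with a memory barrier */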
+static inline void flush_write_buffers(void)
+{
+ __asm__ __volatile__ ("membar" : : : "memory");
+}
+
+/*
+ * perform the I/O access appropriate to the mapping token type
+ */
+static inline unsigned int ioread8(void __iomem *p)
+{
+ return __builtin_read8(p);
+}
+
+static inline unsigned int ioread16(void __iomem *p)
+{
+ uint16_t ret = __builtin_read16(p);
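+ /* PCI space is little-endian; swap the value to CPU byte order */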
+ if (__is_PCI_addr(p))
+  ret = _swapw(ret);
+ return ret;
+}
+
+static inline unsigned int ioread32(void __iomem *p)
+{
+ uint32_t ret = __builtin_read32(p);
+ if (__is_PCI_addr(p))
+  ret = _swapl(ret);
+ return ret;
+}
+
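+/* writes to PCI memory space are posted, so flush them explicitly */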
+static inline void iowrite8(u8 val, void __iomem *p)
+{
+ __builtin_write8(p, val);
+ if (__is_PCI_MEM(p))
+  __flush_PCI_writes();
+}
+
+static inline void iowrite16(u16 val, void __iomem *p)
+{
+ if (__is_PCI_addr(p))
+  val = _swapw(val);
+ __builtin_write16(p, val);
+ if (__is_PCI_MEM(p))
+  __flush_PCI_writes();
+}
+
+static inline void iowrite32(u32 val, void __iomem *p)
+{
+ if (__is_PCI_addr(p))
+  val = _swapl(val);
+ __builtin_write32(p, val);
+ if (__is_PCI_MEM(p))
+  __flush_PCI_writes();
+}
+
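+/* repeating ("string") transfers; the 32-bit forms use the no-swap (_ns) helpers */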
+static inline void ioread8_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ io_insb((unsigned long) p, dst, count);
+}
+
+static inline void ioread16_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ io_insw((unsigned long) p, dst, count);
+}
+
+static inline void ioread32_rep(void __iomem *p, void *dst, unsigned long count)
+{
+ __insl_ns((unsigned long) p, dst, count);
+}
+
+static inline void iowrite8_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ io_outsb((unsigned long) p, src, count);
+}
+
+static inline void iowrite16_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ io_outsw((unsigned long) p, src, count);
+}
+
+static inline void iowrite32_rep(void __iomem *p, const void *src, unsigned long count)
+{
+ __outsl_ns((unsigned long) p, src, count);
+}
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
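+ /* nothing to unmap: pci_iomap() cookies need no teardown here */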
+}
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IO_H */