@@ -205,3 +205,174 @@ static inline void __indirect_readsl(const volatile void __iomem *bus_addr,
 /*
  * We can use the built-in functions b/c they end up calling writeb/readb
  */
+#define memset_io(c,v,l) _memset_io((c),(v),(l))
+#define memcpy_fromio(a,c,l) _memcpy_fromio((a),(c),(l))
+#define memcpy_toio(c,a,l) _memcpy_toio((c),(a),(l))
+
+#endif /* CONFIG_IXP4XX_INDIRECT_PCI */
+
+#ifndef CONFIG_PCI
+
+#define __io(v) __typesafe_io(v)
+
+#else
+
+/*
+ * IXP4xx does not have a transparent cpu -> PCI I/O translation
+ * window. Instead, it has a set of registers that must be tweaked
+ * with the proper byte lanes, command types, and address for the
+ * transaction. This means that we need to override the default
+ * I/O functions.
+ */
+
+static inline void outb(u8 value, u32 addr)
+{
+	u32 n, byte_enables, data;
+	n = addr % 4;
+	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
+	data = value << (8*n);
+	ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
+}
+
+static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count)
+{
+	while (count--)
+		outb(*vaddr++, io_addr);
+}
+
+static inline void outw(u16 value, u32 addr)
+{
+	u32 n, byte_enables, data;
+	n = addr % 4;
+	byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
+	data = value << (8*n);
+	ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
+}
+
+static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count)
+{
+	while (count--)
+		outw(cpu_to_le16(*vaddr++), io_addr);
+}
+
+static inline void outl(u32 value, u32 addr)
+{
+	ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
+}
+
+static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count)
+{
+	while (count--)
+		outl(cpu_to_le32(*vaddr++), io_addr);
+}
+
+static inline u8 inb(u32 addr)
+{
+	u32 n, byte_enables, data;
+	n = addr % 4;
+	byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
+	if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
+		return 0xff;
+
+	return data >> (8*n);
+}
+
+static inline void insb(u32 io_addr, u8 *vaddr, u32 count)
+{
+	while (count--)
+		*vaddr++ = inb(io_addr);
+}
+
+static inline u16 inw(u32 addr)
+{
+	u32 n, byte_enables, data;
+	n = addr % 4;
+	byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
+	if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
+		return 0xffff;
+
+	return data>>(8*n);
+}
+
+static inline void insw(u32 io_addr, u16 *vaddr, u32 count)
+{
+	while (count--)
+		*vaddr++ = le16_to_cpu(inw(io_addr));
+}
+
+static inline u32 inl(u32 addr)
+{
+	u32 data;
+	if (ixp4xx_pci_read(addr, NP_CMD_IOREAD, &data))
+		return 0xffffffff;
+
+	return data;
+}
+
+static inline void insl(u32 io_addr, u32 *vaddr, u32 count)
+{
+	while (count--)
+		*vaddr++ = le32_to_cpu(inl(io_addr));
+}
+
+#define PIO_OFFSET 0x10000UL
+#define PIO_MASK 0x0ffffUL
+
+#define __is_io_address(p) (((unsigned long)p >= PIO_OFFSET) && \
+			((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
+
+#define ioread8(p) ioread8(p)
+static inline unsigned int ioread8(const void __iomem *addr)
+{
+	unsigned long port = (unsigned long __force)addr;
+	if (__is_io_address(port))
+		return (unsigned int)inb(port & PIO_MASK);
+	else
+#ifndef CONFIG_IXP4XX_INDIRECT_PCI
+		return (unsigned int)__raw_readb(addr);
+#else
+		return (unsigned int)__indirect_readb(addr);
+#endif
+}
+
+#define ioread8_rep(p, v, c) ioread8_rep(p, v, c)
+static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
+{
+	unsigned long port = (unsigned long __force)addr;
+	if (__is_io_address(port))
+		insb(port & PIO_MASK, vaddr, count);
+	else
+#ifndef CONFIG_IXP4XX_INDIRECT_PCI
+		__raw_readsb(addr, vaddr, count);
+#else
+		__indirect_readsb(addr, vaddr, count);
+#endif
+}
+
+#define ioread16(p) ioread16(p)
+static inline unsigned int ioread16(const void __iomem *addr)
+{
+	unsigned long port = (unsigned long __force)addr;
+	if (__is_io_address(port))
+		return (unsigned int)inw(port & PIO_MASK);
+	else
+#ifndef CONFIG_IXP4XX_INDIRECT_PCI
+		return le16_to_cpu((__force __le16)__raw_readw(addr));
+#else
+		return (unsigned int)__indirect_readw(addr);
+#endif
+}
+
+#define ioread16_rep(p, v, c) ioread16_rep(p, v, c)
+static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
+				u32 count)
+{
+	unsigned long port = (unsigned long __force)addr;
+	if (__is_io_address(port))
+		insw(port & PIO_MASK, vaddr, count);
+	else
+#ifndef CONFIG_IXP4XX_INDIRECT_PCI
+		__raw_readsw(addr, vaddr, count);
+#else
+		__indirect_readsw(addr, vaddr, count);
+#endif
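
The byte-lane arithmetic that the comment above describes (and that outb()/outw() implement) can be tried out in isolation. The following standalone sketch is not part of the patch and makes no kernel calls; it only mirrors the computation: PCI byte enables are active low, so clearing BIT(addr % 4) selects that lane, and the value is shifted onto the matching byte of the 32-bit data word. DEMO_NP_CBE_BESL and DEMO_BIT are hypothetical stand-ins for IXP4XX_PCI_NP_CBE_BESL and the kernel's BIT() macro; the shift value of 4 is an assumption here, not something this excerpt defines.

#include <stdio.h>

#define DEMO_NP_CBE_BESL	4		/* assumed byte-enable field shift */
#define DEMO_BIT(n)		(1u << (n))

/* Mirrors outb(): enable one (active-low) byte lane, shift data onto it. */
static void demo_io_write8(unsigned int addr, unsigned int value)
{
	unsigned int n = addr % 4;
	unsigned int byte_enables = (0xf & ~DEMO_BIT(n)) << DEMO_NP_CBE_BESL;
	unsigned int data = value << (8 * n);

	printf("8-bit write @0x%04x: cmd/be=0x%02x data=0x%08x\n",
	       addr, byte_enables, data);
}

/* Mirrors outw(): two adjacent lanes are enabled for a 16-bit access. */
static void demo_io_write16(unsigned int addr, unsigned int value)
{
	unsigned int n = addr % 4;
	unsigned int byte_enables =
		(0xf & ~(DEMO_BIT(n) | DEMO_BIT(n + 1))) << DEMO_NP_CBE_BESL;
	unsigned int data = value << (8 * n);

	printf("16-bit write @0x%04x: cmd/be=0x%02x data=0x%08x\n",
	       addr, byte_enables, data);
}

int main(void)
{
	demo_io_write8(0x3f2, 0xab);	/* addr % 4 == 2 -> lane 2 only   */
	demo_io_write16(0x1f0, 0x1234);	/* addr % 4 == 0 -> lanes 0 and 1 */
	return 0;
}

The read path in the patch undoes the same shift: inb() and inw() issue the read with the corresponding byte enables and then return data >> (8*n) to pull the requested lane back down to bit 0.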