@@ -282,3 +282,141 @@ struct el_CIA_sysdata_mcheck {
         unsigned long err_mask;
         unsigned long cia_syn;
         unsigned long mem_err0;
+        unsigned long mem_err1;
+        unsigned long pci_err0;
+        unsigned long pci_err1;
+        unsigned long pci_err2;
+};
+
+
+#ifdef __KERNEL__
+
+#ifndef __EXTERN_INLINE
+/* Do not touch, this should *NOT* be static inline */
+#define __EXTERN_INLINE extern inline
+#define __IO_EXTERN_INLINE
+#endif
+
+/*
+ * I/O functions:
+ *
+ * CIA (the 2117x PCI/memory support chipset for the EV5 (21164)
+ * series of processors) uses a sparse address mapping scheme to
+ * get at PCI memory and I/O.
+ */
+
+/*
+ * Memory functions.  64-bit and 32-bit accesses are done through
+ * dense memory space, everything else through sparse space.
+ *
+ * For reading and writing 8- and 16-bit quantities we need to
+ * go through one of the three sparse address mapping regions
+ * and use the HAE_MEM CSR to provide some bits of the address.
+ * The following few routines use only sparse address region 1,
+ * which gives 1 Gbyte of accessible space, corresponding exactly
+ * to the amount of PCI memory mapped *into* the system address space.
+ * See p. 6-17 of the specification, but it looks something like this:
+ *
+ * 21164 Address:
+ *
+ *          3         2         1
+ * 9876543210987654321098765432109876543210
+ * 1ZZZZ0.PCI.QW.Address............BBLL
+ *
+ * ZZ = SBZ
+ * BB = Byte offset
+ * LL = Transfer length
+ *
+ * PCI Address:
+ *
+ * 3         2         1
+ * 10987654321098765432109876543210
+ * HHH....PCI.QW.Address........ 00
+ *
+ * HHH = 31:29 HAE_MEM CSR
+ *
+ */
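As an illustrative aside (not part of the patch): for the PCI-memory case the translation described above boils down to shifting the region-1 offset left by five bits and adding the sparse base plus a length code (0x00 for byte, 0x08 for word, 0x18 for longword). A minimal sketch, with the helper name cia_sparse_addr chosen here purely for illustration:

        /* Illustrative sketch only -- not part of the patch.  Builds the
           sparse-space address that the access routines below generate.
           len_code is 0x00 (byte), 0x08 (word) or 0x18 (longword). */
        static unsigned long cia_sparse_addr(unsigned long busaddr,
                                             unsigned long len_code)
        {
                return ((busaddr & CIA_MEM_R1_MASK) << 5)
                        + CIA_SPARSE_MEM + len_code;
        }

For example, a byte read at offset 0x12345 becomes a 32-bit load from CIA_SPARSE_MEM + 0x2468a0; the byte of interest comes back in lane 0x12345 & 3 = 1, from which __kernel_extbl() moves it down to bits 7:0. The real routines below pick CIA_SPARSE_MEM or CIA_IO as the base depending on whether the cookie refers to PCI memory or to an I/O port.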
+
+#define vip volatile int __force *
+#define vuip volatile unsigned int __force *
+#define vulp volatile unsigned long __force *
+
+__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr)
+{
+        unsigned long addr = (unsigned long) xaddr;
+        unsigned long result, base_and_type;
+
+        if (addr >= CIA_DENSE_MEM)
+                base_and_type = CIA_SPARSE_MEM + 0x00;
+        else
+                base_and_type = CIA_IO + 0x00;
+
+        /* We can use CIA_MEM_R1_MASK for io ports too, since it is large
+           enough to cover all io ports, and smaller than CIA_IO. */
+        addr &= CIA_MEM_R1_MASK;
+        result = *(vip) ((addr << 5) + base_and_type);
+        return __kernel_extbl(result, addr & 3);
+}
+
+__EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr)
+{
+        unsigned long addr = (unsigned long) xaddr;
+        unsigned long w, base_and_type;
+
+        if (addr >= CIA_DENSE_MEM)
+                base_and_type = CIA_SPARSE_MEM + 0x00;
+        else
+                base_and_type = CIA_IO + 0x00;
+
+        addr &= CIA_MEM_R1_MASK;
+        w = __kernel_insbl(b, addr & 3);
+        *(vuip) ((addr << 5) + base_and_type) = w;
+}
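The __kernel_extbl() and __kernel_insbl() calls above are the Alpha byte-lane extract/insert operations. Purely as an illustration of their effect (these portable equivalents are not part of the patch, and the names are made up here):

        /* Illustration only: the byte-lane helpers' effect in portable C. */
        static unsigned long example_extbl(unsigned long val, unsigned long lane)
        {
                /* pull the byte in 'lane' down to bits 7:0 */
                return (val >> (lane * 8)) & 0xff;
        }

        static unsigned long example_insbl(unsigned long val, unsigned long lane)
        {
                /* place the low byte of 'val' into 'lane', zero elsewhere */
                return (val & 0xff) << (lane * 8);
        }

This is why the 8-bit routines read or write a full 32-bit sparse-space location: the encoded byte offset tells the chipset which byte lanes to drive on PCI, and the helpers move the data between bits 7:0 and the addressed lane within that 32-bit datum.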
+
+__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr)
+{
+        unsigned long addr = (unsigned long) xaddr;
+        unsigned long result, base_and_type;
+
+        if (addr >= CIA_DENSE_MEM)
+                base_and_type = CIA_SPARSE_MEM + 0x08;
+        else
+                base_and_type = CIA_IO + 0x08;
+
+        addr &= CIA_MEM_R1_MASK;
+        result = *(vip) ((addr << 5) + base_and_type);
+        return __kernel_extwl(result, addr & 3);
+}
+
+__EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr)
+{
+        unsigned long addr = (unsigned long) xaddr;
+        unsigned long w, base_and_type;
+
+        if (addr >= CIA_DENSE_MEM)
+                base_and_type = CIA_SPARSE_MEM + 0x08;
+        else
+                base_and_type = CIA_IO + 0x08;
+
+        addr &= CIA_MEM_R1_MASK;
+        w = __kernel_inswl(b, addr & 3);
+        *(vuip) ((addr << 5) + base_and_type) = w;
+}
+
+__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr)
+{
+        unsigned long addr = (unsigned long) xaddr;
+        if (addr < CIA_DENSE_MEM)
+                addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
+        return *(vuip)addr;
+}
+
+__EXTERN_INLINE void cia_iowrite32(u32 b, void __iomem *xaddr)
+{
+        unsigned long addr = (unsigned long) xaddr;
+        if (addr < CIA_DENSE_MEM)
+                addr = ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
+        *(vuip)addr = b;
+}
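For 32-bit accesses no byte-lane manipulation is needed: dense-space addresses are used unchanged, and only I/O-port addresses get re-encoded into sparse I/O space with the longword length code 0x18. A sketch of that rewrite (the helper name is hypothetical, shown for illustration only):

        /* Illustration only: the address rewrite done by the two 32-bit
           routines above for I/O-port (non-dense) cookies. */
        static unsigned long example_cia_port32_addr(unsigned long addr)
        {
                if (addr < CIA_DENSE_MEM)
                        return ((addr - CIA_IO) << 5) + CIA_IO + 0x18;
                return addr;            /* dense PCI memory: used as-is */
        }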
+
+__EXTERN_INLINE void __iomem *cia_ioportmap(unsigned long addr)