@@ -263,3 +263,158 @@ irongate_init_arch(void)
 	struct pci_controller *hose;
 	int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006;	/* Albacore? */
 
+	IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;
+
+	irongate_pci_clr_err();
+
+	if (amd761)
+		albacore_init_arch();
+
+	irongate_setup_agp();
+
+	/*
+	 * Create our single hose.
+	 */
+
+	pci_isa_hose = hose = alloc_pci_controller();
+	hose->io_space = &ioport_resource;
+	hose->mem_space = &iomem_resource;
+	hose->index = 0;
+
+	/* This is for userland consumption.  For some reason, the 40-bit
+	   PIO bias that we use in the kernel through KSEG didn't work for
+	   the page table based user mappings.  So make sure we get the
+	   43-bit PIO bias.  */
+	hose->sparse_mem_base = 0;
+	hose->sparse_io_base = 0;
+	hose->dense_mem_base
+	  = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
+	hose->dense_io_base
+	  = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;
+
+	hose->sg_isa = hose->sg_pci = NULL;
+	__direct_map_base = 0;
+	__direct_map_size = 0xffffffff;
+}
+
+/*
+ * IO map and AGP support
+ */
+#include <linux/vmalloc.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <linux/export.h>
+#include <asm/pgalloc.h>
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr))
+
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])
+
+void __iomem *
+irongate_ioremap(unsigned long addr, unsigned long size)
+{
+	struct vm_struct *area;
+	unsigned long vaddr;
+	unsigned long baddr, last;
+	u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
+	unsigned long gart_bus_addr;
+
+	if (!alpha_agpgart_size)
+		return (void __iomem *)(addr + IRONGATE_MEM);
+
+	gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
+			PCI_BASE_ADDRESS_MEM_MASK;
+
+	/*
+	 * Check for within the AGP aperture...
+	 */
+	do {
+		/*
+		 * Check the AGP area
+		 */
+		if (addr >= gart_bus_addr && addr + size - 1 <
+		    gart_bus_addr + alpha_agpgart_size)
+			break;
+
+		/*
+		 * Not found - assume legacy ioremap
+		 */
+		return (void __iomem *)(addr + IRONGATE_MEM);
+	} while(0);
+
+	mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
+			PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);
+
+	gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1]));	/* FIXME */
+
+	/*
+	 * Adjust the limits (mappings must be page aligned)
+	 */
+	if (addr & ~PAGE_MASK) {
+		printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
+		       addr);
+		return (void __iomem *)(addr + IRONGATE_MEM);
+	}
+	last = addr + size - 1;
+	size = PAGE_ALIGN(last) - addr;
+
+#if 0
+	printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
+	printk("irongate_ioremap: gart_bus_addr 0x%lx\n", gart_bus_addr);
+	printk("irongate_ioremap: gart_aper_size 0x%lx\n", gart_aper_size);
+	printk("irongate_ioremap: mmio_regs %p\n", mmio_regs);
+	printk("irongate_ioremap: gatt_pages %p\n", gatt_pages);
+
+	for(baddr = addr; baddr <= last; baddr += PAGE_SIZE)
+	{
+		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
+		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
+		printk("irongate_ioremap: cur_gatt %p pte 0x%x\n",
+		       cur_gatt, pte);
+	}
+#endif
+
+	/*
+	 * Map it
+	 */
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area) return NULL;
+
+	for(baddr = addr, vaddr = (unsigned long)area->addr;
+	    baddr <= last;
+	    baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
+	{
+		cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
+		pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
+
+		if (__alpha_remap_area_pages(vaddr,
+					     pte, PAGE_SIZE, 0)) {
+			printk("AGP ioremap: FAILED to map...\n");
+			vfree(area->addr);
+			return NULL;
+		}
+	}
+
+	flush_tlb_all();
+
+	vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
+#if 0
+	printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
+	       addr, size, vaddr);
+#endif
+	return (void __iomem *)vaddr;
+}
+EXPORT_SYMBOL(irongate_ioremap);
+
+void
+irongate_iounmap(volatile void __iomem *xaddr)
+{
+	unsigned long addr = (unsigned long) xaddr;
+	if (((long)addr >> 41) == -2)
+		return;	/* kseg map, nothing to do */
+	if (addr)
+		return vfree((void *)(PAGE_MASK & addr));
+}
+EXPORT_SYMBOL(irongate_iounmap);
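
Note: the GET_PAGE_DIR_OFF/GET_GATT_OFF macros added above implement the two-level GATT lookup used by irongate_ioremap: bits 22 and up of the bus address select a GATT directory entry (each 4 KB GATT page of 1024 32-bit entries covers 4 MB of aperture), and bits 12-21 select the entry within that page, whose low valid bit the patch masks off with "& ~1". The stand-alone sketch below is not part of the patch; it only illustrates that address split, and the sample address is made up.

/* Hypothetical user-space illustration of the GATT address split
   (same shift/mask values as the macros in the patch). */
#include <stdio.h>

#define GET_PAGE_DIR_OFF(addr)	((addr) >> 22)			/* which GATT page */
#define GET_GATT_OFF(addr)	(((addr) & 0x003ff000) >> 12)	/* entry within it */

int main(void)
{
	unsigned long addr = 0x00c43000UL;	/* made-up aperture address */

	/* 0xc43000 >> 22 = 3; (0xc43000 & 0x3ff000) >> 12 = 0x43 = 67 */
	printf("dir idx %lu, gatt off %lu\n",
	       GET_PAGE_DIR_OFF(addr), GET_GATT_OFF(addr));
	return 0;
}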