- /*
- * linux/arch/alpha/kernel/core_cia.c
- *
- * Written by David A Rusling (david.rusling@reo.mts.dec.com).
- * December 1995.
- *
- * Copyright (C) 1995 David A Rusling
- * Copyright (C) 1997, 1998 Jay Estabrook
- * Copyright (C) 1998, 1999, 2000 Richard Henderson
- *
- * Code common to all CIA core logic chips.
- */
- #define __EXTERN_INLINE inline
- #include <asm/io.h>
- #include <asm/core_cia.h>
- #undef __EXTERN_INLINE
- #include <linux/types.h>
- #include <linux/pci.h>
- #include <linux/sched.h>
- #include <linux/init.h>
- #include <linux/bootmem.h>
- #include <asm/ptrace.h>
- #include <asm/mce.h>
- #include "proto.h"
- #include "pci_impl.h"
- /*
- * NOTE: Herein lie back-to-back mb instructions. They are magic.
- * One plausible explanation is that the i/o controller does not properly
- * handle the system transaction. Another involves timing. Ho hum.
- */
- #define DEBUG_CONFIG 0
- #if DEBUG_CONFIG
- # define DBGC(args) printk args
- #else
- # define DBGC(args)
- #endif
- #define vip volatile int *
- /*
- * Given a bus, device, and function number, compute the resulting
- * configuration space address. Configuration cycles go through shared
- * chip state (notably CIA_IOC_CFG), so it is not safe to have
- * concurrent invocations of the configuration space access routines,
- * but there really shouldn't be any need for this.
- *
- * Type 0:
- *
- * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
- * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * 31:11 Device select bits
- * 10:8 Function number
- * 7:2 Register number
- *
- * Type 1:
- *
- * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
- * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * 31:24 reserved
- * 23:16 bus number (8 bits = 256 possible buses)
- * 15:11 Device number (5 bits)
- * 10:8 function number
- * 7:2 register number
- *
- * Notes:
- * The function number selects which function of a multi-function device
- * (e.g., SCSI and Ethernet).
- *
- * The register selects a DWORD (32 bit) register offset. Hence it
- * doesn't get shifted by 2 bits as we want to "drop" the bottom two
- * bits.
- */
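- /*
- * Worked example: for bus 0, device 7, function 0, register 0x10,
- * device_fn = (7 << 3) | 0 = 0x38, so mk_conf_addr() below yields
- * pci_addr = (0 << 16) | (0x38 << 8) | 0x10 = 0x3810. cia_read_config()
- * then forms the sparse-space address as (pci_addr << 5) + (size-1)*8
- * + CIA_CONF; a 4-byte read thus uses 0x3810*32 + 0x18 + CIA_CONF
- * = 0x70218 + CIA_CONF.
- */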
- static int
- mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where,
- unsigned long *pci_addr, unsigned char *type1)
- {
- u8 bus = bus_dev->number;
- *type1 = (bus != 0);
- *pci_addr = (bus << 16) | (device_fn << 8) | where;
- DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
- " returning address 0x%p\n"
- bus, device_fn, where, *pci_addr));
- return 0;
- }
- static unsigned int
- conf_read(unsigned long addr, unsigned char type1)
- {
- unsigned long flags;
- int stat0, value;
- int cia_cfg = 0;
- DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
- local_irq_save(flags);
- /* Reset status register to avoid losing errors. */
- stat0 = *(vip)CIA_IOC_CIA_ERR;
- *(vip)CIA_IOC_CIA_ERR = stat0;
- mb();
- *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */
- /* If Type1 access, must set CIA CFG. */
- if (type1) {
- cia_cfg = *(vip)CIA_IOC_CFG;
- *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
- mb();
- *(vip)CIA_IOC_CFG;
- }
- mb();
- draina();
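- /* A config cycle addressed to an empty slot typically ends in a
-    master abort, which the CPU sees as a machine check. Flag the
-    mcheck as expected so the handler dismisses it quietly and the
-    read can be reported as 0xffffffff below. */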
- mcheck_expected(0) = 1;
- mcheck_taken(0) = 0;
- mb();
- /* Access configuration space. */
- value = *(vip)addr;
- mb();
- mb(); /* magic */
- if (mcheck_taken(0)) {
- mcheck_taken(0) = 0;
- value = 0xffffffff;
- mb();
- }
- mcheck_expected(0) = 0;
- mb();
- /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
- if (type1) {
- *(vip)CIA_IOC_CFG = cia_cfg;
- mb();
- *(vip)CIA_IOC_CFG;
- }
- local_irq_restore(flags);
- DBGC(("done\n"));
- return value;
- }
- static void
- conf_write(unsigned long addr, unsigned int value, unsigned char type1)
- {
- unsigned long flags;
- int stat0, cia_cfg = 0;
- DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
- local_irq_save(flags);
- /* Reset status register to avoid losing errors. */
- stat0 = *(vip)CIA_IOC_CIA_ERR;
- *(vip)CIA_IOC_CIA_ERR = stat0;
- mb();
- *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */
- /* If Type1 access, must set CIA CFG. */
- if (type1) {
- cia_cfg = *(vip)CIA_IOC_CFG;
- *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
- mb();
- *(vip)CIA_IOC_CFG;
- }
- mb();
- draina();
- mcheck_expected(0) = 1;
- mcheck_taken(0) = 0;
- mb();
- /* Access configuration space. */
- *(vip)addr = value;
- mb();
- *(vip)addr; /* read back to force the write */
- mcheck_expected(0) = 0;
- mb();
- /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
- if (type1) {
- *(vip)CIA_IOC_CFG = cia_cfg;
- mb();
- *(vip)CIA_IOC_CFG;
- }
- local_irq_restore(flags);
- DBGC(("done\n"));
- }
- static int
- cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
- u32 *value)
- {
- unsigned long addr, pci_addr;
- long mask;
- unsigned char type1;
- int shift;
- if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
- return PCIBIOS_DEVICE_NOT_FOUND;
- mask = (size - 1) * 8;
- shift = (where & 3) * 8;
- addr = (pci_addr << 5) + mask + CIA_CONF;
- *value = conf_read(addr, type1) >> (shift);
- return PCIBIOS_SUCCESSFUL;
- }
- static int
- cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
- u32 value)
- {
- unsigned long addr, pci_addr;
- long mask;
- unsigned char type1;
- if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
- return PCIBIOS_DEVICE_NOT_FOUND;
- mask = (size - 1) * 8;
- addr = (pci_addr << 5) + mask + CIA_CONF;
- conf_write(addr, value << ((where & 3) * 8), type1);
- return PCIBIOS_SUCCESSFUL;
- }
- struct pci_ops cia_pci_ops =
- {
- .read = cia_read_config,
- .write = cia_write_config,
- };
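- /*
- * The generic PCI layer reaches these via the pci_bus_*_config_*
- * accessors; e.g. pci_bus_read_config_dword(bus, devfn, where, &val)
- * lands in cia_read_config() with size == 4.
- */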
- /*
- * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
- * It cannot be invalidated. Rather than hard code the pass numbers,
- * actually try the tbia to see if it works.
- */
- void
- cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
- {
- wmb();
- *(vip)CIA_IOC_PCI_TBIA = 3; /* Flush all locked and unlocked. */
- mb();
- *(vip)CIA_IOC_PCI_TBIA;
- }
- /*
- * On PYXIS, even if the tbia works, we cannot use it. It effectively locks
- * the chip (as well as direct write to the tag registers) if there is a
- * SG DMA operation in progress. This is true at least for PYXIS rev. 1,
- * so always use the method below.
- */
- /*
- * This is the method NT and NetBSD use.
- *
- * Allocate mappings, and put the chip into DMA loopback mode to read a
- * garbage page. This works by forcing TLB misses, which purge old entries
- * to make room for the new entries coming in for the garbage page.
- */
- #define CIA_BROKEN_TBIA_BASE 0x30000000
- #define CIA_BROKEN_TBIA_SIZE 1024
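- /*
- * These describe a 1 MB dummy SG window at PCI address 0x30000000,
- * backed by a 1K pte table (set up in cia_prepare_tbia_workaround()
- * below) whose 128 entries all point at the same physical page.
- */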
- /* Always called with interrupts disabled */
- void
- cia_pci_tbi_try2(struct pci_controller *hose,
- dma_addr_t start, dma_addr_t end)
- {
- void __iomem *bus_addr;
- int ctrl;
- /* Put the chip into PCI loopback mode. */
- mb();
- ctrl = *(vip)CIA_IOC_CIA_CTRL;
- *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
- mb();
- *(vip)CIA_IOC_CIA_CTRL;
- mb();
- /* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
- each read. This forces SG TLB misses. NetBSD claims that the
- TLB entries are not quite LRU, meaning that we need to read more
- times than there are actual tags. The 2117x docs claim strict
- round-robin. Oh well, we've come this far... */
- /* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can
- be filled by the TLB misses *only once* after being invalidated
- (by tbia or direct write). Next misses won't update them even
- though the lock bits are cleared. Tags 4-7 are "quite LRU" though,
- so use them and read at window 3 base exactly 4 times. Reading
- more sometimes makes the chip crazy. -ink */
- bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4);
- cia_readl(bus_addr + 0x00000);
- cia_readl(bus_addr + 0x08000);
- cia_readl(bus_addr + 0x10000);
- cia_readl(bus_addr + 0x18000);
- cia_iounmap(bus_addr);
- /* Restore normal PCI operation. */
- mb();
- *(vip)CIA_IOC_CIA_CTRL = ctrl;
- mb();
- *(vip)CIA_IOC_CIA_CTRL;
- mb();
- }
- static inline void
- cia_prepare_tbia_workaround(int window)
- {
- unsigned long *ppte, pte;
- long i;
- /* Use minimal 1K map. */
- ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
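- /* The pte holds the page frame number starting at bit 1, with
-    bit 0 as the valid bit, so shifting the physical address right
-    by (PAGE_SHIFT - 1) positions the PFN and leaves bit 0 free in
-    one step. Each 8-byte pte maps one 8K page, hence the window
-    below spans CIA_BROKEN_TBIA_SIZE*1024 bytes. */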
- pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
- for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
- ppte[i] = pte;
- *(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
- *(vip)CIA_IOC_PCI_Wn_MASK(window)
- = (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
- *(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
- }
- static void __init
- verify_tb_operation(void)
- {
- static int page[PAGE_SIZE/4]
- __attribute__((aligned(PAGE_SIZE)))
- __initdata = { 0 };
- struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
- int ctrl, addr0, tag0, pte0, data0;
- int temp, use_tbia_try2 = 0;
- void __iomem *bus_addr;
- /* pyxis -- tbia is broken */
- if (pci_isa_hose->dense_io_base)
- use_tbia_try2 = 1;
- /* Put the chip into PCI loopback mode. */
- mb();
- ctrl = *(vip)CIA_IOC_CIA_CTRL;
- *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
- mb();
- *(vip)CIA_IOC_CIA_CTRL;
- mb();
- /* Write a valid entry directly into the TLB registers. */
- addr0 = arena->dma_base;
- tag0 = addr0 | 1;
- pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;
- *(vip)CIA_IOC_TB_TAGn(0) = tag0;
- *(vip)CIA_IOC_TB_TAGn(1) = 0;
- *(vip)CIA_IOC_TB_TAGn(2) = 0;
- *(vip)CIA_IOC_TB_TAGn(3) = 0;
- *(vip)CIA_IOC_TB_TAGn(4) = 0;
- *(vip)CIA_IOC_TB_TAGn(5) = 0;
- *(vip)CIA_IOC_TB_TAGn(6) = 0;
- *(vip)CIA_IOC_TB_TAGn(7) = 0;
- *(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
- *(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
- *(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
- *(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
- mb();
- /* Get a usable bus address */
- bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);
- /* First, verify we can read back what we've written. If
- this fails, we can't be sure of any of the other testing
- we're going to do, so bail. */
- /* ??? Actually, we could do the work with machine checks.
- By passing this register update test, we pretty much
- guarantee that cia_pci_tbi_try1 works. If this test
- fails, cia_pci_tbi_try2 might still work. */
- temp = *(vip)CIA_IOC_TB_TAGn(0);
- if (temp != tag0) {
- printk("pci: failed tb register update test "
- "(tag0 %#x != %#x)\n", temp, tag0);
- goto failed;
- }
- temp = *(vip)CIA_IOC_TB_TAGn(1);
- if (temp != 0) {
- printk("pci: failed tb register update test "
- "(tag1 %#x != 0)\n", temp);
- goto failed;
- }
- temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
- if (temp != pte0) {
- printk("pci: failed tb register update test "
- "(pte0 %#x != %#x)\n", temp, pte0);
- goto failed;
- }
- printk("pci: passed tb register update test\n");
- /* Second, verify we can actually do I/O through this entry. */
- data0 = 0xdeadbeef;
- page[0] = data0;
- mcheck_expected(0) = 1;
- mcheck_taken(0) = 0;
- mb();
- temp = cia_readl(bus_addr);
- mb();
- mcheck_expected(0) = 0;
- mb();
- if (mcheck_taken(0)) {
- printk("pci: failed sg loopback i/o read test (mcheck)\n");
- goto failed;
- }
- if (temp != data0) {
- printk("pci: failed sg loopback i/o read test "
- "(%#x != %#x)\n", temp, data0);
- goto failed;
- }
- printk("pci: passed sg loopback i/o read test\n");
- /* Third, try to invalidate the TLB. */
- if (! use_tbia_try2) {
- cia_pci_tbi(arena->hose, 0, -1);
- temp = *(vip)CIA_IOC_TB_TAGn(0);
- if (temp & 1) {
- use_tbia_try2 = 1;
- printk("pci: failed tbia test; workaround available\n");
- } else {
- printk("pci: passed tbia test\n");
- }
- }
- /* Fourth, verify the TLB snoops the EV5's caches when
- doing a tlb fill. */
- data0 = 0x5adda15e;
- page[0] = data0;
- arena->ptes[4] = pte0;
- mcheck_expected(0) = 1;
- mcheck_taken(0) = 0;
- mb();
- temp = cia_readl(bus_addr + 4*PAGE_SIZE);
- mb();
- mcheck_expected(0) = 0;
- mb();
- if (mcheck_taken(0)) {
- printk("pci: failed pte write cache snoop test (mcheck)\n");
- goto failed;
- }
- if (temp != data0) {
- printk("pci: failed pte write cache snoop test "
- "(%#x != %#x)\n", temp, data0);
- goto failed;
- }
- printk("pci: passed pte write cache snoop test\n");
- /* Fifth, verify that a previously invalid PTE entry gets
- filled from the page table. */
- data0 = 0xabcdef12;
- page[0] = data0;
- arena->ptes[5] = pte0;
- mcheck_expected(0) = 1;
- mcheck_taken(0) = 0;
- mb();
- temp = cia_readl(bus_addr + 5*PAGE_SIZE);
- mb();
- mcheck_expected(0) = 0;
- mb();
- if (mcheck_taken(0)) {
- printk("pci: failed valid tag invalid pte reload test "
- "(mcheck; workaround available)\n");
- /* Work around this bug by aligning new allocations
- on 4 page boundaries. */
- arena->align_entry = 4;
- } else if (temp != data0) {
- printk("pci: failed valid tag invalid pte reload test "
- "(%#x != %#x)\n", temp, data0);
- goto failed;
- } else {
- printk("pci: passed valid tag invalid pte reload test\n");
- }
- /* Sixth, verify machine checks are working. Test invalid
- pte under the same valid tag as we used above. */
- mcheck_expected(0) = 1;
- mcheck_taken(0) = 0;
- mb();
- temp = cia_readl(bus_addr + 6*PAGE_SIZE);
- mb();
- mcheck_expected(0) = 0;
- mb();
- printk("pci: %s pci machine check test\n",
- mcheck_taken(0) ? "passed" : "failed");
- /* Clean up after the tests. */
- arena->ptes[4] = 0;
- arena->ptes[5] = 0;
- if (use_tbia_try2) {
- alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;
- /* Tags 0-3 must be disabled if we use this workaround. */
- wmb();
- *(vip)CIA_IOC_TB_TAGn(0) = 2;
- *(vip)CIA_IOC_TB_TAGn(1) = 2;
- *(vip)CIA_IOC_TB_TAGn(2) = 2;
- *(vip)CIA_IOC_TB_TAGn(3) = 2;
- printk("pci: tbia workaround enabled\n");
- }
- alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
- exit:
- /* unmap the bus addr */
- cia_iounmap(bus_addr);
- /* Restore normal PCI operation. */
- mb();
- *(vip)CIA_IOC_CIA_CTRL = ctrl;
- mb();
- *(vip)CIA_IOC_CIA_CTRL;
- mb();
- return;
- failed:
- printk("pci: disabling sg translation window\n");
- *(vip)CIA_IOC_PCI_W0_BASE = 0;
- *(vip)CIA_IOC_PCI_W1_BASE = 0;
- pci_isa_hose->sg_isa = NULL;
- alpha_mv.mv_pci_tbi = NULL;
- goto exit;
- }
- #if defined(ALPHA_RESTORE_SRM_SETUP)
- /* Save CIA configuration data as the console had it set up. */
- struct
- {
- unsigned int hae_mem;
- unsigned int hae_io;
- unsigned int pci_dac_offset;
- unsigned int err_mask;
- unsigned int cia_ctrl;
- unsigned int cia_cnfg;
- struct {
- unsigned int w_base;
- unsigned int w_mask;
- unsigned int t_base;
- } window[4];
- } saved_config __attribute((common));
- void
- cia_save_srm_settings(int is_pyxis)
- {
- int i;
- /* Save some important registers. */
- saved_config.err_mask = *(vip)CIA_IOC_ERR_MASK;
- saved_config.cia_ctrl = *(vip)CIA_IOC_CIA_CTRL;
- saved_config.hae_mem = *(vip)CIA_IOC_HAE_MEM;
- saved_config.hae_io = *(vip)CIA_IOC_HAE_IO;
- saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC;
- if (is_pyxis)
- saved_config.cia_cnfg = *(vip)CIA_IOC_CIA_CNFG;
- else
- saved_config.cia_cnfg = 0;
- /* Save DMA windows configuration. */
- for (i = 0; i < 4; i++) {
- saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i);
- saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i);
- saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i);
- }
- mb();
- }
- void
- cia_restore_srm_settings(void)
- {
- int i;
- for (i = 0; i < 4; i++) {
- *(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base;
- *(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask;
- *(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base;
- }
- *(vip)CIA_IOC_HAE_MEM = saved_config.hae_mem;
- *(vip)CIA_IOC_HAE_IO = saved_config.hae_io;
- *(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset;
- *(vip)CIA_IOC_ERR_MASK = saved_config.err_mask;
- *(vip)CIA_IOC_CIA_CTRL = saved_config.cia_ctrl;
- if (saved_config.cia_cnfg) /* Must be pyxis. */
- *(vip)CIA_IOC_CIA_CNFG = saved_config.cia_cnfg;
- mb();
- }
- #else /* ALPHA_RESTORE_SRM_SETUP */
- #define cia_save_srm_settings(p) do {} while (0)
- #define cia_restore_srm_settings() do {} while (0)
- #endif /* ALPHA_RESTORE_SRM_SETUP */
- static void __init
- do_init_arch(int is_pyxis)
- {
- struct pci_controller *hose;
- int temp, cia_rev, tbia_window;
- cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
- printk("pci: cia revision %d%s\n",
- cia_rev, is_pyxis ? " (pyxis)" : "");
- if (alpha_using_srm)
- cia_save_srm_settings(is_pyxis);
- /* Set up error reporting. */
- temp = *(vip)CIA_IOC_ERR_MASK;
- temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
- | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
- *(vip)CIA_IOC_ERR_MASK = temp;
- /* Clear all currently pending errors. */
- temp = *(vip)CIA_IOC_CIA_ERR;
- *(vip)CIA_IOC_CIA_ERR = temp;
- /* Turn on mchecks. */
- temp = *(vip)CIA_IOC_CIA_CTRL;
- temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
- *(vip)CIA_IOC_CIA_CTRL = temp;
- /* Clear the CFG register, which gets used for PCI config space
- accesses. That is the way we want to use it, and we do not
- want to depend on what ARC or SRM might have left behind. */
- *(vip)CIA_IOC_CFG = 0;
-
- /* Zero the HAEs. */
- *(vip)CIA_IOC_HAE_MEM = 0;
- *(vip)CIA_IOC_HAE_IO = 0;
- /* For PYXIS, we always use BWX bus and i/o accesses. To that end,
- make sure they're enabled on the controller. At the same time,
- enable the monster window. */
- if (is_pyxis) {
- temp = *(vip)CIA_IOC_CIA_CNFG;
- temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN;
- *(vip)CIA_IOC_CIA_CNFG = temp;
- }
- /* Synchronize with all previous changes. */
- mb();
- *(vip)CIA_IOC_CIA_REV;
- /*
- * Create our single hose.
- */
- pci_isa_hose = hose = alloc_pci_controller();
- hose->io_space = &ioport_resource;
- hose->mem_space = &iomem_resource;
- hose->index = 0;
- if (! is_pyxis) {
- struct resource *hae_mem = alloc_resource();
- hose->mem_space = hae_mem;
- hae_mem->start = 0;
- hae_mem->end = CIA_MEM_R1_MASK;
- hae_mem->name = pci_hae0_name;
- hae_mem->flags = IORESOURCE_MEM;
- if (request_resource(&iomem_resource, hae_mem) < 0)
- printk(KERN_ERR "Failed to request HAE_MEM\n");
- hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
- hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
- hose->sparse_io_base = CIA_IO - IDENT_ADDR;
- hose->dense_io_base = 0;
- } else {
- hose->sparse_mem_base = 0;
- hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
- hose->sparse_io_base = 0;
- hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
- }
- /*
- * Set up the PCI to main memory translation windows.
- *
- * Window 0 is S/G 8MB at 8MB (for isa)
- * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1)
- * Window 2 is direct access 2GB at 2GB
- * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1)
- *
- * ??? NetBSD hints that page tables must be aligned to 32K,
- * possibly due to a hardware bug. This is over-aligned
- * from the 8K alignment one would expect for an 8MB window.
- * There is no indication of which revisions are affected.
- */
- hose->sg_pci = NULL;
- hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);
- __direct_map_base = 0x80000000;
- __direct_map_size = 0x80000000;
- *(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
- *(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
- *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
- *(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
- *(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
- *(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2;
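- /* With T2_BASE == 0 the direct-mapped window is a simple offset:
-    PCI bus addresses 0x80000000..0xffffffff hit window 2 and map
-    straight to physical memory 0..0x7fffffff. */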
- /* On PYXIS we have the monster window, selected by bit 40, so
- there is no need for window3 to be enabled.
- On CIA, we don't have true arbitrary addressing -- bits <39:32>
- are compared against W_DAC. We can, however, directly map 4GB,
- which is better than before. However, due to assumptions made
- elsewhere, we should not claim that we support DAC unless that
- 4GB covers all of physical memory.
- On CIA rev 1, apparently W1 and W2 can't be used for SG.
- At least, there are reports that it doesn't work for Alcor.
- In that case, we have no choice but to use W3 for the TBIA
- workaround, which means we can't use DAC at all. */
- tbia_window = 1;
- if (is_pyxis) {
- *(vip)CIA_IOC_PCI_W3_BASE = 0;
- } else if (cia_rev == 1) {
- *(vip)CIA_IOC_PCI_W1_BASE = 0;
- tbia_window = 3;
- } else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
- *(vip)CIA_IOC_PCI_W3_BASE = 0;
- } else {
- *(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
- *(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000;
- *(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2;
- alpha_mv.pci_dac_offset = 0x200000000UL;
- *(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32;
- }
- /* Prepare workaround for apparently broken tbia. */
- cia_prepare_tbia_workaround(tbia_window);
- }
- void __init
- cia_init_arch(void)
- {
- do_init_arch(0);
- }
- void __init
- pyxis_init_arch(void)
- {
- /* On pyxis machines we can precisely calculate the
- CPU clock frequency using pyxis real time counter.
- It's especially useful for SX164 with broken RTC.
- Both CPU and chipset are driven by the single 16.666M
- or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
- 66.66 MHz. -ink */
- unsigned int cc0, cc1;
- unsigned long pyxis_cc;
- __asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
- pyxis_cc = *(vulp)PYXIS_RT_COUNT;
- do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
- __asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
- cc1 -= cc0;
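- /* 4096 ticks of the 66.66 MHz PYXIS_RT_COUNT take 4096/66666666 s,
-    so cycle_freq = cc1 * 66666666 / 4096, which is rewritten below
-    as (cc1 >> 11) * 100000000 / 3 (66666666.67/4096 == 100000000/6144). */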
- hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
- hwrpb_update_checksum(hwrpb);
- do_init_arch(1);
- }
- void
- cia_kill_arch(int mode)
- {
- if (alpha_using_srm)
- cia_restore_srm_settings();
- }
- void __init
- cia_init_pci(void)
- {
- /* Must delay this from init_arch, as we need machine checks. */
- verify_tb_operation();
- common_init_pci();
- }
- static inline void
- cia_pci_clr_err(void)
- {
- int jd;
- jd = *(vip)CIA_IOC_CIA_ERR;
- *(vip)CIA_IOC_CIA_ERR = jd;
- mb();
- *(vip)CIA_IOC_CIA_ERR; /* re-read to force write. */
|