diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 8126dbd..dc83880 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1,4 +1,5 @@
 /*
+**
 **  PCI Lower Bus Adapter (LBA) manager
 **
 **     (c) Copyright 1999,2000 Grant Grundler
 #include <linux/smp_lock.h>
 
 #include <asm/byteorder.h>
-#include <asm/irq.h>           /* for struct irq_region support */
 #include <asm/pdc.h>
 #include <asm/pdcpat.h>
 #include <asm/page.h>
-#include <asm/segment.h>
 #include <asm/system.h>
 
 #include <asm/hardware.h>      /* for register_parisc_driver() stuff */
 #include <asm/iosapic.h>       /* for iosapic_register() */
 #include <asm/io.h>            /* read/write stuff */
 
-#ifndef TRUE
-#define TRUE (1 == 1)
-#define FALSE (1 == 0)
-#endif
-
 #undef DEBUG_LBA       /* general stuff */
 #undef DEBUG_LBA_PORT  /* debug I/O Port access */
 #undef DEBUG_LBA_CFG   /* debug Config Space Access (ie PCI Bus walk) */
 #define DBG_PAT(x...)
 #endif
 
-#ifdef DEBUG_LBA
-#undef ASSERT
-#define ASSERT(expr) \
-       if(!(expr)) { \
-               printk("\n%s:%d: Assertion " #expr " failed!\n", \
-                               __FILE__, __LINE__); \
-               panic(#expr); \
-       }
-#else
-#define ASSERT(expr)
-#endif
-
 
 /*
 ** Config accessor functions only pass in the 8-bit bus number and not
 ** bus number for each LBA depending on what firmware does.
 */
 
-#define MODULE_NAME "lba"
+#define MODULE_NAME "LBA"
 
 #define LBA_FUNC_ID    0x0000  /* function id */
 #define LBA_FCLASS     0x0008  /* function class, bist, header, rev... */
 #define LBA_EIOS_BASE  0x0260  /* Extra I/O port space */
 #define LBA_EIOS_MASK  0x0268
 
+#define LBA_GLOBAL_MASK        0x0270  /* Mercury only: Global Address Mask */
 #define LBA_DMA_CTL    0x0278  /* firmware sets this */
 
 #define LBA_IBASE      0x0300  /* SBA DMA support */
 #define LBA_IOSAPIC_BASE       0x800 /* Offset of IRQ logic */
 
 /* non-postable I/O port space, densely packed */
-#ifdef CONFIG_PARISC64
-#define LBA_ASTRO_PORT_BASE    (0xfffffffffee00000UL)
-#else
-#define LBA_ASTRO_PORT_BASE    (0xfee00000UL)
-#endif
+#define LBA_PORT_BASE  (PCI_F_EXTEND | 0xfee00000UL)
+static void __iomem *astro_iop_base;
 
 #define ELROY_HVERS    0x782
 #define MERCURY_HVERS  0x783
@@ -218,8 +198,8 @@ struct lba_device {
        spinlock_t      lba_lock;
        void            *iosapic_obj;
 
-#ifdef CONFIG_PARISC64
-       unsigned long   iop_base;    /* PA_VIEW - for IO port accessor funcs */
+#ifdef CONFIG_64BIT
+       void __iomem *  iop_base;    /* PA_VIEW - for IO port accessor funcs */
 #endif
 
        int             flags;       /* state/functionality enabled */
@@ -229,15 +209,9 @@ struct lba_device {
 
 static u32 lba_t32;
 
-/*
-** lba "flags"
-*/
-#define LBA_FLAG_NO_DMA_DURING_CFG     0x01
+/* lba flags */
 #define LBA_FLAG_SKIP_PROBE    0x10
 
-/* Tape Release 4 == hw_rev 5 */
-#define LBA_TR4PLUS(d)      ((d)->hw_rev > 0x4)
-#define LBA_DMA_DURING_CFG_DISABLED(d) ((d)->flags & LBA_FLAG_NO_DMA_DURING_CFG)
 #define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)
 
 
@@ -283,8 +257,8 @@ static u32 lba_t32;
 ** Extract LBA (Rope) number from HPA
 ** REVISIT: 16 ropes for Stretch/Ike?
 */
-#define ROPES_PER_SBA  8
-#define LBA_NUM(x)    ((((unsigned long) x) >> 13) & (ROPES_PER_SBA-1))
+#define ROPES_PER_IOC  8
+#define LBA_NUM(x)    ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
 
 
 static void
@@ -297,7 +271,7 @@ lba_dump_res(struct resource *r, int d)
 
        printk(KERN_DEBUG "(%p)", r->parent);
        for (i = d; i ; --i) printk(" ");
-       printk(KERN_DEBUG "%p [%lx,%lx]/%x\n", r, r->start, r->end, (int) r->flags);
+       printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r, r->start, r->end, r->flags);
        lba_dump_res(r->child, d+2);
        lba_dump_res(r->sibling, d);
 }
@@ -307,7 +281,7 @@ lba_dump_res(struct resource *r, int d)
 ** LBA rev 2.0, 2.1, 2.2, and 3.0 bus walks require a complex
 ** workaround for cfg cycles:
 **     -- preserve  LBA state
-**     -- LBA_FLAG_NO_DMA_DURING_CFG workaround
+**     -- prevent any DMA from occurring
 **     -- turn on smart mode
 **     -- probe with config writes before doing config reads
 **     -- check ERROR_STATUS
@@ -317,25 +291,18 @@ lba_dump_res(struct resource *r, int d)
 ** The workaround is only used for device discovery.
 */
 
-static int
-lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
+static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
 {
        u8 first_bus = d->hba.hba_bus->secondary;
        u8 last_sub_bus = d->hba.hba_bus->subordinate;
 
-       ASSERT(bus >= first_bus);
-       ASSERT(bus <= last_sub_bus);
-       ASSERT((bus - first_bus) < LBA_MAX_NUM_BUSES);
-
        if ((bus < first_bus) ||
            (bus > last_sub_bus) ||
-           ((bus - first_bus) >= LBA_MAX_NUM_BUSES))
-       {
-           /* devices that fall into any of these cases won't get claimed */
-           return(FALSE);
+           ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
+               return 0;
        }
 
-       return TRUE;
+       return 1;
 }
 
 
@@ -350,7 +317,6 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
     /* For LBA rev 2.0, 2.1, 2.2, and 3.0, we must disable DMA         \
     ** arbitration for full bus walks.                                 \
     */                                                                 \
-    if (LBA_DMA_DURING_CFG_DISABLED(d)) {                              \
        /* Save contents of arb mask register. */                       \
        arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK);         \
 \
@@ -358,8 +324,7 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
         * Turn off all device arbitration bits (i.e. everything        \
         * except arbitration enable bit).                              \
         */                                                             \
-       WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);                      \
-    }                                                                  \
+       WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK);              \
 \
     /*                                                                 \
      * Set the smart mode bit so that master aborts don't cause                \
@@ -379,7 +344,7 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
      * Read address register to ensure that LBA is the bus master,     \
      * which implies that DMA traffic has stopped when DMA arb is off. \
      */                                                                        \
-    lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);               \
+    lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);       \
     /*                                                                 \
      * Generate a cfg write cycle (will have no affect on              \
      * Vendor ID register since read-only).                            \
@@ -389,7 +354,7 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
      * Make sure write has completed before proceeding further,                \
      * i.e. before setting clear enable.                               \
      */                                                                        \
-    lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);               \
+    lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);       \
 }
 
 
@@ -443,19 +408,16 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
     }                                                                  \
 }
 
-#define LBA_CFG_TR4_ADDR_SETUP(d, addr) \
-    WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR)
+#define LBA_CFG_TR4_ADDR_SETUP(d, addr)                                        \
+       WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);
 
-#define LBA_CFG_ADDR_SETUP(d, addr) {                          \
+#define LBA_CFG_ADDR_SETUP(d, addr) {                                  \
     WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
     /*                                                                 \
-     * HPREVISIT:                                                      \
-     *       --        Potentially could skip this once DMA bug fixed.         \
-     *                                                                 \
      * Read address register to ensure that LBA is the bus master,     \
      * which implies that DMA traffic has stopped when DMA arb is off. \
      */                                                                        \
-    lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);               \
+    lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR);       \
 }
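
For orientation (not part of the patch): the READ_REG32 of LBA_PCI_CFG_ADDR after each write in these macros is the usual read-back idiom, per the comments above it both flushes the posted MMIO write and confirms the LBA owns the bus before a config data cycle is generated. A minimal sketch of the idiom, assuming READ_REG32/WRITE_REG32 are thin byte-swapping wrappers around __raw_readl/__raw_writel (an assumption about definitions elsewhere in this file); the helper name below is hypothetical:

        /* Hypothetical helper, illustrative only. */
        static inline void lba_cfg_addr_flush(void __iomem *cfg_addr_reg, u32 tok)
        {
                /* Select the dword-aligned config address... */
                __raw_writel(cpu_to_le32(tok & ~3), cfg_addr_reg);
                /* ...then read it back so the posted write reaches the
                 * LBA before any config data access is issued. */
                (void) __raw_readl(cfg_addr_reg);
        }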
 
 
@@ -468,12 +430,10 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
      * Restore error config register (turn off smart mode).            \
      */                                                                        \
     WRITE_REG32(error_config, base + LBA_ERROR_CONFIG);                        \
-    if (LBA_DMA_DURING_CFG_DISABLED(d)) {                              \
        /*                                                              \
         * Restore arb mask register (reenables DMA arbitration).       \
         */                                                             \
        WRITE_REG32(arb_mask, base + LBA_ARB_MASK);                     \
-    }                                                                  \
 }
 
 
@@ -481,100 +441,50 @@ lba_device_present( u8 bus, u8 dfn, struct lba_device *d)
 static unsigned int
 lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
 {
-       u32 data = ~0;
+       u32 data = ~0U;
        int error = 0;
        u32 arb_mask = 0;       /* used by LBA_CFG_SETUP/RESTORE */
        u32 error_config = 0;   /* used by LBA_CFG_SETUP/RESTORE */
        u32 status_control = 0; /* used by LBA_CFG_SETUP/RESTORE */
 
-       ASSERT((size == sizeof(u8)) ||
-               (size == sizeof(u16)) ||
-               (size == sizeof(u32)));
-
-       if ((size != sizeof(u8)) &&
-               (size != sizeof(u16)) &&
-               (size != sizeof(u32))) {
-               return(data);
-       }
-
        LBA_CFG_SETUP(d, tok);
        LBA_CFG_PROBE(d, tok);
        LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
        if (!error) {
+               void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
+
                LBA_CFG_ADDR_SETUP(d, tok | reg);
                switch (size) {
-               case sizeof(u8):
-                       data = (u32) READ_REG8(d->hba.base_addr + LBA_PCI_CFG_DATA + (reg & 3));
-                       break;
-               case sizeof(u16):
-                       data = (u32) READ_REG16(d->hba.base_addr + LBA_PCI_CFG_DATA + (reg & 2));
-                       break;
-               case sizeof(u32):
-                       data = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_DATA);
-                       break;
-               default:
-                       break; /* leave data as -1 */
+               case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
+               case 2: data = (u32) READ_REG16(data_reg + (reg & 2)); break;
+               case 4: data = READ_REG32(data_reg); break;
                }
        }
        LBA_CFG_RESTORE(d, d->hba.base_addr);
        return(data);
 }
 
-#ifdef CONFIG_PARISC64
-#define pat_cfg_addr(bus, devfn, addr) (((bus) << 16) | ((devfn) << 8) | (addr))
-
-static int pat_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
-{
-       int tok = pat_cfg_addr(bus->number, devfn, pos);
-       u32 tmp;
-       int ret = pdc_pat_io_pci_cfg_read(tok, size, &tmp);
-
-       DBG_CFG("%s(%d:%d.%d+0x%02x) -> 0x%x %d\n", __FUNCTION__, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), pos, tmp, ret);
-
-       switch (size) {
-               case 1: *data = (u8)  tmp; return (tmp == (u8)  ~0);
-               case 2: *data = (u16) tmp; return (tmp == (u16) ~0);
-               case 4: *data = (u32) tmp; return (tmp == (u32) ~0);
-       }
-       *data = ~0;
-       return (ret);
-}
-
-static int pat_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
-{
-       int tok = pat_cfg_addr(bus->number, devfn, pos);
-       int ret = pdc_pat_io_pci_cfg_write(tok, size, data);
-
-       DBG_CFG("%s(%d:%d.%d+0x%02x, 0x%lx/%d)\n", __FUNCTION__, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), pos, data, size);
-       return (ret);
-}
-
-static struct pci_ops pat_cfg_ops = {
-       .read =         pat_cfg_read,
-       .write =        pat_cfg_write,
-};
-#else
-/* keep the compiler from complaining about undeclared variables */
-#define pat_cfg_ops lba_cfg_ops
-#endif
 
-static int lba_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
+static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
 {
        struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
        u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
        u32 tok = LBA_CFG_TOK(local_bus, devfn);
+       void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
+
+       if ((pos > 255) || (devfn > 255))
+               return -EINVAL;
 
 /* FIXME: B2K/C3600 workaround is always use old method... */
-       /* if (!LBA_TR4PLUS(d) && !LBA_SKIP_PROBE(d)) */ {
+       /* if (!LBA_SKIP_PROBE(d)) */ {
                /* original - Generate config cycle on broken elroy
                  with risk we will miss PCI bus errors. */
                *data = lba_rd_cfg(d, tok, pos, size);
                DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __FUNCTION__, tok, pos, *data);
-               return(*data == ~0U);
+               return 0;
        }
 
-       if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d)))
-       {
+       if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) {
                DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __FUNCTION__, tok, pos);
                /* either don't want to look or know device isn't present. */
                *data = ~0U;
@@ -585,52 +495,32 @@ static int lba_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int si
        ** Should only get here on fully working LBA rev.
        ** This is how simple the code should have been.
        */
-       LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
+       LBA_CFG_ADDR_SETUP(d, tok | pos);
        switch(size) {
-       case 1: *(u8 *)  data = READ_REG8(d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
-               break;
-       case 2: *(u16 *) data = READ_REG16(d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
-               break;
-       case 4: *(u32 *) data = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_DATA);
-          break;
+       case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
+       case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
+       case 4: *data = READ_REG32(data_reg); break;
        }
        DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __FUNCTION__, tok, pos, *data);
-       return(*data == ~0U);
+       return 0;
 }
 
 
 static void
-lba_wr_cfg( struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
+lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
 {
        int error = 0;
        u32 arb_mask = 0;
        u32 error_config = 0;
        u32 status_control = 0;
-
-       ASSERT((size == sizeof(u8)) ||
-               (size == sizeof(u16)) ||
-               (size == sizeof(u32)));
-
-       if ((size != sizeof(u8)) &&
-               (size != sizeof(u16)) &&
-               (size != sizeof(u32))) {
-                       return;
-       }
+       void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
 
        LBA_CFG_SETUP(d, tok);
        LBA_CFG_ADDR_SETUP(d, tok | reg);
        switch (size) {
-       case sizeof(u8):
-               WRITE_REG8((u8) data, d->hba.base_addr + LBA_PCI_CFG_DATA + (reg&3));
-               break;
-       case sizeof(u16):
-               WRITE_REG16((u8) data, d->hba.base_addr + LBA_PCI_CFG_DATA +(reg&2));
-               break;
-       case sizeof(u32):
-               WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
-               break;
-       default:
-               break;
+       case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
+       case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
+       case 4: WRITE_REG32(data, data_reg);             break;
        }
        LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
        LBA_CFG_RESTORE(d, d->hba.base_addr);
@@ -642,16 +532,16 @@ lba_wr_cfg( struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
  * by doing a read of CONFIG ADDR after the write.
  */
 
-static int lba_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
+static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
 {
        struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
        u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
        u32 tok = LBA_CFG_TOK(local_bus,devfn);
 
-       ASSERT((tok & 0xff) == 0);
-       ASSERT(pos < 0x100);
+       if ((pos > 255) || (devfn > 255))
+               return -EINVAL;
 
-       if (!LBA_TR4PLUS(d) && !LBA_SKIP_PROBE(d)) {
+       if (!LBA_SKIP_PROBE(d)) {
                /* Original Workaround */
                lba_wr_cfg(d, tok, pos, (u32) data, size);
                DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __FUNCTION__, tok, pos,data);
@@ -666,7 +556,7 @@ static int lba_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int s
        DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __FUNCTION__, tok, pos, data);
 
        /* Basic Algorithm */
-       LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
+       LBA_CFG_ADDR_SETUP(d, tok | pos);
        switch(size) {
        case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
                   break;
@@ -681,9 +571,82 @@ static int lba_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int s
 }
 
 
-static struct pci_ops lba_cfg_ops = {
-       .read =         lba_cfg_read,
-       .write =        lba_cfg_write,
+static struct pci_ops elroy_cfg_ops = {
+       .read =         elroy_cfg_read,
+       .write =        elroy_cfg_write,
+};
+
+/*
+ * The mercury_cfg_ops are slightly misnamed; they're also used for Elroy
+ * TR4.0 as no additional bugs were found in this area between Elroy and
+ * Mercury.
+ */
+
+static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
+{
+       struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
+       u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
+       u32 tok = LBA_CFG_TOK(local_bus, devfn);
+       void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
+
+       if ((pos > 255) || (devfn > 255))
+               return -EINVAL;
+
+       LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
+       switch(size) {
+       case 1:
+               *data = READ_REG8(data_reg + (pos & 3));
+               break;
+       case 2:
+               *data = READ_REG16(data_reg + (pos & 2));
+               break;
+       case 4:
+               *data = READ_REG32(data_reg);
+               break;
+       }
+
+       DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
+       return 0;
+}
+
+/*
+ * LBA 4.0 config write code implements non-postable semantics
+ * by doing a read of CONFIG ADDR after the write.
+ */
+
+static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
+{
+       struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
+       void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
+       u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
+       u32 tok = LBA_CFG_TOK(local_bus,devfn);
+
+       if ((pos > 255) || (devfn > 255))
+               return -EINVAL;
+
+       DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __FUNCTION__, tok, pos, data);
+
+       LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
+       switch(size) {
+       case 1:
+               WRITE_REG8 (data, data_reg + (pos & 3));
+               break;
+       case 2:
+               WRITE_REG16(data, data_reg + (pos & 2));
+               break;
+       case 4:
+               WRITE_REG32(data, data_reg);
+               break;
+       }
+
+       /* flush posted write */
+       lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
+       return 0;
+}
+
+static struct pci_ops mercury_cfg_ops = {
+       .read =         mercury_cfg_read,
+       .write =        mercury_cfg_write,
 };
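
Both ops tables plug into the generic PCI core: once lba_driver_probe() (later in this patch) hands one of them to pci_scan_bus_parented(), the ordinary config-space accessors dispatch through it. A minimal sketch of that path using only the standard pci_read_config_dword() interface; the helper name and the vendor-ID check are illustrative, not from this patch:

        static int lba_example_check_vendor(struct pci_dev *dev)
        {
                u32 vendor;

                /* Resolves to dev->bus->ops->read(), i.e. elroy_cfg_read()
                 * or mercury_cfg_read(), depending on which struct pci_ops
                 * was registered for this bus. */
                pci_read_config_dword(dev, PCI_VENDOR_ID, &vendor);

                /* All-ones is the master-abort signature for an absent device. */
                return (vendor == ~0U) ? -ENODEV : 0;
        }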
 
 
@@ -694,7 +657,7 @@ lba_bios_init(void)
 }
 
 
-#ifdef CONFIG_PARISC64
+#ifdef CONFIG_64BIT
 
 /*
 ** Determine if a device is already configured.
@@ -723,11 +686,11 @@ lba_claim_dev_resources(struct pci_dev *dev)
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                if (dev->resource[i].flags & srch_flags) {
                        pci_claim_resource(dev, i);
-                       DBG("   claimed %s %d [%lx,%lx]/%x\n",
+                       DBG("   claimed %s %d [%lx,%lx]/%lx\n",
                                pci_name(dev), i,
                                dev->resource[i].start,
                                dev->resource[i].end,
-                               (int) dev->resource[i].flags
+                               dev->resource[i].flags
                                );
                }
        }
@@ -756,7 +719,7 @@ lba_fixup_bus(struct pci_bus *bus)
        struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));
        int lba_portbase = HBA_PORT_BASE(ldev->hba.hba_num);
 
-       DBG("lba_fixup_bus(0x%p) bus %d sysdata 0x%p\n",
+       DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
                bus, bus->secondary, bus->bridge->platform_data);
 
        /*
@@ -768,43 +731,78 @@ lba_fixup_bus(struct pci_bus *bus)
                pci_read_bridge_bases(bus);
        } else {
                /* Host-PCI Bridge */
-               int err;
+               int err, i;
 
-               DBG("lba_fixup_bus() %s [%lx/%lx]/%x\n",
+               DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
                        ldev->hba.io_space.name,
                        ldev->hba.io_space.start, ldev->hba.io_space.end,
-                       (int) ldev->hba.io_space.flags);
-               DBG("lba_fixup_bus() %s [%lx/%lx]/%x\n",
+                       ldev->hba.io_space.flags);
+               DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
                        ldev->hba.lmmio_space.name,
                        ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
-                       (int) ldev->hba.lmmio_space.flags);
+                       ldev->hba.lmmio_space.flags);
 
                err = request_resource(&ioport_resource, &(ldev->hba.io_space));
                if (err < 0) {
-                       BUG();
                        lba_dump_res(&ioport_resource, 2);
+                       BUG();
+               }
+
+               if (ldev->hba.elmmio_space.start) {
+                       err = request_resource(&iomem_resource,
+                                       &(ldev->hba.elmmio_space));
+                       if (err < 0) {
+
+                               printk("FAILED: lba_fixup_bus() request for "
+                                               "elmmio_space [%lx/%lx]\n",
+                                               ldev->hba.elmmio_space.start,
+                                               ldev->hba.elmmio_space.end);
+
+                               /* lba_dump_res(&iomem_resource, 2); */
+                               /* BUG(); */
+                       }
                }
 
                err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
                if (err < 0) {
-                       BUG();
-                       lba_dump_res(&iomem_resource, 2);
+                       /*   FIXME  overlaps with elmmio will fail here.
+                        *   Need to prune (or disable) the distributed range.
+                        *
+                        *   BEWARE: conflicts with this lmmio range may be
+                        *   elmmio range which is pointing down another rope.
+                        */
+
+                       printk("FAILED: lba_fixup_bus() request for "
+                                       "lmmio_space [%lx/%lx]\n",
+                                       ldev->hba.lmmio_space.start,
+                                       ldev->hba.lmmio_space.end);
+                       /* lba_dump_res(&iomem_resource, 2); */
                }
 
-#ifdef CONFIG_PARISC64
+#ifdef CONFIG_64BIT
+               /* GMMIO is a distributed range. Every LBA/Rope gets part of it. */
                if (ldev->hba.gmmio_space.flags) {
                        err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
                        if (err < 0) {
-                               BUG();
+                               printk("FAILED: lba_fixup_bus() request for "
+                                       "gmmio_space [%lx/%lx]\n",
+                                       ldev->hba.gmmio_space.start,
+                                       ldev->hba.gmmio_space.end);
                                lba_dump_res(&iomem_resource, 2);
+                               BUG();
                        }
-                       bus->resource[2] = &(ldev->hba.gmmio_space);
                }
 #endif
 
                /* advertize Host bridge resources to PCI bus */
                bus->resource[0] = &(ldev->hba.io_space);
                bus->resource[1] = &(ldev->hba.lmmio_space);
+               i = 2;
+               if (ldev->hba.elmmio_space.start)
+                       bus->resource[i++] = &(ldev->hba.elmmio_space);
+               if (ldev->hba.gmmio_space.start)
+                       bus->resource[i++] = &(ldev->hba.gmmio_space);
        }
 
        list_for_each(ln, &bus->devices) {
@@ -837,6 +835,9 @@ lba_fixup_bus(struct pci_bus *bus)
                                res->start = PCI_HOST_ADDR(HBA_DATA(ldev), res->start);
                                res->end   = PCI_HOST_ADDR(HBA_DATA(ldev), res->end);
                                DBG("[%lx/%lx]\n", res->start, res->end);
+                       } else {
+                               DBG("lba_fixup_bus() WTF? 0x%lx [%lx/%lx] XXX",
+                                       res->flags, res->start, res->end);
                        }
                }
 
@@ -918,7 +919,7 @@ struct pci_bios_ops lba_bios_ops = {
 static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
 { \
        u##size t; \
-       t = READ_REG##size(LBA_ASTRO_PORT_BASE + addr); \
+       t = READ_REG##size(astro_iop_base + addr); \
        DBG_PORT(" 0x%x\n", t); \
        return (t); \
 }
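
Since LBA_PORT_IN() builds its accessor functions by token pasting, one expansion spelled out may help. Based only on the lines visible in this hunk (any DBG_PORT tracing ahead of the read, if present in the full macro, is elided), LBA_PORT_IN(8, 3) generates roughly:

        static u8 lba_astro_in8(struct pci_hba_data *d, u16 addr)
        {
                u8 t;

                /* astro_iop_base is the ioremap()ed replacement for the
                 * old hard-coded LBA_ASTRO_PORT_BASE physical address. */
                t = READ_REG8(astro_iop_base + addr);
                DBG_PORT(" 0x%x\n", t);
                return (t);
        }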
@@ -958,9 +959,8 @@ LBA_PORT_IN(32, 0)
 #define LBA_PORT_OUT(size, mask) \
 static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
 { \
-       ASSERT(d != NULL); \
        DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, d, addr, val); \
-       WRITE_REG##size(val, LBA_ASTRO_PORT_BASE + addr); \
+       WRITE_REG##size(val, astro_iop_base + addr); \
        if (LBA_DEV(d)->hw_rev < 3) \
                lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
 }
@@ -980,7 +980,7 @@ static struct pci_port_ops lba_astro_port_ops = {
 };
 
 
-#ifdef CONFIG_PARISC64
+#ifdef CONFIG_64BIT
 #define PIOP_TO_GMMIO(lba, addr) \
        ((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))
 
@@ -1001,7 +1001,6 @@ static struct pci_port_ops lba_astro_port_ops = {
 static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
 { \
        u##size t; \
-       ASSERT(bus != NULL); \
        DBG_PORT("%s(0x%p, 0x%x) ->", __FUNCTION__, l, addr); \
        t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
        DBG_PORT(" 0x%x\n", t); \
@@ -1018,7 +1017,6 @@ LBA_PORT_IN(32, 0)
 static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
 { \
        void *where = (void *) PIOP_TO_GMMIO(LBA_DEV(l), addr); \
-       ASSERT(bus != NULL); \
        DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __FUNCTION__, l, addr, val); \
        WRITE_REG##size(val, where); \
        /* flush the I/O down to the elroy at least */ \
@@ -1099,10 +1097,26 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
 
                case PAT_LMMIO:
                        /* used to fix up pre-initialized MEM BARs */
-                       lba_dev->hba.lmmio_space_offset = p->start - io->start;
+                       if (!lba_dev->hba.lmmio_space.start) {
+                               sprintf(lba_dev->hba.lmmio_name,
+                                               "PCI%02lx LMMIO",
+                                               lba_dev->hba.bus_num.start);
+                               lba_dev->hba.lmmio_space_offset = p->start -
+                                       io->start;
+                               r = &lba_dev->hba.lmmio_space;
+                               r->name = lba_dev->hba.lmmio_name;
+                       } else if (!lba_dev->hba.elmmio_space.start) {
+                               sprintf(lba_dev->hba.elmmio_name,
+                                               "PCI%02lx ELMMIO",
+                                               lba_dev->hba.bus_num.start);
+                               r = &lba_dev->hba.elmmio_space;
+                               r->name = lba_dev->hba.elmmio_name;
+                       } else {
+                               printk(KERN_WARNING MODULE_NAME
+                                       " only supports 2 LMMIO resources!\n");
+                               break;
+                       }
 
-                       r = &(lba_dev->hba.lmmio_space);
-                       r->name   = "LBA LMMIO";
                        r->start  = p->start;
                        r->end    = p->end;
                        r->flags  = IORESOURCE_MEM;
@@ -1111,8 +1125,10 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
 
                case PAT_GMMIO:
                        /* MMIO space > 4GB phys addr; for 64-bit BAR */
-                       r = &(lba_dev->hba.gmmio_space);
-                       r->name   = "LBA GMMIO";
+                       sprintf(lba_dev->hba.gmmio_name, "PCI%02lx GMMIO",
+                                       lba_dev->hba.bus_num.start);
+                       r = &lba_dev->hba.gmmio_space;
+                       r->name  = lba_dev->hba.gmmio_name;
                        r->start  = p->start;
                        r->end    = p->end;
                        r->flags  = IORESOURCE_MEM;
@@ -1130,10 +1146,12 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
                        ** Postable I/O port space is per PCI host adapter.
                        ** base of 64MB PIOP region
                        */
-                       lba_dev->iop_base = p->start;
+                       lba_dev->iop_base = ioremap(p->start, 64 * 1024 * 1024);
 
-                       r = &(lba_dev->hba.io_space);
-                       r->name   = "LBA I/O Port";
+                       sprintf(lba_dev->hba.io_name, "PCI%02lx Ports",
+                                       lba_dev->hba.bus_num.start);
+                       r = &lba_dev->hba.io_space;
+                       r->name  = lba_dev->hba.io_name;
                        r->start  = HBA_PORT_BASE(lba_dev->hba.hba_num);
                        r->end    = r->start + HBA_PORT_SPACE_SIZE - 1;
                        r->flags  = IORESOURCE_IO;
@@ -1152,27 +1170,20 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
 /* keep compiler from complaining about missing declarations */
 #define lba_pat_port_ops lba_astro_port_ops
 #define lba_pat_resources(pa_dev, lba_dev)
-#endif /* CONFIG_PARISC64 */
+#endif /* CONFIG_64BIT */
+
+
+extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
+extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
 
 
 static void
 lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
 {
        struct resource *r;
-       unsigned long rsize;
        int lba_num;
 
-#ifdef CONFIG_PARISC64
-       /*
-       ** Sign extend all BAR values on "legacy" platforms.
-       ** "Sprockets" PDC (Forte/Allegro) initializes everything
-       ** for "legacy" 32-bit OS (HPUX 10.20).
-       ** Upper 32-bits of 64-bit BAR will be zero too.
-       */
-       lba_dev->hba.lmmio_space_offset = 0xffffffff00000000UL;
-#else
-       lba_dev->hba.lmmio_space_offset = 0UL;
-#endif
+       lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
 
        /*
        ** With "legacy" firmware, the lowest byte of FW_SCRATCH
@@ -1181,33 +1192,110 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
        ** PCI bus walk *should* end up with the same result.
        ** FIXME: But we don't have sanity checks in PCI or LBA.
        */
-       lba_num = READ_REG32(pa_dev->hpa + LBA_FW_SCRATCH);
+       lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
        r = &(lba_dev->hba.bus_num);
        r->name = "LBA PCI Busses";
        r->start = lba_num & 0xff;
        r->end = (lba_num>>8) & 0xff;
 
-       /* Set up local PCI Bus resources - we don't really need
-       ** them for Legacy boxes but it's nice to see in /proc.
+       /* Set up local PCI Bus resources - we don't need them for
+       ** Legacy boxes but it's nice to see in /proc/iomem.
        */
        r = &(lba_dev->hba.lmmio_space);
-       r->name  = "LBA PCI LMMIO";
-       r->flags = IORESOURCE_MEM;
-       /* Ignore "Range Enable" bit in the BASE register */
-       r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev),
-               ((long) READ_REG32(pa_dev->hpa + LBA_LMMIO_BASE)) & ~1UL);
-       rsize =  ~READ_REG32(pa_dev->hpa + LBA_LMMIO_MASK) + 1;
-
+       sprintf(lba_dev->hba.lmmio_name, "PCI%02lx LMMIO",
+                                       lba_dev->hba.bus_num.start);
+       r->name  = lba_dev->hba.lmmio_name;
+
+#if 1
+       /* We want the CPU -> IO routing of addresses.
+        * The SBA BASE/MASK registers control CPU -> IO routing.
+        * Ask SBA what is routed to this rope/LBA.
+        */
+       sba_distributed_lmmio(pa_dev, r);
+#else
        /*
-       ** Each rope only gets part of the distributed range.
-       ** Adjust "window" for this rope
-       */
-       rsize /= ROPES_PER_SBA;
-       r->start += rsize * LBA_NUM(pa_dev->hpa);
-       r->end = r->start + rsize - 1 ;
+        * The LBA BASE/MASK registers control IO -> System routing.
+        *
+        * The following code works but doesn't get us what we want.
+        * Well, only because firmware (v5.0) on C3000 doesn't program
+        * the LBA BASE/MASK registers to be the exact inverse of
+        * the corresponding SBA registers. Other Astro/Pluto
+        * based platform firmware may do it right.
+        *
+        * Should someone want to mess with MSI, they may need to
+        * reprogram LBA BASE/MASK registers. Thus preserve the code
+        * below until MSI is known to work on C3000/A500/N4000/RP3440.
+        *
+        * Using the code below, /proc/iomem shows:
+        * ...
+        * f0000000-f0ffffff : PCI00 LMMIO
+        *   f05d0000-f05d0000 : lcd_data
+        *   f05d0008-f05d0008 : lcd_cmd
+        * f1000000-f1ffffff : PCI01 LMMIO
+        * f4000000-f4ffffff : PCI02 LMMIO
+        *   f4000000-f4001fff : sym53c8xx
+        *   f4002000-f4003fff : sym53c8xx
+        *   f4004000-f40043ff : sym53c8xx
+        *   f4005000-f40053ff : sym53c8xx
+        *   f4007000-f4007fff : ohci_hcd
+        *   f4008000-f40083ff : tulip
+        * f6000000-f6ffffff : PCI03 LMMIO
+        * f8000000-fbffffff : PCI00 ELMMIO
+        *   fa100000-fa4fffff : stifb mmio
+        *   fb000000-fb1fffff : stifb fb
+        *
+        * But everything listed under PCI02 actually lives under PCI00.
+        * This is clearly wrong.
+        *
+        * Asking SBA how things are routed tells the correct story:
+        * LMMIO_BASE/MASK/ROUTE f4000001 fc000000 00000000
+        * DIR0_BASE/MASK/ROUTE fa000001 fe000000 00000006
+        * DIR1_BASE/MASK/ROUTE f9000001 ff000000 00000004
+        * DIR2_BASE/MASK/ROUTE f0000000 fc000000 00000000
+        * DIR3_BASE/MASK/ROUTE f0000000 fc000000 00000000
+        *
+        * Which looks like this in /proc/iomem:
+        * f4000000-f47fffff : PCI00 LMMIO
+        *   f4000000-f4001fff : sym53c8xx
+        *   ...[deleted core devices - same as above]...
+        *   f4008000-f40083ff : tulip
+        * f4800000-f4ffffff : PCI01 LMMIO
+        * f6000000-f67fffff : PCI02 LMMIO
+        * f7000000-f77fffff : PCI03 LMMIO
+        * f9000000-f9ffffff : PCI02 ELMMIO
+        * fa000000-fbffffff : PCI03 ELMMIO
+        *   fa100000-fa4fffff : stifb mmio
+        *   fb000000-fb1fffff : stifb fb
+        *
+        * ie all Built-in core devices are now correctly under PCI00.
+        * The "PCI02 ELMMIO" directed range is for:
+        *  +-[02]---03.0  3Dfx Interactive, Inc. Voodoo 2
+        *
+        * All is well now.
+        */
+       r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
+       if (r->start & 1) {
+               unsigned long rsize;
+
+               r->flags = IORESOURCE_MEM;
+               /* mmio_mask also clears Enable bit */
+               r->start &= mmio_mask;
+               r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
+               rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);
+
+               /*
+               ** Each rope only gets part of the distributed range.
+               ** Adjust "window" for this rope.
+               */
+               rsize /= ROPES_PER_IOC;
+               r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa);
+               r->end = r->start + rsize;
+       } else {
+               r->end = r->start = 0;  /* Not enabled. */
+       }
+#endif
 
        /*
-       ** XXX FIXME - ignore LBA_ELMMIO_BASE for now
        ** "Directed" ranges are used when the "distributed range" isn't
        ** sufficient for all devices below a given LBA.  Typically devices
        ** like graphics cards or X25 may need a directed range when the
@@ -1216,38 +1304,41 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
        **
        ** The main reason for ignoring it now frigging complications.
        ** Directed ranges may overlap (and have precedence) over
-       ** distributed ranges. Ie a distributed range assigned to a unused
+       ** distributed ranges. Or a distributed range assigned to an unused
        ** rope may be used by a directed range on a different rope.
        ** Support for graphics devices may require fixing this
        ** since they may be assigned a directed range which overlaps
        ** an existing (but unused portion of) distributed range.
        */
        r = &(lba_dev->hba.elmmio_space);
-       r->name  = "extra LBA PCI LMMIO";
-       r->flags = IORESOURCE_MEM;
-       r->start = READ_REG32(pa_dev->hpa + LBA_ELMMIO_BASE);
-       r->end   = 0;
-
-       /* check Range Enable bit */
-       if (r->start & 1) {
-               /* First baby step to getting Direct Ranges listed in /proc.
-               ** AFAIK, only Sprockets PDC will setup a directed Range.
-               */
+       sprintf(lba_dev->hba.elmmio_name, "PCI%02lx ELMMIO",
+                                       lba_dev->hba.bus_num.start);
+       r->name  = lba_dev->hba.elmmio_name;
 
-               r->start &= ~1;
-               r->end    = r->start;
-               r->end   += ~READ_REG32(pa_dev->hpa + LBA_ELMMIO_MASK);
-               printk(KERN_DEBUG "WARNING: Ignoring enabled ELMMIO BASE 0x%0lx  SIZE 0x%lx\n",
-                       r->start,
-                       r->end + 1);
+#if 1
+       /* See comment which precedes call to sba_directed_lmmio() */
+       sba_directed_lmmio(pa_dev, r);
+#else
+       r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);
 
+       if (r->start & 1) {
+               unsigned long rsize;
+               r->flags = IORESOURCE_MEM;
+               /* mmio_mask also clears Enable bit */
+               r->start &= mmio_mask;
+               r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start);
+               rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
+               r->end = r->start + ~rsize;
        }
+#endif
 
        r = &(lba_dev->hba.io_space);
-       r->name  = "LBA PCI I/O Ports";
+       sprintf(lba_dev->hba.io_name, "PCI%02lx Ports",
+                                       lba_dev->hba.bus_num.start);
+       r->name  = lba_dev->hba.io_name;
        r->flags = IORESOURCE_IO;
-       r->start = READ_REG32(pa_dev->hpa + LBA_IOS_BASE) & ~1L;
-       r->end   = r->start + (READ_REG32(pa_dev->hpa + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));
+       r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
+       r->end   = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));
 
        /* Virtualize the I/O Port space ranges */
        lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
@@ -1296,7 +1387,7 @@ lba_hw_init(struct lba_device *d)
        printk("\n");
 #endif /* DEBUG_LBA_PAT */
 
-#ifdef CONFIG_PARISC64
+#ifdef CONFIG_64BIT
 /*
  * FIXME add support for PDC_PAT_IO "Get slot status" - OLAR support
  * Only N-Class and up can really make use of Get slot status.
@@ -1353,23 +1444,6 @@ lba_hw_init(struct lba_device *d)
 
 
 
-static void __init
-lba_common_init(struct lba_device *lba_dev)
-{
-       pci_bios = &lba_bios_ops;
-       pcibios_register_hba(HBA_DATA(lba_dev));
-       lba_dev->lba_lock = SPIN_LOCK_UNLOCKED; 
-
-       /*
-       ** Set flags which depend on hw_rev
-       */
-       if (!LBA_TR4PLUS(lba_dev)) {
-               lba_dev->flags |= LBA_FLAG_NO_DMA_DURING_CFG;
-       }
-}
-
-
-
 /*
 ** Determine if lba should claim this chip (return 0) or not (return 1).
 ** If so, initialize the chip and tell other partners in crime they
@@ -1380,12 +1454,14 @@ lba_driver_probe(struct parisc_device *dev)
 {
        struct lba_device *lba_dev;
        struct pci_bus *lba_bus;
+       struct pci_ops *cfg_ops;
        u32 func_class;
        void *tmp_obj;
        char *version;
+       void __iomem *addr = ioremap(dev->hpa, 4096);
 
        /* Read HW Rev First */
-       func_class = READ_REG32(dev->hpa + LBA_FCLASS);
+       func_class = READ_REG32(addr + LBA_FCLASS);
 
        if (IS_ELROY(dev)) {    
                func_class &= 0xf;
@@ -1398,24 +1474,40 @@ lba_driver_probe(struct parisc_device *dev)
                case 5: version = "TR4.0"; break;
                default: version = "TR4+";
                }
+
                printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
                        MODULE_NAME, version, func_class & 0xf, dev->hpa);
 
-               /* Just in case we find some prototypes... */
+               if (func_class < 2) {
+                       printk(KERN_WARNING "Can't support LBA older than "
+                               "TR2.1 - continuing under adversity.\n");
+               }
+
+#if 0
+/* Elroy TR4.0 should work with simple algorithm.
+   But it doesn't.  Still missing something. *sigh*
+*/
+               if (func_class > 4) {
+                       cfg_ops = &mercury_cfg_ops;
+               } else
+#endif
+               {
+                       cfg_ops = &elroy_cfg_ops;
+               }
+
        } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
                func_class &= 0xff;
                version = kmalloc(6, GFP_KERNEL);
                sprintf(version,"TR%d.%d",(func_class >> 4),(func_class & 0xf));
-               /* We could use one printk for both and have it outside,
+               /* We could use one printk for both Elroy and Mercury,
                  * but for the mask for func_class.
                  */ 
                printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n",
                        MODULE_NAME, version, func_class & 0xff, dev->hpa);
-       }
-
-       if (func_class < 2) {
-               printk(KERN_WARNING "Can't support LBA older than TR2.1"
-                               " - continuing under adversity.\n");
+               cfg_ops = &mercury_cfg_ops;
+       } else {
+               printk(KERN_ERR "Unknown LBA found at 0x%lx\n", dev->hpa);
+               return -ENODEV;
        }
 
        /*
@@ -1428,8 +1520,7 @@ lba_driver_probe(struct parisc_device *dev)
        */
        
        lba_dev = kmalloc(sizeof(struct lba_device), GFP_KERNEL);
-       if (NULL == lba_dev)
-       {
+       if (!lba_dev) {
                printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
                return(1);
        }
@@ -1439,19 +1530,16 @@ lba_driver_probe(struct parisc_device *dev)
 
        /* ---------- First : initialize data we already have --------- */
 
-       /*
-       ** Need hw_rev to adjust configuration space behavior.
-       ** LBA_TR4PLUS macro uses hw_rev field.
-       */
        lba_dev->hw_rev = func_class;
-
-       lba_dev->hba.base_addr = dev->hpa;  /* faster access */
+       lba_dev->hba.base_addr = addr;
        lba_dev->hba.dev = dev;
        lba_dev->iosapic_obj = tmp_obj;  /* save interrupt handle */
        lba_dev->hba.iommu = sba_get_iommu(dev);  /* get iommu data */
 
        /* ------------ Second : initialize common stuff ---------- */
-       lba_common_init(lba_dev);
+       pci_bios = &lba_bios_ops;
+       pcibios_register_hba(HBA_DATA(lba_dev));
+       spin_lock_init(&lba_dev->lba_lock);
 
        if (lba_hw_init(lba_dev))
                return(1);
@@ -1464,8 +1552,11 @@ lba_driver_probe(struct parisc_device *dev)
                /* Go ask PDC PAT what resources this LBA has */
                lba_pat_resources(dev, lba_dev);
        } else {
-               /* Sprockets PDC uses NPIOP region */
-               pci_port = &lba_astro_port_ops;
+               if (!astro_iop_base) {
+                       /* Sprockets PDC uses NPIOP region */
+                       astro_iop_base = ioremap(LBA_PORT_BASE, 64 * 1024);
+                       pci_port = &lba_astro_port_ops;
+               }
 
                /* Poke the chip a bit for /proc output */
                lba_legacy_resources(dev, lba_dev);
@@ -1478,11 +1569,15 @@ lba_driver_probe(struct parisc_device *dev)
        dev->dev.platform_data = lba_dev;
        lba_bus = lba_dev->hba.hba_bus =
                pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
-                               is_pdc_pat() ? &pat_cfg_ops : &lba_cfg_ops,
-                               NULL);
+                               cfg_ops, NULL);
 
+       /* This is in lieu of calling pci_assign_unassigned_resources() */
        if (is_pdc_pat()) {
                /* assign resources to un-initialized devices */
+
+               DBG_PAT("LBA pci_bus_size_bridges()\n");
+               pci_bus_size_bridges(lba_bus);
+
                DBG_PAT("LBA pci_bus_assign_resources()\n");
                pci_bus_assign_resources(lba_bus);
 
@@ -1493,13 +1588,15 @@ lba_driver_probe(struct parisc_device *dev)
                lba_dump_res(&lba_dev->hba.lmmio_space, 2);
 #endif
        }
+       pci_enable_bridges(lba_bus);
+
 
        /*
        ** Once PCI register ops has walked the bus, access to config
        ** space is restricted. Avoids master aborts on config cycles.
        ** Early LBA revs go fatal on *any* master abort.
        */
-       if (!LBA_TR4PLUS(lba_dev)) {
+       if (cfg_ops == &elroy_cfg_ops) {
                lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
        }
 
@@ -1534,18 +1631,19 @@ void __init lba_init(void)
 ** Only called from sba_iommu.c in order to route ranges (MMIO vs DMA).
 ** sba_iommu is responsible for locking (none needed at init time).
 */
-void
-lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
+void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
 {
-       unsigned long base_addr = lba->hpa;
+       void __iomem * base_addr = ioremap(lba->hpa, 4096);
 
        imask <<= 2;    /* adjust for hints - 2 more bits */
 
-       ASSERT((ibase & 0x003fffff) == 0);
-       ASSERT((imask & 0x003fffff) == 0);
+       /* Make sure we aren't trying to set bits that aren't writeable. */
+       WARN_ON((ibase & 0x001fffff) != 0);
+       WARN_ON((imask & 0x001fffff) != 0);
        
        DBG("%s() ibase 0x%x imask 0x%x\n", __FUNCTION__, ibase, imask);
        WRITE_REG32( imask, base_addr + LBA_IMASK);
        WRITE_REG32( ibase, base_addr + LBA_IBASE);
+       iounmap(base_addr);
 }