** FIXME: add DMA hint support programming in both sba and lba modules.
*/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#define ROPE6_CTL 0x230
#define ROPE7_CTL 0x238
-#define IOC_ROPE0_CFG 0x500 /* pluto only */
-#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
-
-
-
#define HF_ENABLE 0x40
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
-#define READ_REG32(addr) readl(addr)
-#define READ_REG64(addr) readq(addr)
-#define WRITE_REG32(val, addr) writel((val), (addr))
-#define WRITE_REG64(val, addr) writeq((val), (addr))
+#define READ_REG32(addr) le32_to_cpu(__raw_readl(addr))
+#define READ_REG64(addr) le64_to_cpu(__raw_readq(addr))
+#define WRITE_REG32(val, addr) __raw_writel(cpu_to_le32(val), addr)
+#define WRITE_REG64(val, addr) __raw_writeq(cpu_to_le64(val), addr)
#ifdef CONFIG_64BIT
#define READ_REG(addr) READ_REG64(addr)
iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
- DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
+ DBG_INIT("%s() hpa 0x%lx IOV %dMB (%d bits)\n",
__FUNCTION__, ioc->ioc_hpa, iova_space_size >> 20,
iov_order + PAGE_SHIFT);
**
**************************************************************************/
-static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
+/*
+ * ioc_remap - map SBA_FUNC_SIZE bytes of IOC register space at
+ * hpa.start + offset, returning an __iomem cookie for READ_REG/WRITE_REG.
+ * NOTE(review): this hunk both narrows @offset (unsigned int -> int) and
+ * swaps ioremap_nocache() for ioremap(); the surrounding hunks look like a
+ * reversed/mixed patch -- confirm the intended direction against the target
+ * tree before applying.
+ */
+static void __iomem *ioc_remap(struct sba_device *sba_dev, int offset)
{
-	return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
+	return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}
static void sba_hw_init(struct sba_device *sba_dev)
sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
- BUG_ON(err < 0);
+ if (err < 0) {
+ BUG();
+ }
} else if (IS_PLUTO(sba_dev->iodc)) {
int err;
sba_dev->num_ioc = num_ioc;
for (i = 0; i < num_ioc; i++) {
- void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
- unsigned int j;
-
- for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {
-
- /*
- * Clear ROPE(N)_CONFIG AO bit.
- * Disables "NT Ordering" (~= !"Relaxed Ordering")
- * Overrides bit 1 in DMA Hint Sets.
- * Improves netperf UDP_STREAM by ~10% for bcm5701.
- */
- if (IS_PLUTO(sba_dev->iodc)) {
- void __iomem *rope_cfg;
- unsigned long cfg_val;
-
- rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
- cfg_val = READ_REG(rope_cfg);
- cfg_val &= ~IOC_ROPE_AO;
- WRITE_REG(cfg_val, rope_cfg);
- }
-
- /*
- ** Make sure the box crashes on rope errors.
- */
- WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
- }
-
- /* flush out the last writes */
+ /*
+ ** Make sure the box crashes if we get any errors on a rope.
+ */
+ WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
+ WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
+ WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
+ WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
+ WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
+ WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
+ WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
+ WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
+
+ /* flush out the writes */
READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
* (bit #61, big endian), we have to flush and sync every time
* IO-PDIR is changed in Ike/Astro.
*/
- if (ioc_needs_fdc) {
+ if (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) {
printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
} else {
printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
u32 func_class;
int i;
char *version;
- void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
+ void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
struct proc_dir_entry *info_entry, *bitmap_entry, *root;
sba_dump_ranges(sba_addr);
int i;
int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
- BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
+ if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
+ BUG();
r->start = r->end = 0;
int base, size;
int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1)); /* rope # */
- BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
+ if ((t!=HPHW_IOA) && (t!=HPHW_BCPORT))
+ BUG();
r->start = r->end = 0;