int irq;
int kind;
int aapl_bus_id;
- int cable_80 : 1;
- int mediabay : 1;
- int broken_dma : 1;
- int broken_dma_warn : 1;
+ unsigned cable_80 : 1;
+ unsigned mediabay : 1;
+ unsigned broken_dma : 1;
+ unsigned broken_dma_warn : 1;
struct device_node* node;
struct macio_dev *mdev;
u32 timings[4];
* being done by the generic code about the kind of dma controller
* and format of the dma table. This will have to be fixed though.
*/
- volatile struct dbdma_regs* dma_regs;
+ volatile struct dbdma_regs __iomem * dma_regs;
struct dbdma_cmd* dma_table_cpu;
dma_addr_t dma_table_dma;
struct scatterlist* sg_table;
*irq = pmac_ide[ix].irq;
}
+#define PMAC_IDE_REG(x) ((void __iomem *)(IDE_DATA_REG+(x)))
+
/*
* Apply the timings of the proper unit (master/slave) to the shared
* timing register when selecting that unit. This version is for
return;
if (drive->select.b.unit & 0x01)
- writel(pmif->timings[1],
- (unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
+ writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG));
else
- writel(pmif->timings[0],
- (unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
- (void)readl((unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
+ writel(pmif->timings[0], PMAC_IDE_REG(IDE_TIMING_CONFIG));
+ (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
}
/*
return;
if (drive->select.b.unit & 0x01) {
- writel(pmif->timings[1],
- (unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
- writel(pmif->timings[3],
- (unsigned *)(IDE_DATA_REG + IDE_KAUAI_ULTRA_CONFIG));
+ writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
+ writel(pmif->timings[3], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
} else {
- writel(pmif->timings[0],
- (unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
- writel(pmif->timings[2],
- (unsigned *)(IDE_DATA_REG + IDE_KAUAI_ULTRA_CONFIG));
+ writel(pmif->timings[0], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
+ writel(pmif->timings[2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
}
- (void)readl((unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
+ (void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
}
/*
{
u32 tmp;
- writeb(value, port);
- tmp = readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
+ writeb(value, (void __iomem *) port);
+ tmp = readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
}
/*
set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
u8 speed, int drive_cycle_time)
{
- int cycleTime, accessTime, recTime;
+ int cycleTime, accessTime = 0, recTime = 0;
unsigned accessTicks, recTicks;
struct mdma_timings_t* tm = NULL;
int i;
static int __devinit
pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_match *match)
{
- unsigned long base, regbase;
+ void __iomem *base;
+ unsigned long regbase;
int irq;
ide_hwif_t *hwif;
pmac_ide_hwif_t *pmif;
} else
irq = macio_irq(mdev, 0);
- base = (unsigned long)ioremap(macio_resource_start(mdev, 0), 0x400);
- regbase = base;
+ base = ioremap(macio_resource_start(mdev, 0), 0x400);
+ regbase = (unsigned long) base;
hwif->pci_dev = mdev->bus->pdev;
hwif->gendev.parent = &mdev->ofdev.dev;
if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
printk(KERN_WARNING "ide%d: can't request DMA resource !\n", i);
else
- pmif->dma_regs = (volatile struct dbdma_regs*)
- ioremap(macio_resource_start(mdev, 1), 0x1000);
+ pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
} else
pmif->dma_regs = NULL;
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
if (rc != 0) {
/* The interface is released to the common IDE layer */
dev_set_drvdata(&mdev->ofdev.dev, NULL);
- iounmap((void *)base);
+ iounmap(base);
if (pmif->dma_regs)
- iounmap((void *)pmif->dma_regs);
+ iounmap(pmif->dma_regs);
memset(pmif, 0, sizeof(*pmif));
macio_release_resource(mdev, 0);
if (pmif->dma_regs)
ide_hwif_t *hwif;
struct device_node *np;
pmac_ide_hwif_t *pmif;
- unsigned long base;
+ void __iomem *base;
unsigned long rbase, rlen;
int i, rc;
rbase = pci_resource_start(pdev, 0);
rlen = pci_resource_len(pdev, 0);
- base = (unsigned long) ioremap(rbase, rlen);
- pmif->regbase = base + 0x2000;
+ base = ioremap(rbase, rlen);
+ pmif->regbase = (unsigned long) base + 0x2000;
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
- pmif->dma_regs = (volatile struct dbdma_regs*)(base + 0x1000);
+ pmif->dma_regs = base + 0x1000;
#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
/* We use the OF node irq mapping */
if (rc != 0) {
/* The interface is released to the common IDE layer */
pci_set_drvdata(pdev, NULL);
- iounmap((void *)base);
+ iounmap(base);
memset(pmif, 0, sizeof(*pmif));
pci_release_regions(pdev);
}
struct scatterlist *sg = pmif->sg_table;
int nents;
- if (hwif->sg_dma_active)
- BUG();
-
nents = blk_rq_map_sg(drive->queue, rq, sg);
if (rq_data_dir(rq) == READ)
int i, count = 0;
ide_hwif_t *hwif = HWIF(drive);
pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
- volatile struct dbdma_regs *dma = pmif->dma_regs;
+ volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
struct scatterlist *sg;
int wr = (rq_data_dir(rq) == WRITE);
pmif->sg_table,
pmif->sg_nents,
pmif->sg_dma_direction);
- hwif->sg_dma_active = 0;
return 0; /* revert to PIO for this request */
}
if (nents) {
pci_unmap_sg(dev, sg, nents, pmif->sg_dma_direction);
pmif->sg_nents = 0;
- HWIF(drive)->sg_dma_active = 0;
}
}
/* Apple adds 60ns to wrDataSetup on reads */
if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
writel(pmif->timings[unit] + (reading ? 0x00800000UL : 0),
- (unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
- (void)readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
+ PMAC_IDE_REG(IDE_TIMING_CONFIG));
+ (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
}
drive->waiting_for_dma = 1;
pmac_ide_dma_begin (ide_drive_t *drive)
{
pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
- volatile struct dbdma_regs *dma;
+ volatile struct dbdma_regs __iomem *dma;
if (pmif == NULL)
return 1;
pmac_ide_dma_end (ide_drive_t *drive)
{
pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
- volatile struct dbdma_regs *dma;
+ volatile struct dbdma_regs __iomem *dma;
u32 dstat;
if (pmif == NULL)
pmac_ide_dma_test_irq (ide_drive_t *drive)
{
pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
- volatile struct dbdma_regs *dma;
+ volatile struct dbdma_regs __iomem *dma;
unsigned long status, timeout;
if (pmif == NULL)
pmac_ide_dma_lostirq (ide_drive_t *drive)
{
pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
- volatile struct dbdma_regs *dma;
+ volatile struct dbdma_regs __iomem *dma;
unsigned long status;
if (pmif == NULL)