2 * linux/drivers/ide/ide-pmac.c
4 * Support for IDE interfaces on PowerMacs.
5 * These IDE interfaces are memory-mapped and have a DBDMA channel
8 * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
15 * Some code taken from drivers/ide/ide-dma.c:
17 * Copyright (c) 1995-1998 Mark Lord
19 * TODO: - Use pre-calculated (kauai) timing tables all the time and
20 * get rid of the "rounded" tables used previously, so we have the
21 * same table format for all controllers and can then just have one
25 #include <linux/config.h>
26 #include <linux/types.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/init.h>
30 #include <linux/delay.h>
31 #include <linux/ide.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <linux/pci.h>
35 #include <linux/adb.h>
36 #include <linux/pmu.h>
40 #include <asm/dbdma.h>
42 #include <asm/pci-bridge.h>
43 #include <asm/machdep.h>
44 #include <asm/pmac_feature.h>
45 #include <asm/sections.h>
49 #include <asm/mediabay.h>
52 #include "ide-timing.h"
54 extern void ide_do_request(ide_hwgroup_t *hwgroup, int masked_irq);
56 #define IDE_PMAC_DEBUG
58 #define DMA_WAIT_TIMEOUT 50
/* Per-interface private state for one PowerMac IDE channel.
 * NOTE(review): several field lines of this struct are missing from this
 * extraction (the kind/irq/timings fields and the closing brace are not
 * visible here) — confirm against the full file.
 */
60 typedef struct pmac_ide_hwif {
61 	unsigned long regbase;
65 	unsigned cable_80 : 1;	/* 80-conductor cable detected (from device-tree) */
66 	unsigned mediabay : 1;	/* interface lives in a media bay slot */
67 	unsigned broken_dma : 1;	/* controller has known DMA limitations */
68 	unsigned broken_dma_warn : 1;	/* warning about the above already printed */
69 	struct device_node* node;	/* Open Firmware device-tree node */
70 	struct macio_dev *mdev;	/* macio device when probed via macio bus */
72 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
73 	/* Those fields are duplicating what is in hwif. We currently
74 	 * can't use the hwif ones because of some assumptions that are
75 	 * being done by the generic code about the kind of dma controller
76 	 * and format of the dma table. This will have to be fixed though.
78 	volatile struct dbdma_regs __iomem * dma_regs;	/* DBDMA channel registers */
79 	struct dbdma_cmd* dma_table_cpu;	/* CPU view of the DBDMA command list */
80 	dma_addr_t dma_table_dma;	/* bus address of the DBDMA command list */
81 	struct scatterlist* sg_table;
88 static pmac_ide_hwif_t pmac_ide[MAX_HWIFS] __pmacdata;
89 static int pmac_ide_count;
92 controller_ohare, /* OHare based */
93 controller_heathrow, /* Heathrow/Paddington */
94 controller_kl_ata3, /* KeyLargo ATA-3 */
95 controller_kl_ata4, /* KeyLargo ATA-4 */
96 controller_un_ata6, /* UniNorth2 ATA-6 */
97 controller_k2_ata6 /* K2 ATA-6 */
100 static const char* model_name[] = {
101 "OHare ATA", /* OHare based */
102 "Heathrow ATA", /* Heathrow/Paddington */
103 "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
104 "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
105 "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
106 "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
110 * Extra registers, both 32-bit little-endian
112 #define IDE_TIMING_CONFIG 0x200
113 #define IDE_INTERRUPT 0x300
115 /* Kauai (U2) ATA has different register setup */
116 #define IDE_KAUAI_PIO_CONFIG 0x200
117 #define IDE_KAUAI_ULTRA_CONFIG 0x210
118 #define IDE_KAUAI_POLL_CONFIG 0x220
121 * Timing configuration register definitions
124 /* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
125 #define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
126 #define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
127 #define IDE_SYSCLK_NS 30 /* 33Mhz cell */
128 #define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
130 /* 100Mhz cell, found in Uninorth 2. I don't have much infos about
131 * this one yet, it appears as a pci device (106b/0033) on uninorth
132 * internal PCI bus and it's clock is controlled like gem or fw. It
133 * appears to be an evolution of keylargo ATA4 with a timing register
134 * extended to 2 32bits registers and a similar DBDMA channel. Other
135 * registers seem to exist but I can't tell much about them.
137 * So far, I'm using pre-calculated tables for this extracted from
138 * the values used by the MacOS X driver.
140 * The "PIO" register controls PIO and MDMA timings, the "ULTRA"
141 * register controls the UDMA timings. At least, it seems bit 0
142 * of this one enables UDMA vs. MDMA, and bits 4..7 are the
143 * cycle time in units of 10ns. Bits 8..15 are used by I don't
144 * know their meaning yet
146 #define TR_100_PIOREG_PIO_MASK 0xff000fff
147 #define TR_100_PIOREG_MDMA_MASK 0x00fff000
148 #define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
149 #define TR_100_UDMAREG_UDMA_EN 0x00000001
152 /* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
153 * 40 connector cable and to 4 on 80 connector one.
154 * Clock unit is 15ns (66Mhz)
156 * 3 Values can be programmed:
157 * - Write data setup, which appears to match the cycle time. They
158 * also call it DIOW setup.
159 * - Ready to pause time (from spec)
160 * - Address setup. That one is weird. I don't see where exactly
161 * it fits in UDMA cycles, I got its name from an obscure piece
162 * of commented out code in Darwin. They leave it to 0, we do as
163 * well, despite a comment that would lead to think it has a
165 * Apple also add 60ns to the write data setup (or cycle time ?) on
168 #define TR_66_UDMA_MASK 0xfff00000
169 #define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
170 #define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */
171 #define TR_66_UDMA_ADDRSETUP_SHIFT 29
172 #define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
173 #define TR_66_UDMA_RDY2PAUS_SHIFT 25
174 #define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
175 #define TR_66_UDMA_WRDATASETUP_SHIFT 21
176 #define TR_66_MDMA_MASK 0x000ffc00
177 #define TR_66_MDMA_RECOVERY_MASK 0x000f8000
178 #define TR_66_MDMA_RECOVERY_SHIFT 15
179 #define TR_66_MDMA_ACCESS_MASK 0x00007c00
180 #define TR_66_MDMA_ACCESS_SHIFT 10
181 #define TR_66_PIO_MASK 0x000003ff
182 #define TR_66_PIO_RECOVERY_MASK 0x000003e0
183 #define TR_66_PIO_RECOVERY_SHIFT 5
184 #define TR_66_PIO_ACCESS_MASK 0x0000001f
185 #define TR_66_PIO_ACCESS_SHIFT 0
187 /* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
188 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
190 * The access time and recovery time can be programmed. Some older
191 * Darwin code base limit OHare to 150ns cycle time. I decided to do
192 * the same here for safety against broken old hardware ;)
193 * The HalfTick bit, when set, adds half a clock (15ns) to the access
194 * time and removes one from recovery. It's not supported on KeyLargo
195 * implementation afaik. The E bit appears to be set for PIO mode 0 and
196 * is used to reach long timings used in this mode.
198 #define TR_33_MDMA_MASK 0x003ff800
199 #define TR_33_MDMA_RECOVERY_MASK 0x001f0000
200 #define TR_33_MDMA_RECOVERY_SHIFT 16
201 #define TR_33_MDMA_ACCESS_MASK 0x0000f800
202 #define TR_33_MDMA_ACCESS_SHIFT 11
203 #define TR_33_MDMA_HALFTICK 0x00200000
204 #define TR_33_PIO_MASK 0x000007ff
205 #define TR_33_PIO_E 0x00000400
206 #define TR_33_PIO_RECOVERY_MASK 0x000003e0
207 #define TR_33_PIO_RECOVERY_SHIFT 5
208 #define TR_33_PIO_ACCESS_MASK 0x0000001f
209 #define TR_33_PIO_ACCESS_SHIFT 0
212 * Interrupt register definitions
214 #define IDE_INTR_DMA 0x80000000
215 #define IDE_INTR_DEVICE 0x40000000
217 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
219 /* Rounded Multiword DMA timings
221 * I gave up finding a generic formula for all controller
222 * types and instead, built tables based on timing values
223 * used by Apple in Darwin's implementation.
225 struct mdma_timings_t {
231 struct mdma_timings_t mdma_timings_33[] __pmacdata =
244 struct mdma_timings_t mdma_timings_33k[] __pmacdata =
257 struct mdma_timings_t mdma_timings_66[] __pmacdata =
270 /* KeyLargo ATA-4 Ultra DMA timings (rounded) */
272 int addrSetup; /* ??? */
275 } kl66_udma_timings[] __pmacdata =
277 { 0, 180, 120 }, /* Mode 0 */
278 { 0, 150, 90 }, /* 1 */
279 { 0, 120, 60 }, /* 2 */
280 { 0, 90, 45 }, /* 3 */
281 { 0, 90, 30 } /* 4 */
284 /* UniNorth 2 ATA/100 timings */
285 struct kauai_timing {
290 static struct kauai_timing kauai_pio_timings[] __pmacdata =
292 { 930 , 0x08000fff },
293 { 600 , 0x08000a92 },
294 { 383 , 0x0800060f },
295 { 360 , 0x08000492 },
296 { 330 , 0x0800048f },
297 { 300 , 0x080003cf },
298 { 270 , 0x080003cc },
299 { 240 , 0x0800038b },
300 { 239 , 0x0800030c },
301 { 180 , 0x05000249 },
305 static struct kauai_timing kauai_mdma_timings[] __pmacdata =
307 { 1260 , 0x00fff000 },
308 { 480 , 0x00618000 },
309 { 360 , 0x00492000 },
310 { 270 , 0x0038e000 },
311 { 240 , 0x0030c000 },
312 { 210 , 0x002cb000 },
313 { 180 , 0x00249000 },
314 { 150 , 0x00209000 },
315 { 120 , 0x00148000 },
319 static struct kauai_timing kauai_udma_timings[] __pmacdata =
321 { 120 , 0x000070c0 },
/* Look up the pre-calculated Kauai timing register value for the first
 * table entry whose cycle time the requested cycle_time exceeds.
 * NOTE(review): the loop guard tests table[i].cycle_time but the body
 * reads table[i+1] — this relies on the tables being terminated by a
 * sentinel entry; the table terminators are not visible in this
 * extraction, so confirm against the full table definitions.
 */
331 kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
335 	for (i=0; table[i].cycle_time; i++)
336 		if (cycle_time > table[i+1].cycle_time)
337 			return table[i].timing_reg;
341 /* allow up to 256 DBDMA commands per xfer */
342 #define MAX_DCMDS 256
345 * Wait 1s for disk to answer on IDE bus after a hard reset
346 * of the device (via GPIO/FCR).
348 * Some devices seem to "pollute" the bus even after dropping
349 * the BSY bit (typically some combo drives slave on the UDMA
350 * bus) after a hard reset. Since we hard reset all drives on
351 * KeyLargo ATA66, we have to keep that delay around. I may end
352 * up not hard resetting anymore on these and keep the delay only
353 * for older interfaces instead (we have to reset when coming
354 * from MacOS...) --BenH.
356 #define IDE_WAKEUP_DELAY (1*HZ)
358 static void pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif);
359 static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
360 static int pmac_ide_tune_chipset(ide_drive_t *drive, u8 speed);
361 static void pmac_ide_tuneproc(ide_drive_t *drive, u8 pio);
362 static void pmac_ide_selectproc(ide_drive_t *drive);
363 static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
364 static int pmac_ide_dma_begin (ide_drive_t *drive);
366 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
369 * Below is the code for blinking the laptop LED along with hard
373 #ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
375 /* Set to 50ms minimum led-on time (also used to limit frequency
376 * of requests sent to the PMU
378 #define PMU_HD_BLINK_TIME (HZ/50)
380 static struct adb_request pmu_blink_on, pmu_blink_off;
381 static spinlock_t pmu_blink_lock;
382 static unsigned long pmu_blink_stoptime;
383 static int pmu_blink_ledstate;
384 static struct timer_list pmu_blink_timer;
385 static int pmu_ide_blink_enabled;
/* Timer callback: switch the laptop hard-disk activity LED off once the
 * minimum on-time has elapsed. Rearms itself if a previous PMU "off"
 * request is still in flight.
 */
389 pmu_hd_blink_timeout(unsigned long data)
393 	spin_lock_irqsave(&pmu_blink_lock, flags);
395 	/* We may have been triggered again in a racy way, check
396 	 * that we really want to switch it off
398 	if (time_after(pmu_blink_stoptime, jiffies))
401 	/* Previous req. not complete, try 100ms more */
402 	if (pmu_blink_off.complete == 0)
403 		mod_timer(&pmu_blink_timer, jiffies + PMU_HD_BLINK_TIME);
404 	else if (pmu_blink_ledstate) {
405 		pmu_request(&pmu_blink_off, NULL, 4, 0xee, 4, 0, 0);
406 		pmu_blink_ledstate = 0;
409 	spin_unlock_irqrestore(&pmu_blink_lock, flags);
/* Called on disk activity: extend the LED's stop time, (re)arm the off
 * timer, and send the PMU "LED on" request if the LED is currently off.
 * The ledstate check before taking the lock is a lock-free fast path.
 */
413 pmu_hd_kick_blink(void *data, int rw)
417 	pmu_blink_stoptime = jiffies + PMU_HD_BLINK_TIME;
419 	mod_timer(&pmu_blink_timer, pmu_blink_stoptime);
420 	/* Fast path when LED is already ON */
421 	if (pmu_blink_ledstate == 1)
423 	spin_lock_irqsave(&pmu_blink_lock, flags);
424 	if (pmu_blink_on.complete && !pmu_blink_ledstate) {
425 		pmu_request(&pmu_blink_on, NULL, 4, 0xee, 4, 0, 1);
426 		pmu_blink_ledstate = 1;
428 	spin_unlock_irqrestore(&pmu_blink_lock, flags);
/* One-time init of the LED blinker. Only enables the feature on
 * KeyLargo-based PowerBook/iBook models (checked via the PMU model and
 * the device-tree "model" property); sets up the lock and timer.
 * Return value lines are not visible in this extraction — presumably
 * returns non-zero when blinking is enabled; confirm against callers.
 */
432 pmu_hd_blink_init(void)
434 	struct device_node *dt;
437 	/* Currently, I only enable this feature on KeyLargo based laptops,
438 	 * older laptops may support it (at least heathrow/paddington) but
439 	 * I don't feel like loading those venerable old machines with so
440 	 * much additional interrupt & PMU activity...
442 	if (pmu_get_model() != PMU_KEYLARGO_BASED)
445 	dt = find_devices("device-tree");
448 	model = (const char *)get_property(dt, "model", NULL);
451 	if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 &&
452 	    strncmp(model, "iBook", strlen("iBook")) != 0)
455 	pmu_blink_on.complete = 1;
456 	pmu_blink_off.complete = 1;
457 	spin_lock_init(&pmu_blink_lock);
458 	init_timer(&pmu_blink_timer);
459 	pmu_blink_timer.function = pmu_hd_blink_timeout;
464 #endif /* CONFIG_BLK_DEV_IDE_PMAC_BLINK */
467 * N.B. this can't be an initfunc, because the media-bay task can
468 * call ide_[un]register at any time.
/* Fill in the hw_regs_t I/O port table for an interface. If data_port
 * matches one of our memory-mapped pmac interfaces, registers are laid
 * out 0x10 apart (control at +0x160) and the stored irq is returned;
 * otherwise assume a conventional PCI-style contiguous layout.
 */
471 pmac_ide_init_hwif_ports(hw_regs_t *hw,
472 			unsigned long data_port, unsigned long ctrl_port,
480 	for (ix = 0; ix < MAX_HWIFS; ++ix)
481 		if (data_port == pmac_ide[ix].regbase)
484 	if (ix >= MAX_HWIFS) {
485 		/* Probably a PCI interface... */
486 		for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; ++i)
487 			hw->io_ports[i] = data_port + i - IDE_DATA_OFFSET;
488 		hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
492 	for (i = 0; i < 8; ++i)
493 		hw->io_ports[i] = data_port + i * 0x10;
494 	hw->io_ports[8] = data_port + 0x160;
497 		*irq = pmac_ide[ix].irq;
500 #define PMAC_IDE_REG(x) ((void __iomem *)(IDE_DATA_REG+(x)))
503 * Apply the timings of the proper unit (master/slave) to the shared
504 * timing register when selecting that unit. This version is for
505 * ASICs with a single timing register
/* selectproc for single-timing-register ASICs: write the selected
 * unit's cached timing value to the shared timing register. The final
 * readl flushes the posted MMIO write.
 */
508 pmac_ide_selectproc(ide_drive_t *drive)
510 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
515 	if (drive->select.b.unit & 0x01)
516 		writel(pmif->timings[1], PMAC_IDE_REG(IDE_TIMING_CONFIG));
518 		writel(pmif->timings[0], PMAC_IDE_REG(IDE_TIMING_CONFIG));
519 	(void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
523 * Apply the timings of the proper unit (master/slave) to the shared
524 * timing register when selecting that unit. This version is for
525 * ASICs with a dual timing register (Kauai)
/* selectproc for dual-timing-register ASICs (Kauai/K2): timings[0..1]
 * hold the PIO/MDMA config per unit, timings[2..3] the ULTRA config.
 * The final readl flushes the posted MMIO writes.
 */
528 pmac_ide_kauai_selectproc(ide_drive_t *drive)
530 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
535 	if (drive->select.b.unit & 0x01) {
536 		writel(pmif->timings[1], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
537 		writel(pmif->timings[3], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
539 		writel(pmif->timings[0], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
540 		writel(pmif->timings[2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
542 	(void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
546 * Force an update of controller timing values for a given drive
/* Push the cached timing values for this drive to the hardware by
 * invoking the appropriate selectproc variant for the controller kind.
 */
549 pmac_ide_do_update_timings(ide_drive_t *drive)
551 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
556 	if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6)
557 		pmac_ide_kauai_selectproc(drive);
559 		pmac_ide_selectproc(drive);
/* OUTBSYNC hook: byte write to a taskfile register followed by a read
 * of the timing config register to force the posted write out to the
 * device before returning.
 */
563 pmac_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
567 	writeb(value, (void __iomem *) port);
568 	tmp = readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
572 * Send the SET_FEATURE IDE command to the drive and update drive->id with
573 * the new state. We currently don't use the generic routine as it used to
574 * cause various trouble, especially with older mediabays.
575 * This code is sometimes triggering a spurious interrupt though, I need
576 * to sort that out sooner or later and see if I can finally get the
577 * common version to work properly in all cases
/* Issue SET FEATURES / set-transfer-mode to the drive with the hwif irq
 * masked, then patch drive->id's dma_ultra/dma_mword/dma_1word words so
 * they reflect the newly selected mode (both "supported" and "selected"
 * bits, hence the doubled bit patterns like 0x8080).
 * NOTE(review): several lines (error-path jumps, switch labels for the
 * XFER_* cases) are missing from this extraction.
 */
580 pmac_ide_do_setfeature(ide_drive_t *drive, u8 command)
582 	ide_hwif_t *hwif = HWIF(drive);
585 	disable_irq_nosync(hwif->irq);
588 	SELECT_MASK(drive, 0);
590 	/* Get rid of pending error state */
591 	(void) hwif->INB(IDE_STATUS_REG);
592 	/* Timeout bumped for some powerbooks */
593 	if (wait_for_ready(drive, 2000)) {
594 		/* Timeout bumped for some powerbooks */
595 		printk(KERN_ERR "%s: pmac_ide_do_setfeature disk not ready "
596 			"before SET_FEATURE!\n", drive->name);
600 	hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
601 	hwif->OUTB(command, IDE_NSECTOR_REG);
602 	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
603 	hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
605 	/* Timeout bumped for some powerbooks */
606 	result = wait_for_ready(drive, 2000);
607 	hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
609 		printk(KERN_ERR "%s: pmac_ide_do_setfeature disk not ready "
610 			"after SET_FEATURE !\n", drive->name);
612 	SELECT_MASK(drive, 0);
	/* Clear previously selected modes, then set the one just programmed */
614 		drive->id->dma_ultra &= ~0xFF00;
615 		drive->id->dma_mword &= ~0x0F00;
616 		drive->id->dma_1word &= ~0x0F00;
619 				drive->id->dma_ultra |= 0x8080; break;
621 				drive->id->dma_ultra |= 0x4040; break;
623 				drive->id->dma_ultra |= 0x2020; break;
625 				drive->id->dma_ultra |= 0x1010; break;
627 				drive->id->dma_ultra |= 0x0808; break;
629 				drive->id->dma_ultra |= 0x0404; break;
631 				drive->id->dma_ultra |= 0x0202; break;
633 				drive->id->dma_ultra |= 0x0101; break;
635 				drive->id->dma_mword |= 0x0404; break;
637 				drive->id->dma_mword |= 0x0202; break;
639 				drive->id->dma_mword |= 0x0101; break;
641 				drive->id->dma_1word |= 0x0404; break;
643 				drive->id->dma_1word |= 0x0202; break;
645 				drive->id->dma_1word |= 0x0101; break;
649 	enable_irq(hwif->irq);
654 * Old tuning functions (called on hdparm -p), sets up drive PIO timings
/* tuneproc: program PIO timings for the requested mode into the cached
 * timing word for this unit. Kauai/K2 use the pre-calculated lookup
 * table; KeyLargo ATA-4 uses the 66MHz tick formulas; older cells use
 * the 33MHz formulas with a 150ns floor. If the drive is currently
 * selected, the new value is pushed to the hardware immediately.
 */
657 pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
661 	unsigned accessTicks, recTicks;
662 	unsigned accessTime, recTime;
663 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
668 	/* which drive is it ? */
669 	timings = &pmif->timings[drive->select.b.unit & 0x01];
671 	pio = ide_get_best_pio_mode(drive, pio, 4, &d);
673 	switch (pmif->kind) {
674 	case controller_un_ata6:
675 	case controller_k2_ata6: {
		/* 100MHz cells: just look up the pre-calculated register value */
677 		u32 tr = kauai_lookup_timing(kauai_pio_timings, d.cycle_time);
680 		*timings = ((*timings) & ~TR_100_PIOREG_PIO_MASK) | tr;
683 	case controller_kl_ata4:
		/* 66MHz cell: derive ticks from access/recovery times (150ns floor) */
685 		recTime = d.cycle_time - ide_pio_timings[pio].active_time
686 				- ide_pio_timings[pio].setup_time;
687 		recTime = max(recTime, 150U);
688 		accessTime = ide_pio_timings[pio].active_time;
689 		accessTime = max(accessTime, 150U);
690 		accessTicks = SYSCLK_TICKS_66(accessTime);
691 		accessTicks = min(accessTicks, 0x1fU);
692 		recTicks = SYSCLK_TICKS_66(recTime);
693 		recTicks = min(recTicks, 0x1fU);
694 		*timings = ((*timings) & ~TR_66_PIO_MASK) |
695 			(accessTicks << TR_66_PIO_ACCESS_SHIFT) |
696 			(recTicks << TR_66_PIO_RECOVERY_SHIFT);
		/* default: 33MHz cell (OHare / Heathrow / KeyLargo ATA-3) */
701 		recTime = d.cycle_time - ide_pio_timings[pio].active_time
702 				- ide_pio_timings[pio].setup_time;
703 		recTime = max(recTime, 150U);
704 		accessTime = ide_pio_timings[pio].active_time;
705 		accessTime = max(accessTime, 150U);
706 		accessTicks = SYSCLK_TICKS(accessTime);
707 		accessTicks = min(accessTicks, 0x1fU);
708 		accessTicks = max(accessTicks, 4U);
709 		recTicks = SYSCLK_TICKS(recTime);
710 		recTicks = min(recTicks, 0x1fU);
711 		recTicks = max(recTicks, 5U) - 4;
713 			recTicks--; /* guess, but it's only for PIO0, so... */
716 		*timings = ((*timings) & ~TR_33_PIO_MASK) |
717 			(accessTicks << TR_33_PIO_ACCESS_SHIFT) |
718 			(recTicks << TR_33_PIO_RECOVERY_SHIFT);
720 			*timings |= TR_33_PIO_E;
725 #ifdef IDE_PMAC_DEBUG
726 	printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n",
727 		drive->name, pio, *timings);
	/* Apply immediately only if this drive is the one currently selected */
730 	if (drive->select.all == HWIF(drive)->INB(IDE_SELECT_REG))
731 		pmac_ide_do_update_timings(drive);
734 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
737 * Calculate KeyLargo ATA/66 UDMA timings
/* Program KeyLargo ATA/66 UDMA timings for the given XFER_UDMA_* speed
 * into *timings, using the rounded kl66_udma_timings table converted to
 * 66MHz ticks. Rejects speeds above UDMA4. The UDMA and MDMA fields are
 * mutually exclusive in this register, hence both masks are cleared.
 */
740 set_timings_udma_ata4(u32 *timings, u8 speed)
742 	unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
744 	if (speed > XFER_UDMA_4)
747 	rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
748 	wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
749 	addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);
751 	*timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
752 			(wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
753 			(rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
754 			(addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) |
756 #ifdef IDE_PMAC_DEBUG
757 	printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
758 		speed & 0xf, *timings);
765 * Calculate Kauai ATA/100 UDMA timings
/* Program Kauai ATA/100 UDMA timings: look up the register value for
 * the generic ide-timing cycle time and set it in the ULTRA register
 * along with the UDMA enable bit. Rejects speeds above UDMA5 or modes
 * unknown to ide_timing_find_mode().
 */
768 set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
770 	struct ide_timing *t = ide_timing_find_mode(speed);
773 	if (speed > XFER_UDMA_5 || t == NULL)
775 	tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
778 	*ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
779 	*ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;
785 * Calculate MDMA timings for all cells
/* Program Multiword DMA timings for all controller cells.
 * Picks a base cycle time for the mode, stretches it to honour the
 * drive-reported cycle time, clamps OHare to >= 150ns, looks up rounded
 * access/recovery values in the per-cell table, then encodes them into
 * the cell-specific register layout. For Kauai/K2 the UDMA enable bit
 * in *timings2 is cleared since MDMA is being selected.
 */
788 set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
789 			u8 speed, int drive_cycle_time)
791 	int cycleTime, accessTime = 0, recTime = 0;
792 	unsigned accessTicks, recTicks;
793 	struct mdma_timings_t* tm = NULL;
796 	/* Get default cycle time for mode */
797 	switch(speed & 0xf) {
798 		case 0: cycleTime = 480; break;
799 		case 1: cycleTime = 150; break;
800 		case 2: cycleTime = 120; break;
804 	/* Adjust for drive */
805 	if (drive_cycle_time && drive_cycle_time > cycleTime)
806 		cycleTime = drive_cycle_time;
807 	/* OHare limits according to some old Apple sources */
808 	if ((intf_type == controller_ohare) && (cycleTime < 150))
810 	/* Get the proper timing array for this controller */
812 		case controller_un_ata6:
813 		case controller_k2_ata6:
815 		case controller_kl_ata4:
816 			tm = mdma_timings_66;
818 		case controller_kl_ata3:
819 			tm = mdma_timings_33k;
822 			tm = mdma_timings_33;
826 	/* Lookup matching access & recovery times */
829 		if (tm[i+1].cycleTime < cycleTime)
835 	cycleTime = tm[i].cycleTime;
836 	accessTime = tm[i].accessTime;
837 	recTime = tm[i].recoveryTime;
839 #ifdef IDE_PMAC_DEBUG
840 	printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n",
841 		drive->name, cycleTime, accessTime, recTime);
845 		case controller_un_ata6:
846 		case controller_k2_ata6: {
			/* 100MHz cells: pre-calculated table lookup */
848 			u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
851 			*timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
852 			*timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
855 		case controller_kl_ata4:
			/* 66MHz cell */
857 			accessTicks = SYSCLK_TICKS_66(accessTime);
858 			accessTicks = min(accessTicks, 0x1fU);
859 			accessTicks = max(accessTicks, 0x1U);
860 			recTicks = SYSCLK_TICKS_66(recTime);
861 			recTicks = min(recTicks, 0x1fU);
862 			recTicks = max(recTicks, 0x3U);
863 			/* Clear out mdma bits and disable udma */
864 			*timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) |
865 				(accessTicks << TR_66_MDMA_ACCESS_SHIFT) |
866 				(recTicks << TR_66_MDMA_RECOVERY_SHIFT);
868 		case controller_kl_ata3:
869 			/* 33Mhz cell on KeyLargo */
870 			accessTicks = SYSCLK_TICKS(accessTime);
871 			accessTicks = max(accessTicks, 1U);
872 			accessTicks = min(accessTicks, 0x1fU);
873 			accessTime = accessTicks * IDE_SYSCLK_NS;
874 			recTicks = SYSCLK_TICKS(recTime);
875 			recTicks = max(recTicks, 1U);
876 			recTicks = min(recTicks, 0x1fU);
877 			*timings = ((*timings) & ~TR_33_MDMA_MASK) |
878 				(accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
879 				(recTicks << TR_33_MDMA_RECOVERY_SHIFT);
882 			/* 33Mhz cell on others */
884 			int origAccessTime = accessTime;
885 			int origRecTime = recTime;
887 			accessTicks = SYSCLK_TICKS(accessTime);
888 			accessTicks = max(accessTicks, 1U);
889 			accessTicks = min(accessTicks, 0x1fU);
890 			accessTime = accessTicks * IDE_SYSCLK_NS;
891 			recTicks = SYSCLK_TICKS(recTime);
892 			recTicks = max(recTicks, 2U) - 1;
893 			recTicks = min(recTicks, 0x1fU);
894 			recTime = (recTicks + 1) * IDE_SYSCLK_NS;
			/* Use the HalfTick bit when the rounded-up ticks leave
			 * >= half a clock of slack on both access and recovery */
895 			if ((accessTicks > 1) &&
896 			    ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) &&
897 			    ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) {
901 			*timings = ((*timings) & ~TR_33_MDMA_MASK) |
902 				(accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
903 				(recTicks << TR_33_MDMA_RECOVERY_SHIFT);
905 				*timings |= TR_33_MDMA_HALFTICK;
908 #ifdef IDE_PMAC_DEBUG
909 	printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n",
910 		drive->name, speed & 0xf, *timings);
914 #endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
917 * Speedproc. This function is called by the core to set any of the standard
918 * timing (PIO, MDMA or UDMA) to both the drive and the controller.
919 * You may notice we don't use this function on normal "dma check" operation,
920 * our dedicated function is more precise as it uses the drive provided
921 * cycle time value. We should probably fix this one to deal with that too...
/* speedproc: set any standard timing (PIO, MDMA or UDMA) on both the
 * controller (via set_timings_* / tuneproc) and the drive (via
 * SET FEATURES), then push the timings and record current_speed.
 * UDMA above the cable/controller capability is rejected (udma_four
 * gate, non-UDMA-capable kinds).
 */
924 pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
926 	int unit = (drive->select.b.unit & 0x01);
928 	pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
929 	u32 *timings, *timings2;
	/* timings[unit] = PIO/MDMA word, timings[unit+2] = Kauai ULTRA word */
934 	timings = &pmif->timings[unit];
935 	timings2 = &pmif->timings[unit+2];
938 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
940 			if (pmif->kind != controller_un_ata6 &&
941 			    pmif->kind != controller_k2_ata6)
945 			if (HWIF(drive)->udma_four == 0)
950 			if (pmif->kind == controller_kl_ata4)
951 				ret = set_timings_udma_ata4(timings, speed);
952 			else if (pmif->kind == controller_un_ata6
953 				 || pmif->kind == controller_k2_ata6)
954 				ret = set_timings_udma_ata6(timings, timings2, speed);
961 			ret = set_timings_mdma(drive, pmif->kind, timings, timings2, speed, 0);
967 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
973 			pmac_ide_tuneproc(drive, speed & 0x07);
981 	ret = pmac_ide_do_setfeature(drive, speed);
985 	pmac_ide_do_update_timings(drive);
986 	drive->current_speed = speed;
992 * Blast some well known "safe" values to the timing registers at init or
993 * wakeup from sleep time, before we do real calculation
/* Load known-safe default values into the cached timing words at init
 * or wakeup-from-sleep time, before real timings are calculated.
 * NOTE(review): the per-kind `value` assignments are missing from this
 * extraction; only the Kauai `value2` default is visible.
 */
996 sanitize_timings(pmac_ide_hwif_t *pmif)
998 	unsigned int value, value2 = 0;
1000 	switch(pmif->kind) {
1001 		case controller_un_ata6:
1002 		case controller_k2_ata6:
1004 			value2 = 0x00002921;
1006 		case controller_kl_ata4:
1009 		case controller_kl_ata3:
1012 		case controller_heathrow:
1013 		case controller_ohare:
1018 	pmif->timings[0] = pmif->timings[1] = value;
1019 	pmif->timings[2] = pmif->timings[3] = value2;
/* Return the MMIO register base of interface `index` (0 if unused). */
unsigned long __pmac
1023 pmac_ide_get_base(int index)
1025 	return pmac_ide[index].regbase;
/* Check whether `base` is the register base of one of our interfaces.
 * Return-value lines are missing from this extraction — presumably the
 * matching index or a found/not-found flag; confirm against callers.
 */
1029 pmac_ide_check_base(unsigned long base)
1033 	for (ix = 0; ix < MAX_HWIFS; ++ix)
1034 		if (base == pmac_ide[ix].regbase)
/* Return the irq of the interface whose register base is `base`. */
1040 pmac_ide_get_irq(unsigned long base)
1044 	for (ix = 0; ix < MAX_HWIFS; ++ix)
1045 		if (base == pmac_ide[ix].regbase)
1046 			return pmac_ide[ix].irq;
1050 static int ide_majors[] __pmacdata = { 3, 22, 33, 34, 56, 57 };
/* Map an Open Firmware boot device path (first n chars of bootdevice)
 * to the block dev_t of the corresponding IDE interface, by comparing
 * against each registered interface's device-tree full_name.
 */
1053 pmac_find_ide_boot(char *bootdevice, int n)
1058 	 * Look through the list of IDE interfaces for this one.
1060 	for (i = 0; i < pmac_ide_count; ++i) {
1062 		if (!pmac_ide[i].node || !pmac_ide[i].node->full_name)
1064 		name = pmac_ide[i].node->full_name;
1065 		if (memcmp(name, bootdevice, n) == 0 && name[n] == 0) {
1066 			/* XXX should cope with the 2nd drive as well... */
1067 			return MKDEV(ide_majors[i], 0);
1074 /* Suspend call back, should be called after the child devices
1075 * have actually been suspended
/* Suspend callback (called after child devices are suspended): clear
 * the cached timings, stop the LED blink timer if active, and power
 * down the bus via the platform feature call — except for media-bay
 * interfaces, which manage their own power.
 */
1078 pmac_ide_do_suspend(ide_hwif_t *hwif)
1080 	pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1082 	/* We clear the timings */
1083 	pmif->timings[0] = 0;
1084 	pmif->timings[1] = 0;
1086 #ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
1087 	/* Note: This code will be called for every hwif, thus we'll
1088 	 * try several time to stop the LED blinker timer, but that
1089 	 * should be harmless
1091 	if (pmu_ide_blink_enabled) {
1092 		unsigned long flags;
1094 		/* Make sure we don't hit the PMU blink */
1095 		spin_lock_irqsave(&pmu_blink_lock, flags);
1096 		if (pmu_blink_ledstate)
1097 			del_timer(&pmu_blink_timer);
1098 		pmu_blink_ledstate = 0;
1099 		spin_unlock_irqrestore(&pmu_blink_lock, flags);
1101 #endif /* CONFIG_BLK_DEV_IDE_PMAC_BLINK */
1103 	/* The media bay will handle itself just fine */
1107 	/* Disable the bus */
1108 	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 0);
1113 /* Resume call back, should be called before the child devices
/* Resume callback (called before child devices resume): hard-reset and
 * re-enable the cell (non-mediabay only), wait IDE_WAKEUP_DELAY for the
 * bus to settle, then restore safe default timings.
 */
1117 pmac_ide_do_resume(ide_hwif_t *hwif)
1119 	pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1121 	/* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
1122 	if (!pmif->mediabay) {
1123 		ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
1124 		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
1126 		ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
1127 		msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
1130 	/* Sanitize drive timings */
1131 	sanitize_timings(pmif);
1137 * Setup, register & probe an IDE channel driven by this driver, this is
1138 * called by one of the 2 probe functions (macio or PCI). Note that a channel
1139 * that ends up being free of any device is not kept around by this driver
1140 * (it is kept in 2.4). This introduces an interface numbering change on some
1141 * rare machines unfortunately, but it's better this way.
1144 pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1146 struct device_node *np = pmif->node;
1150 pmif->broken_dma = pmif->broken_dma_warn = 0;
1151 if (device_is_compatible(np, "kauai-ata"))
1152 pmif->kind = controller_un_ata6;
1153 else if (device_is_compatible(np, "K2-UATA"))
1154 pmif->kind = controller_k2_ata6;
1155 else if (device_is_compatible(np, "keylargo-ata")) {
1156 if (strcmp(np->name, "ata-4") == 0)
1157 pmif->kind = controller_kl_ata4;
1159 pmif->kind = controller_kl_ata3;
1160 } else if (device_is_compatible(np, "heathrow-ata"))
1161 pmif->kind = controller_heathrow;
1163 pmif->kind = controller_ohare;
1164 pmif->broken_dma = 1;
1167 bidp = (int *)get_property(np, "AAPL,bus-id", NULL);
1168 pmif->aapl_bus_id = bidp ? *bidp : 0;
1170 /* Get cable type from device-tree */
1171 if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6
1172 || pmif->kind == controller_k2_ata6) {
1173 char* cable = get_property(np, "cable-type", NULL);
1174 if (cable && !strncmp(cable, "80-", 3))
1180 /* Make sure we have sane timings */
1181 sanitize_timings(pmif);
1183 #ifndef CONFIG_PPC64
1184 /* XXX FIXME: Media bay stuff need re-organizing */
1185 if (np->parent && np->parent->name
1186 && strcasecmp(np->parent->name, "media-bay") == 0) {
1187 #ifdef CONFIG_PMAC_PBOOK
1188 media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq, hwif->index);
1189 #endif /* CONFIG_PMAC_PBOOK */
1192 pmif->aapl_bus_id = 1;
1193 } else if (pmif->kind == controller_ohare) {
1194 /* The code below is having trouble on some ohare machines
1195 * (timing related ?). Until I can put my hand on one of these
1196 * units, I keep the old way
1198 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
1202 /* This is necessary to enable IDE when net-booting */
1203 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
1204 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
1206 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
1207 msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
1210 /* Setup MMIO ops */
1211 default_hwif_mmiops(hwif);
1212 hwif->OUTBSYNC = pmac_outbsync;
1214 /* Tell common code _not_ to mess with resources */
1216 hwif->hwif_data = pmif;
1217 pmac_ide_init_hwif_ports(&hwif->hw, pmif->regbase, 0, &hwif->irq);
1218 memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports));
1219 hwif->chipset = ide_pmac;
1220 hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay;
1221 hwif->hold = pmif->mediabay;
1222 hwif->udma_four = pmif->cable_80;
1223 hwif->drives[0].unmask = 1;
1224 hwif->drives[1].unmask = 1;
1225 hwif->tuneproc = pmac_ide_tuneproc;
1226 if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6)
1227 hwif->selectproc = pmac_ide_kauai_selectproc;
1229 hwif->selectproc = pmac_ide_selectproc;
1230 hwif->speedproc = pmac_ide_tune_chipset;
1232 #ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
1233 pmu_ide_blink_enabled = pmu_hd_blink_init();
1235 if (pmu_ide_blink_enabled)
1236 hwif->led_act = pmu_hd_kick_blink;
1239 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
1240 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
1241 pmif->mediabay ? " (mediabay)" : "", hwif->irq);
1243 #ifdef CONFIG_PMAC_PBOOK
1244 if (pmif->mediabay && check_media_bay_by_base(pmif->regbase, MB_CD) == 0)
1246 #endif /* CONFIG_PMAC_PBOOK */
1248 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1249 /* has a DBDMA controller channel */
1251 pmac_ide_setup_dma(pmif, hwif);
1252 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1254 /* We probe the hwif now */
1255 probe_hwif_init(hwif);
	/* The IDE code will have set hwif->present if we have devices attached,
	 * if we don't, then discard the interface except if we are on a media bay slot
1260 if (!hwif->present && !pmif->mediabay) {
1261 printk(KERN_INFO "ide%d: Bus empty, interface released.\n",
1263 default_hwif_iops(hwif);
1264 for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; ++i)
1265 hwif->io_ports[i] = 0;
1266 hwif->chipset = ide_unknown;
1275 * Attach to a macio probed interface
1277 static int __devinit
1278 pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_match *match)
1281 unsigned long regbase;
1284 pmac_ide_hwif_t *pmif;
1288 while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
1289 || pmac_ide[i].node != NULL))
1291 if (i >= MAX_HWIFS) {
1292 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
1293 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
1297 pmif = &pmac_ide[i];
1298 hwif = &ide_hwifs[i];
1300 if (mdev->ofdev.node->n_addrs == 0) {
1301 printk(KERN_WARNING "ide%d: no address for %s\n",
1302 i, mdev->ofdev.node->full_name);
1306 /* Request memory resource for IO ports */
1307 if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
1308 printk(KERN_ERR "ide%d: can't request mmio resource !\n", i);
1312 /* XXX This is bogus. Should be fixed in the registry by checking
1313 * the kind of host interrupt controller, a bit like gatwick
1314 * fixes in irq.c. That works well enough for the single case
1315 * where that happens though...
1317 if (macio_irq_count(mdev) == 0) {
1318 printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n",
1319 i, mdev->ofdev.node->full_name);
1322 irq = macio_irq(mdev, 0);
1324 base = ioremap(macio_resource_start(mdev, 0), 0x400);
1325 regbase = (unsigned long) base;
1327 hwif->pci_dev = mdev->bus->pdev;
1328 hwif->gendev.parent = &mdev->ofdev.dev;
1331 pmif->node = mdev->ofdev.node;
1332 pmif->regbase = regbase;
1334 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1335 if (macio_resource_count(mdev) >= 2) {
1336 if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
1337 printk(KERN_WARNING "ide%d: can't request DMA resource !\n", i);
1339 pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
1341 pmif->dma_regs = NULL;
1342 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1343 dev_set_drvdata(&mdev->ofdev.dev, hwif);
1345 rc = pmac_ide_setup_device(pmif, hwif);
1347 /* The inteface is released to the common IDE layer */
1348 dev_set_drvdata(&mdev->ofdev.dev, NULL);
1351 iounmap(pmif->dma_regs);
1352 memset(pmif, 0, sizeof(*pmif));
1353 macio_release_resource(mdev, 0);
1355 macio_release_resource(mdev, 1);
1362 pmac_ide_macio_suspend(struct macio_dev *mdev, u32 state)
1364 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1367 if (state != mdev->ofdev.dev.power_state && state >= 2) {
1368 rc = pmac_ide_do_suspend(hwif);
1370 mdev->ofdev.dev.power_state = state;
1377 pmac_ide_macio_resume(struct macio_dev *mdev)
1379 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1382 if (mdev->ofdev.dev.power_state != 0) {
1383 rc = pmac_ide_do_resume(hwif);
1385 mdev->ofdev.dev.power_state = 0;
1392 * Attach to a PCI probed interface
1394 static int __devinit
1395 pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1398 struct device_node *np;
1399 pmac_ide_hwif_t *pmif;
1401 unsigned long rbase, rlen;
1404 np = pci_device_to_OF_node(pdev);
1406 printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
1410 while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
1411 || pmac_ide[i].node != NULL))
1413 if (i >= MAX_HWIFS) {
1414 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
1415 printk(KERN_ERR " %s\n", np->full_name);
1419 pmif = &pmac_ide[i];
1420 hwif = &ide_hwifs[i];
1422 if (pci_enable_device(pdev)) {
1423 printk(KERN_WARNING "ide%i: Can't enable PCI device for %s\n",
1427 pci_set_master(pdev);
1429 if (pci_request_regions(pdev, "Kauai ATA")) {
1430 printk(KERN_ERR "ide%d: Cannot obtain PCI resources for %s\n",
1435 hwif->pci_dev = pdev;
1436 hwif->gendev.parent = &pdev->dev;
1440 rbase = pci_resource_start(pdev, 0);
1441 rlen = pci_resource_len(pdev, 0);
1443 base = ioremap(rbase, rlen);
1444 pmif->regbase = (unsigned long) base + 0x2000;
1445 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1446 pmif->dma_regs = base + 0x1000;
1447 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1449 /* We use the OF node irq mapping */
1450 if (np->n_intrs == 0)
1451 pmif->irq = pdev->irq;
1453 pmif->irq = np->intrs[0].line;
1455 pci_set_drvdata(pdev, hwif);
1457 rc = pmac_ide_setup_device(pmif, hwif);
1459 /* The inteface is released to the common IDE layer */
1460 pci_set_drvdata(pdev, NULL);
1462 memset(pmif, 0, sizeof(*pmif));
1463 pci_release_regions(pdev);
1470 pmac_ide_pci_suspend(struct pci_dev *pdev, u32 state)
1472 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
1475 if (state != pdev->dev.power_state && state >= 2) {
1476 rc = pmac_ide_do_suspend(hwif);
1478 pdev->dev.power_state = state;
1485 pmac_ide_pci_resume(struct pci_dev *pdev)
1487 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
1490 if (pdev->dev.power_state != 0) {
1491 rc = pmac_ide_do_resume(hwif);
1493 pdev->dev.power_state = 0;
1499 static struct of_match pmac_ide_macio_match[] =
1503 .type = OF_ANY_MATCH,
1504 .compatible = OF_ANY_MATCH
1508 .type = OF_ANY_MATCH,
1509 .compatible = OF_ANY_MATCH
1512 .name = OF_ANY_MATCH,
1514 .compatible = OF_ANY_MATCH
1517 .name = OF_ANY_MATCH,
1519 .compatible = OF_ANY_MATCH
1524 static struct macio_driver pmac_ide_macio_driver =
1527 .match_table = pmac_ide_macio_match,
1528 .probe = pmac_ide_macio_attach,
1529 .suspend = pmac_ide_macio_suspend,
1530 .resume = pmac_ide_macio_resume,
1533 static struct pci_device_id pmac_ide_pci_match[] = {
1534 { PCI_VENDOR_ID_APPLE, PCI_DEVIEC_ID_APPLE_UNI_N_ATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1535 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1536 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1539 static struct pci_driver pmac_ide_pci_driver = {
1541 .id_table = pmac_ide_pci_match,
1542 .probe = pmac_ide_pci_attach,
1543 .suspend = pmac_ide_pci_suspend,
1544 .resume = pmac_ide_pci_resume,
1548 pmac_ide_probe(void)
1550 if (_machine != _MACH_Pmac)
1553 #ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
1554 pci_register_driver(&pmac_ide_pci_driver);
1555 macio_register_driver(&pmac_ide_macio_driver);
1557 macio_register_driver(&pmac_ide_macio_driver);
1558 pci_register_driver(&pmac_ide_pci_driver);
1562 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1565 * This is very close to the generic ide-dma version of the function except
1566 * that we don't use the fields in the hwif but our own copies for sg_table
1567 * and friends. We build & map the sglist for a given request
1570 pmac_ide_build_sglist(ide_drive_t *drive, struct request *rq)
1572 ide_hwif_t *hwif = HWIF(drive);
1573 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1574 struct scatterlist *sg = pmif->sg_table;
1577 nents = blk_rq_map_sg(drive->queue, rq, sg);
1579 if (rq_data_dir(rq) == READ)
1580 pmif->sg_dma_direction = PCI_DMA_FROMDEVICE;
1582 pmif->sg_dma_direction = PCI_DMA_TODEVICE;
1584 return pci_map_sg(hwif->pci_dev, sg, nents, pmif->sg_dma_direction);
1588 * Same as above but for a "raw" taskfile request
1591 pmac_ide_raw_build_sglist(ide_drive_t *drive, struct request *rq)
1593 ide_hwif_t *hwif = HWIF(drive);
1594 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1595 struct scatterlist *sg = pmif->sg_table;
1597 ide_task_t *args = rq->special;
1598 unsigned char *virt_addr = rq->buffer;
1599 int sector_count = rq->nr_sectors;
1601 if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
1602 pmif->sg_dma_direction = PCI_DMA_TODEVICE;
1604 pmif->sg_dma_direction = PCI_DMA_FROMDEVICE;
1606 if (sector_count > 128) {
1607 memset(&sg[nents], 0, sizeof(*sg));
1608 sg[nents].page = virt_to_page(virt_addr);
1609 sg[nents].offset = offset_in_page(virt_addr);
1610 sg[nents].length = 128 * SECTOR_SIZE;
1612 virt_addr = virt_addr + (128 * SECTOR_SIZE);
1613 sector_count -= 128;
1615 memset(&sg[nents], 0, sizeof(*sg));
1616 sg[nents].page = virt_to_page(virt_addr);
1617 sg[nents].offset = offset_in_page(virt_addr);
1618 sg[nents].length = sector_count * SECTOR_SIZE;
1621 return pci_map_sg(hwif->pci_dev, sg, nents, pmif->sg_dma_direction);
1625 * pmac_ide_build_dmatable builds the DBDMA command list
1626 * for a transfer and sets the DBDMA channel to point to it.
1629 pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1631 struct dbdma_cmd *table;
1633 ide_hwif_t *hwif = HWIF(drive);
1634 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1635 volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
1636 struct scatterlist *sg;
1637 int wr = (rq_data_dir(rq) == WRITE);
1639 /* DMA table is already aligned */
1640 table = (struct dbdma_cmd *) pmif->dma_table_cpu;
1642 /* Make sure DMA controller is stopped (necessary ?) */
1643 writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
1644 while (readl(&dma->status) & RUN)
1648 if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE)
1649 pmif->sg_nents = i = pmac_ide_raw_build_sglist(drive, rq);
1651 pmif->sg_nents = i = pmac_ide_build_sglist(drive, rq);
1655 /* Build DBDMA commands list */
1656 sg = pmif->sg_table;
1657 while (i && sg_dma_len(sg)) {
1661 cur_addr = sg_dma_address(sg);
1662 cur_len = sg_dma_len(sg);
1664 if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
1665 if (pmif->broken_dma_warn == 0) {
1666 printk(KERN_WARNING "%s: DMA on non aligned address,"
1667 "switching to PIO on Ohare chipset\n", drive->name);
1668 pmif->broken_dma_warn = 1;
1670 goto use_pio_instead;
1673 unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
1675 if (count++ >= MAX_DCMDS) {
1676 printk(KERN_WARNING "%s: DMA table too small\n",
1678 goto use_pio_instead;
1680 st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE);
1681 st_le16(&table->req_count, tc);
1682 st_le32(&table->phy_addr, cur_addr);
1684 table->xfer_status = 0;
1685 table->res_count = 0;
1694 /* convert the last command to an input/output last command */
1696 st_le16(&table[-1].command, wr? OUTPUT_LAST: INPUT_LAST);
1697 /* add the stop command to the end of the list */
1698 memset(table, 0, sizeof(struct dbdma_cmd));
1699 st_le16(&table->command, DBDMA_STOP);
1701 writel(pmif->dma_table_dma, &dma->cmdptr);
1705 printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
1707 pci_unmap_sg(hwif->pci_dev,
1710 pmif->sg_dma_direction);
1711 return 0; /* revert to PIO for this request */
1714 /* Teardown mappings after DMA has completed. */
1716 pmac_ide_destroy_dmatable (ide_drive_t *drive)
1718 struct pci_dev *dev = HWIF(drive)->pci_dev;
1719 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
1720 struct scatterlist *sg = pmif->sg_table;
1721 int nents = pmif->sg_nents;
1724 pci_unmap_sg(dev, sg, nents, pmif->sg_dma_direction);
1730 * Pick up best MDMA timing for the drive and apply it
1733 pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
1735 ide_hwif_t *hwif = HWIF(drive);
1736 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1737 int drive_cycle_time;
1738 struct hd_driveid *id = drive->id;
1739 u32 *timings, *timings2;
1740 u32 timing_local[2];
1743 /* which drive is it ? */
1744 timings = &pmif->timings[drive->select.b.unit & 0x01];
1745 timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];
1747 /* Check if drive provide explicit cycle time */
1748 if ((id->field_valid & 2) && (id->eide_dma_time))
1749 drive_cycle_time = id->eide_dma_time;
1751 drive_cycle_time = 0;
1753 /* Copy timings to local image */
1754 timing_local[0] = *timings;
1755 timing_local[1] = *timings2;
1757 /* Calculate controller timings */
1758 ret = set_timings_mdma( drive, pmif->kind,
1766 /* Set feature on drive */
1767 printk(KERN_INFO "%s: Enabling MultiWord DMA %d\n", drive->name, mode & 0xf);
1768 ret = pmac_ide_do_setfeature(drive, mode);
1770 printk(KERN_WARNING "%s: Failed !\n", drive->name);
1774 /* Apply timings to controller */
1775 *timings = timing_local[0];
1776 *timings2 = timing_local[1];
1778 /* Set speed info in drive */
1779 drive->current_speed = mode;
1780 if (!drive->init_speed)
1781 drive->init_speed = mode;
1787 * Pick up best UDMA timing for the drive and apply it
1790 pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
1792 ide_hwif_t *hwif = HWIF(drive);
1793 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1794 u32 *timings, *timings2;
1795 u32 timing_local[2];
1798 /* which drive is it ? */
1799 timings = &pmif->timings[drive->select.b.unit & 0x01];
1800 timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];
1802 /* Copy timings to local image */
1803 timing_local[0] = *timings;
1804 timing_local[1] = *timings2;
1806 /* Calculate timings for interface */
1807 if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6)
1808 ret = set_timings_udma_ata6( &timing_local[0],
1812 ret = set_timings_udma_ata4(&timing_local[0], mode);
1816 /* Set feature on drive */
1817 printk(KERN_INFO "%s: Enabling Ultra DMA %d\n", drive->name, mode & 0x0f);
1818 ret = pmac_ide_do_setfeature(drive, mode);
1820 printk(KERN_WARNING "%s: Failed !\n", drive->name);
1824 /* Apply timings to controller */
1825 *timings = timing_local[0];
1826 *timings2 = timing_local[1];
1828 /* Set speed info in drive */
1829 drive->current_speed = mode;
1830 if (!drive->init_speed)
1831 drive->init_speed = mode;
1837 * Check what is the best DMA timing setting for the drive and
1838 * call appropriate functions to apply it.
1841 pmac_ide_dma_check(ide_drive_t *drive)
1843 struct hd_driveid *id = drive->id;
1844 ide_hwif_t *hwif = HWIF(drive);
1845 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1848 drive->using_dma = 0;
1850 if (drive->media == ide_floppy)
1852 if (((id->capability & 1) == 0) && !__ide_dma_good_drive(drive))
1854 if (__ide_dma_bad_drive(drive))
1861 if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6
1862 || pmif->kind == controller_k2_ata6) {
1864 if (pmif->cable_80) {
1865 map |= XFER_UDMA_66;
1866 if (pmif->kind == controller_un_ata6 ||
1867 pmif->kind == controller_k2_ata6)
1868 map |= XFER_UDMA_100;
1871 mode = ide_find_best_mode(drive, map);
1872 if (mode & XFER_UDMA)
1873 drive->using_dma = pmac_ide_udma_enable(drive, mode);
1874 else if (mode & XFER_MWDMA)
1875 drive->using_dma = pmac_ide_mdma_enable(drive, mode);
1876 hwif->OUTB(0, IDE_CONTROL_REG);
1877 /* Apply settings to controller */
1878 pmac_ide_do_update_timings(drive);
1884 * Prepare a DMA transfer. We build the DMA table, adjust the timings for
1885 * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
1888 pmac_ide_dma_start(ide_drive_t *drive, int reading)
1890 ide_hwif_t *hwif = HWIF(drive);
1891 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1892 struct request *rq = HWGROUP(drive)->rq;
1893 u8 unit = (drive->select.b.unit & 0x01);
1898 ata4 = (pmif->kind == controller_kl_ata4);
1900 if (!pmac_ide_build_dmatable(drive, rq))
1903 /* Apple adds 60ns to wrDataSetup on reads */
1904 if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
1905 writel(pmif->timings[unit] + (reading ? 0x00800000UL : 0),
1906 PMAC_IDE_REG(IDE_TIMING_CONFIG));
1907 (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
1910 drive->waiting_for_dma = 1;
1916 * Start a DMA READ command
1919 pmac_ide_dma_read(ide_drive_t *drive)
1921 struct request *rq = HWGROUP(drive)->rq;
1922 u8 lba48 = (drive->addressing == 1) ? 1 : 0;
1923 task_ioreg_t command = WIN_NOP;
1925 if (pmac_ide_dma_start(drive, 1))
1928 if (drive->media != ide_disk)
1931 command = (lba48) ? WIN_READDMA_EXT : WIN_READDMA;
1934 command = (lba48) ? WIN_READ_EXT: WIN_READ;
1936 if (rq->flags & REQ_DRIVE_TASKFILE) {
1937 ide_task_t *args = rq->special;
1938 command = args->tfRegister[IDE_COMMAND_OFFSET];
1941 /* issue cmd to drive */
1942 ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, NULL);
1944 return pmac_ide_dma_begin(drive);
1948 * Start a DMA WRITE command
1951 pmac_ide_dma_write (ide_drive_t *drive)
1953 struct request *rq = HWGROUP(drive)->rq;
1954 u8 lba48 = (drive->addressing == 1) ? 1 : 0;
1955 task_ioreg_t command = WIN_NOP;
1957 if (pmac_ide_dma_start(drive, 0))
1960 if (drive->media != ide_disk)
1963 command = (lba48) ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
1965 command = (lba48) ? WIN_WRITE_EXT: WIN_WRITE;
1967 if (rq->flags & REQ_DRIVE_TASKFILE) {
1968 ide_task_t *args = rq->special;
1969 command = args->tfRegister[IDE_COMMAND_OFFSET];
1972 /* issue cmd to drive */
1973 ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, NULL);
1975 return pmac_ide_dma_begin(drive);
1979 * Kick the DMA controller into life after the DMA command has been issued
1983 pmac_ide_dma_begin (ide_drive_t *drive)
1985 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
1986 volatile struct dbdma_regs __iomem *dma;
1990 dma = pmif->dma_regs;
1992 writel((RUN << 16) | RUN, &dma->control);
1993 /* Make sure it gets to the controller right now */
1994 (void)readl(&dma->control);
1999 * After a DMA transfer, make sure the controller is stopped
2002 pmac_ide_dma_end (ide_drive_t *drive)
2004 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
2005 volatile struct dbdma_regs __iomem *dma;
2010 dma = pmif->dma_regs;
2012 drive->waiting_for_dma = 0;
2013 dstat = readl(&dma->status);
2014 writel(((RUN|WAKE|DEAD) << 16), &dma->control);
2015 pmac_ide_destroy_dmatable(drive);
2016 /* verify good dma status. we don't check for ACTIVE beeing 0. We should...
2017 * in theory, but with ATAPI decices doing buffer underruns, that would
2018 * cause us to disable DMA, which isn't what we want
2020 return (dstat & (RUN|DEAD)) != RUN;
2024 * Check out that the interrupt we got was for us. We can't always know this
2025 * for sure with those Apple interfaces (well, we could on the recent ones but
2026 * that's not implemented yet), on the other hand, we don't have shared interrupts
2027 * so it's not really a problem
2030 pmac_ide_dma_test_irq (ide_drive_t *drive)
2032 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
2033 volatile struct dbdma_regs __iomem *dma;
2034 unsigned long status, timeout;
2038 dma = pmif->dma_regs;
2040 /* We have to things to deal with here:
2042 * - The dbdma won't stop if the command was started
2043 * but completed with an error without transferring all
2044 * datas. This happens when bad blocks are met during
2045 * a multi-block transfer.
2047 * - The dbdma fifo hasn't yet finished flushing to
2048 * to system memory when the disk interrupt occurs.
2052 /* If ACTIVE is cleared, the STOP command have passed and
2053 * transfer is complete.
2055 status = readl(&dma->status);
2056 if (!(status & ACTIVE))
2058 if (!drive->waiting_for_dma)
2059 printk(KERN_WARNING "ide%d, ide_dma_test_irq \
2060 called while not waiting\n", HWIF(drive)->index);
2062 /* If dbdma didn't execute the STOP command yet, the
2063 * active bit is still set. We consider that we aren't
2064 * sharing interrupts (which is hopefully the case with
2065 * those controllers) and so we just try to flush the
2066 * channel for pending data in the fifo
2069 writel((FLUSH << 16) | FLUSH, &dma->control);
2073 status = readl(&dma->status);
2074 if ((status & FLUSH) == 0)
2076 if (++timeout > 100) {
2077 printk(KERN_WARNING "ide%d, ide_dma_test_irq \
2078 timeout flushing channel\n", HWIF(drive)->index);
2086 pmac_ide_dma_host_off (ide_drive_t *drive)
2092 pmac_ide_dma_host_on (ide_drive_t *drive)
2098 pmac_ide_dma_lostirq (ide_drive_t *drive)
2100 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
2101 volatile struct dbdma_regs __iomem *dma;
2102 unsigned long status;
2106 dma = pmif->dma_regs;
2108 status = readl(&dma->status);
2109 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
2114 * Allocate the data structures needed for using DMA with an interface
2115 * and fill the proper list of functions pointers
2118 pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
2120 /* We won't need pci_dev if we switch to generic consistent
2123 if (hwif->pci_dev == NULL)
2126 * Allocate space for the DBDMA commands.
2127 * The +2 is +1 for the stop command and +1 to allow for
2128 * aligning the start address to a multiple of 16 bytes.
2130 pmif->dma_table_cpu = (struct dbdma_cmd*)pci_alloc_consistent(
2132 (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
2133 &pmif->dma_table_dma);
2134 if (pmif->dma_table_cpu == NULL) {
2135 printk(KERN_ERR "%s: unable to allocate DMA command list\n",
2140 pmif->sg_table = kmalloc(sizeof(struct scatterlist) * MAX_DCMDS,
2142 if (pmif->sg_table == NULL) {
2143 pci_free_consistent( hwif->pci_dev,
2144 (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
2145 pmif->dma_table_cpu, pmif->dma_table_dma);
2148 hwif->ide_dma_off_quietly = &__ide_dma_off_quietly;
2149 hwif->ide_dma_on = &__ide_dma_on;
2150 hwif->ide_dma_check = &pmac_ide_dma_check;
2151 hwif->ide_dma_read = &pmac_ide_dma_read;
2152 hwif->ide_dma_write = &pmac_ide_dma_write;
2153 hwif->ide_dma_begin = &pmac_ide_dma_begin;
2154 hwif->ide_dma_end = &pmac_ide_dma_end;
2155 hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
2156 hwif->ide_dma_host_off = &pmac_ide_dma_host_off;
2157 hwif->ide_dma_host_on = &pmac_ide_dma_host_on;
2158 hwif->ide_dma_verbose = &__ide_dma_verbose;
2159 hwif->ide_dma_timeout = &__ide_dma_timeout;
2160 hwif->ide_dma_lostirq = &pmac_ide_dma_lostirq;
2162 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC_AUTO
2166 hwif->drives[0].autodma = hwif->autodma;
2167 hwif->drives[1].autodma = hwif->autodma;
2169 hwif->atapi_dma = 1;
2170 switch(pmif->kind) {
2171 case controller_un_ata6:
2172 case controller_k2_ata6:
2173 hwif->ultra_mask = pmif->cable_80 ? 0x3f : 0x07;
2174 hwif->mwdma_mask = 0x07;
2175 hwif->swdma_mask = 0x00;
2177 case controller_kl_ata4:
2178 hwif->ultra_mask = pmif->cable_80 ? 0x1f : 0x07;
2179 hwif->mwdma_mask = 0x07;
2180 hwif->swdma_mask = 0x00;
2183 hwif->ultra_mask = 0x00;
2184 hwif->mwdma_mask = 0x07;
2185 hwif->swdma_mask = 0x00;
2190 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */