2 * linux/drivers/ide/ide-pmac.c
4 * Support for IDE interfaces on PowerMacs.
5 * These IDE interfaces are memory-mapped and have a DBDMA channel
8 * Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
15 * Some code taken from drivers/ide/ide-dma.c:
17 * Copyright (c) 1995-1998 Mark Lord
19 * TODO: - Use pre-calculated (kauai) timing tables all the time and
20 * get rid of the "rounded" tables used previously, so we have the
21 * same table format for all controllers and can then just have one
25 #include <linux/config.h>
26 #include <linux/types.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/init.h>
30 #include <linux/delay.h>
31 #include <linux/ide.h>
32 #include <linux/notifier.h>
33 #include <linux/reboot.h>
34 #include <linux/pci.h>
35 #include <linux/adb.h>
36 #include <linux/pmu.h>
40 #include <asm/dbdma.h>
42 #include <asm/pci-bridge.h>
43 #include <asm/machdep.h>
44 #include <asm/pmac_feature.h>
45 #include <asm/sections.h>
49 #include <asm/mediabay.h>
52 #include "ide-timing.h"
54 extern void ide_do_request(ide_hwgroup_t *hwgroup, int masked_irq);
56 #define IDE_PMAC_DEBUG
58 #define DMA_WAIT_TIMEOUT 50
60 typedef struct pmac_ide_hwif {
61 unsigned long regbase;
68 int broken_dma_warn : 1;
69 struct device_node* node;
70 struct macio_dev *mdev;
72 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
73 /* Those fields are duplicating what is in hwif. We currently
74 * can't use the hwif ones because of some assumptions that are
75 * being done by the generic code about the kind of dma controller
76 * and format of the dma table. This will have to be fixed though.
78 volatile struct dbdma_regs* dma_regs;
79 struct dbdma_cmd* dma_table_cpu;
80 dma_addr_t dma_table_dma;
81 struct scatterlist* sg_table;
88 static pmac_ide_hwif_t pmac_ide[MAX_HWIFS] __pmacdata;
89 static int pmac_ide_count;
92 controller_ohare, /* OHare based */
93 controller_heathrow, /* Heathrow/Paddington */
94 controller_kl_ata3, /* KeyLargo ATA-3 */
95 controller_kl_ata4, /* KeyLargo ATA-4 */
96 controller_un_ata6, /* UniNorth2 ATA-6 */
97 controller_k2_ata6 /* K2 ATA-6 */
100 static const char* model_name[] = {
101 "OHare ATA", /* OHare based */
102 "Heathrow ATA", /* Heathrow/Paddington */
103 "KeyLargo ATA-3", /* KeyLargo ATA-3 (MDMA only) */
104 "KeyLargo ATA-4", /* KeyLargo ATA-4 (UDMA/66) */
105 "UniNorth ATA-6", /* UniNorth2 ATA-6 (UDMA/100) */
106 "K2 ATA-6", /* K2 ATA-6 (UDMA/100) */
110 * Extra registers, both 32-bit little-endian
112 #define IDE_TIMING_CONFIG 0x200
113 #define IDE_INTERRUPT 0x300
115 /* Kauai (U2) ATA has different register setup */
116 #define IDE_KAUAI_PIO_CONFIG 0x200
117 #define IDE_KAUAI_ULTRA_CONFIG 0x210
118 #define IDE_KAUAI_POLL_CONFIG 0x220
121 * Timing configuration register definitions
124 /* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
125 #define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
126 #define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
127 #define IDE_SYSCLK_NS 30 /* 33Mhz cell */
128 #define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
130 /* 100Mhz cell, found in Uninorth 2. I don't have much info about
131 * this one yet, it appears as a pci device (106b/0033) on uninorth
132 * internal PCI bus and its clock is controlled like gem or fw. It
133 * appears to be an evolution of keylargo ATA4 with a timing register
134 * extended to 2 32bits registers and a similar DBDMA channel. Other
135 * registers seem to exist but I can't tell much about them.
137 * So far, I'm using pre-calculated tables for this extracted from
138 * the values used by the MacOS X driver.
140 * The "PIO" register controls PIO and MDMA timings, the "ULTRA"
141 * register controls the UDMA timings. At least, it seems bit 0
142 * of this one enables UDMA vs. MDMA, and bits 4..7 are the
143 * cycle time in units of 10ns. Bits 8..15 are used but I don't
144 * know their meaning yet
146 #define TR_100_PIOREG_PIO_MASK 0xff000fff
147 #define TR_100_PIOREG_MDMA_MASK 0x00fff000
148 #define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
149 #define TR_100_UDMAREG_UDMA_EN 0x00000001
152 /* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
153 * 40 connector cable and to 4 on 80 connector one.
154 * Clock unit is 15ns (66Mhz)
156 * 3 Values can be programmed:
157 * - Write data setup, which appears to match the cycle time. They
158 * also call it DIOW setup.
159 * - Ready to pause time (from spec)
160 * - Address setup. That one is weird. I don't see where exactly
161 * it fits in UDMA cycles; I got its name from an obscure piece
162 * of commented out code in Darwin. They leave it to 0, we do as
163 * well, despite a comment that would lead to think it has a
165 * Apple also adds 60ns to the write data setup (or cycle time ?) on
168 #define TR_66_UDMA_MASK 0xfff00000
169 #define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
170 #define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */
171 #define TR_66_UDMA_ADDRSETUP_SHIFT 29
172 #define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
173 #define TR_66_UDMA_RDY2PAUS_SHIFT 25
174 #define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
175 #define TR_66_UDMA_WRDATASETUP_SHIFT 21
176 #define TR_66_MDMA_MASK 0x000ffc00
177 #define TR_66_MDMA_RECOVERY_MASK 0x000f8000
178 #define TR_66_MDMA_RECOVERY_SHIFT 15
179 #define TR_66_MDMA_ACCESS_MASK 0x00007c00
180 #define TR_66_MDMA_ACCESS_SHIFT 10
181 #define TR_66_PIO_MASK 0x000003ff
182 #define TR_66_PIO_RECOVERY_MASK 0x000003e0
183 #define TR_66_PIO_RECOVERY_SHIFT 5
184 #define TR_66_PIO_ACCESS_MASK 0x0000001f
185 #define TR_66_PIO_ACCESS_SHIFT 0
187 /* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
188 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
190 * The access time and recovery time can be programmed. Some older
191 * Darwin code bases limit OHare to a 150ns cycle time. I decided to do
192 * the same here for safety against broken old hardware ;)
193 * The HalfTick bit, when set, adds half a clock (15ns) to the access
194 * time and removes one from recovery. It's not supported on KeyLargo
195 * implementation afaik. The E bit appears to be set for PIO mode 0 and
196 * is used to reach long timings used in this mode.
198 #define TR_33_MDMA_MASK 0x003ff800
199 #define TR_33_MDMA_RECOVERY_MASK 0x001f0000
200 #define TR_33_MDMA_RECOVERY_SHIFT 16
201 #define TR_33_MDMA_ACCESS_MASK 0x0000f800
202 #define TR_33_MDMA_ACCESS_SHIFT 11
203 #define TR_33_MDMA_HALFTICK 0x00200000
204 #define TR_33_PIO_MASK 0x000007ff
205 #define TR_33_PIO_E 0x00000400
206 #define TR_33_PIO_RECOVERY_MASK 0x000003e0
207 #define TR_33_PIO_RECOVERY_SHIFT 5
208 #define TR_33_PIO_ACCESS_MASK 0x0000001f
209 #define TR_33_PIO_ACCESS_SHIFT 0
212 * Interrupt register definitions
214 #define IDE_INTR_DMA 0x80000000
215 #define IDE_INTR_DEVICE 0x40000000
217 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
219 /* Rounded Multiword DMA timings
221 * I gave up finding a generic formula for all controller
222 * types and instead, built tables based on timing values
223 * used by Apple in Darwin's implementation.
225 struct mdma_timings_t {
231 struct mdma_timings_t mdma_timings_33[] __pmacdata =
244 struct mdma_timings_t mdma_timings_33k[] __pmacdata =
257 struct mdma_timings_t mdma_timings_66[] __pmacdata =
270 /* KeyLargo ATA-4 Ultra DMA timings (rounded) */
272 int addrSetup; /* ??? */
275 } kl66_udma_timings[] __pmacdata =
277 { 0, 180, 120 }, /* Mode 0 */
278 { 0, 150, 90 }, /* 1 */
279 { 0, 120, 60 }, /* 2 */
280 { 0, 90, 45 }, /* 3 */
281 { 0, 90, 30 } /* 4 */
284 /* UniNorth 2 ATA/100 timings */
285 struct kauai_timing {
290 static struct kauai_timing kauai_pio_timings[] __pmacdata =
292 { 930 , 0x08000fff },
293 { 600 , 0x08000a92 },
294 { 383 , 0x0800060f },
295 { 360 , 0x08000492 },
296 { 330 , 0x0800048f },
297 { 300 , 0x080003cf },
298 { 270 , 0x080003cc },
299 { 240 , 0x0800038b },
300 { 239 , 0x0800030c },
301 { 180 , 0x05000249 },
305 static struct kauai_timing kauai_mdma_timings[] __pmacdata =
307 { 1260 , 0x00fff000 },
308 { 480 , 0x00618000 },
309 { 360 , 0x00492000 },
310 { 270 , 0x0038e000 },
311 { 240 , 0x0030c000 },
312 { 210 , 0x002cb000 },
313 { 180 , 0x00249000 },
314 { 150 , 0x00209000 },
315 { 120 , 0x00148000 },
319 static struct kauai_timing kauai_udma_timings[] __pmacdata =
321 { 120 , 0x000070c0 },
/*
 * Look up the precomputed timing-register value for a requested cycle
 * time in a Kauai timing table. Tables are sorted by decreasing
 * cycle_time and zero-terminated, so the first entry whose successor is
 * faster than the request is the best (slowest-acceptable) match.
 * NOTE(review): the loop dereferences table[i+1] while only guarding on
 * table[i].cycle_time — this relies on every table ending with a
 * sentinel entry; lines are elided in this chunk, so confirm the
 * fall-through/return path against the full source.
 */
331 kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
335 for (i=0; table[i].cycle_time; i++)
336 if (cycle_time > table[i+1].cycle_time)
337 return table[i].timing_reg;
341 /* allow up to 256 DBDMA commands per xfer */
342 #define MAX_DCMDS 256
345 * Wait 1s for disk to answer on IDE bus after a hard reset
346 * of the device (via GPIO/FCR).
348 * Some devices seem to "pollute" the bus even after dropping
349 * the BSY bit (typically some combo drives slave on the UDMA
350 * bus) after a hard reset. Since we hard reset all drives on
351 * KeyLargo ATA66, we have to keep that delay around. I may end
352 * up not hard resetting anymore on these and keep the delay only
353 * for older interfaces instead (we have to reset when coming
354 * from MacOS...) --BenH.
356 #define IDE_WAKEUP_DELAY (1*HZ)
358 static void pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif);
359 static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
360 static int pmac_ide_tune_chipset(ide_drive_t *drive, u8 speed);
361 static void pmac_ide_tuneproc(ide_drive_t *drive, u8 pio);
362 static void pmac_ide_selectproc(ide_drive_t *drive);
363 static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
364 static int pmac_ide_dma_begin (ide_drive_t *drive);
366 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
369 * Below is the code for blinking the laptop LED along with hard
373 #ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
375 /* Set to 50ms minimum led-on time (also used to limit frequency
376 * of requests sent to the PMU
378 #define PMU_HD_BLINK_TIME (HZ/50)
380 static struct adb_request pmu_blink_on, pmu_blink_off;
381 static spinlock_t pmu_blink_lock;
382 static unsigned long pmu_blink_stoptime;
383 static int pmu_blink_ledstate;
384 static struct timer_list pmu_blink_timer;
385 static int pmu_ide_blink_enabled;
389 pmu_hd_blink_timeout(unsigned long data)
393 spin_lock_irqsave(&pmu_blink_lock, flags);
395 /* We may have been triggered again in a racy way, check
396 * that we really want to switch it off
398 if (time_after(pmu_blink_stoptime, jiffies))
401 /* Previous req. not complete, try 100ms more */
402 if (pmu_blink_off.complete == 0)
403 mod_timer(&pmu_blink_timer, jiffies + PMU_HD_BLINK_TIME);
404 else if (pmu_blink_ledstate) {
405 pmu_request(&pmu_blink_off, NULL, 4, 0xee, 4, 0, 0);
406 pmu_blink_ledstate = 0;
409 spin_unlock_irqrestore(&pmu_blink_lock, flags);
413 pmu_hd_kick_blink(void *data, int rw)
417 pmu_blink_stoptime = jiffies + PMU_HD_BLINK_TIME;
419 mod_timer(&pmu_blink_timer, pmu_blink_stoptime);
420 /* Fast path when LED is already ON */
421 if (pmu_blink_ledstate == 1)
423 spin_lock_irqsave(&pmu_blink_lock, flags);
424 if (pmu_blink_on.complete && !pmu_blink_ledstate) {
425 pmu_request(&pmu_blink_on, NULL, 4, 0xee, 4, 0, 1);
426 pmu_blink_ledstate = 1;
428 spin_unlock_irqrestore(&pmu_blink_lock, flags);
432 pmu_hd_blink_init(void)
434 struct device_node *dt;
437 /* Currently, I only enable this feature on KeyLargo based laptops,
438 * older laptops may support it (at least heathrow/paddington) but
439 * I don't feel like loading those venerable old machines with so
440 * much additional interrupt & PMU activity...
442 if (pmu_get_model() != PMU_KEYLARGO_BASED)
445 dt = find_devices("device-tree");
448 model = (const char *)get_property(dt, "model", NULL);
451 if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 &&
452 strncmp(model, "iBook", strlen("iBook")) != 0)
455 pmu_blink_on.complete = 1;
456 pmu_blink_off.complete = 1;
457 spin_lock_init(&pmu_blink_lock);
458 init_timer(&pmu_blink_timer);
459 pmu_blink_timer.function = pmu_hd_blink_timeout;
464 #endif /* CONFIG_BLK_DEV_IDE_PMAC_BLINK */
467 * N.B. this can't be an initfunc, because the media-bay task can
468 * call ide_[un]register at any time.
471 pmac_ide_init_hwif_ports(hw_regs_t *hw,
472 unsigned long data_port, unsigned long ctrl_port,
480 for (ix = 0; ix < MAX_HWIFS; ++ix)
481 if (data_port == pmac_ide[ix].regbase)
484 if (ix >= MAX_HWIFS) {
485 /* Probably a PCI interface... */
486 for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; ++i)
487 hw->io_ports[i] = data_port + i - IDE_DATA_OFFSET;
488 hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
492 for (i = 0; i < 8; ++i)
493 hw->io_ports[i] = data_port + i * 0x10;
494 hw->io_ports[8] = data_port + 0x160;
497 *irq = pmac_ide[ix].irq;
501 * Apply the timings of the proper unit (master/slave) to the shared
502 * timing register when selecting that unit. This version is for
503 * ASICs with a single timing register
506 pmac_ide_selectproc(ide_drive_t *drive)
508 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
513 if (drive->select.b.unit & 0x01)
514 writel(pmif->timings[1],
515 (unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
517 writel(pmif->timings[0],
518 (unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
519 (void)readl((unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
523 * Apply the timings of the proper unit (master/slave) to the shared
524 * timing register when selecting that unit. This version is for
525 * ASICs with a dual timing register (Kauai)
528 pmac_ide_kauai_selectproc(ide_drive_t *drive)
530 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
535 if (drive->select.b.unit & 0x01) {
536 writel(pmif->timings[1],
537 (unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
538 writel(pmif->timings[3],
539 (unsigned *)(IDE_DATA_REG + IDE_KAUAI_ULTRA_CONFIG));
541 writel(pmif->timings[0],
542 (unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
543 writel(pmif->timings[2],
544 (unsigned *)(IDE_DATA_REG + IDE_KAUAI_ULTRA_CONFIG));
546 (void)readl((unsigned *)(IDE_DATA_REG + IDE_KAUAI_PIO_CONFIG));
550 * Force an update of controller timing values for a given drive
/*
 * Force the shared timing register(s) to be rewritten for the currently
 * relevant unit of `drive`, by invoking the appropriate selectproc:
 * the dual-register (Kauai) variant for UniNorth-2/K2 ATA-6 cells, the
 * single-register variant for all older cells.
 */
553 pmac_ide_do_update_timings(ide_drive_t *drive)
555 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
560 if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6)
561 pmac_ide_kauai_selectproc(drive);
563 pmac_ide_selectproc(drive);
567 pmac_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
572 tmp = readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
576 * Send the SET_FEATURE IDE command to the drive and update drive->id with
577 * the new state. We currently don't use the generic routine as it used to
578 * cause various trouble, especially with older mediabays.
579 * This code sometimes triggers a spurious interrupt though; I need
580 * to sort that out sooner or later and see if I can finally get the
581 * common version to work properly in all cases
584 pmac_ide_do_setfeature(ide_drive_t *drive, u8 command)
586 ide_hwif_t *hwif = HWIF(drive);
589 disable_irq_nosync(hwif->irq);
592 SELECT_MASK(drive, 0);
594 /* Get rid of pending error state */
595 (void) hwif->INB(IDE_STATUS_REG);
596 /* Timeout bumped for some powerbooks */
597 if (wait_for_ready(drive, 2000)) {
598 /* Timeout bumped for some powerbooks */
599 printk(KERN_ERR "%s: pmac_ide_do_setfeature disk not ready "
600 "before SET_FEATURE!\n", drive->name);
604 hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
605 hwif->OUTB(command, IDE_NSECTOR_REG);
606 hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
607 hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
609 /* Timeout bumped for some powerbooks */
610 result = wait_for_ready(drive, 2000);
611 hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
613 printk(KERN_ERR "%s: pmac_ide_do_setfeature disk not ready "
614 "after SET_FEATURE !\n", drive->name);
616 SELECT_MASK(drive, 0);
618 drive->id->dma_ultra &= ~0xFF00;
619 drive->id->dma_mword &= ~0x0F00;
620 drive->id->dma_1word &= ~0x0F00;
623 drive->id->dma_ultra |= 0x8080; break;
625 drive->id->dma_ultra |= 0x4040; break;
627 drive->id->dma_ultra |= 0x2020; break;
629 drive->id->dma_ultra |= 0x1010; break;
631 drive->id->dma_ultra |= 0x0808; break;
633 drive->id->dma_ultra |= 0x0404; break;
635 drive->id->dma_ultra |= 0x0202; break;
637 drive->id->dma_ultra |= 0x0101; break;
639 drive->id->dma_mword |= 0x0404; break;
641 drive->id->dma_mword |= 0x0202; break;
643 drive->id->dma_mword |= 0x0101; break;
645 drive->id->dma_1word |= 0x0404; break;
647 drive->id->dma_1word |= 0x0202; break;
649 drive->id->dma_1word |= 0x0101; break;
653 enable_irq(hwif->irq);
658 * Old tuning functions (called on hdparm -p), sets up drive PIO timings
661 pmac_ide_tuneproc(ide_drive_t *drive, u8 pio)
665 unsigned accessTicks, recTicks;
666 unsigned accessTime, recTime;
667 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
672 /* which drive is it ? */
673 timings = &pmif->timings[drive->select.b.unit & 0x01];
675 pio = ide_get_best_pio_mode(drive, pio, 4, &d);
677 switch (pmif->kind) {
678 case controller_un_ata6:
679 case controller_k2_ata6: {
681 u32 tr = kauai_lookup_timing(kauai_pio_timings, d.cycle_time);
684 *timings = ((*timings) & ~TR_100_PIOREG_PIO_MASK) | tr;
687 case controller_kl_ata4:
689 recTime = d.cycle_time - ide_pio_timings[pio].active_time
690 - ide_pio_timings[pio].setup_time;
691 recTime = max(recTime, 150U);
692 accessTime = ide_pio_timings[pio].active_time;
693 accessTime = max(accessTime, 150U);
694 accessTicks = SYSCLK_TICKS_66(accessTime);
695 accessTicks = min(accessTicks, 0x1fU);
696 recTicks = SYSCLK_TICKS_66(recTime);
697 recTicks = min(recTicks, 0x1fU);
698 *timings = ((*timings) & ~TR_66_PIO_MASK) |
699 (accessTicks << TR_66_PIO_ACCESS_SHIFT) |
700 (recTicks << TR_66_PIO_RECOVERY_SHIFT);
705 recTime = d.cycle_time - ide_pio_timings[pio].active_time
706 - ide_pio_timings[pio].setup_time;
707 recTime = max(recTime, 150U);
708 accessTime = ide_pio_timings[pio].active_time;
709 accessTime = max(accessTime, 150U);
710 accessTicks = SYSCLK_TICKS(accessTime);
711 accessTicks = min(accessTicks, 0x1fU);
712 accessTicks = max(accessTicks, 4U);
713 recTicks = SYSCLK_TICKS(recTime);
714 recTicks = min(recTicks, 0x1fU);
715 recTicks = max(recTicks, 5U) - 4;
717 recTicks--; /* guess, but it's only for PIO0, so... */
720 *timings = ((*timings) & ~TR_33_PIO_MASK) |
721 (accessTicks << TR_33_PIO_ACCESS_SHIFT) |
722 (recTicks << TR_33_PIO_RECOVERY_SHIFT);
724 *timings |= TR_33_PIO_E;
729 #ifdef IDE_PMAC_DEBUG
730 printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n",
731 drive->name, pio, *timings);
734 if (drive->select.all == HWIF(drive)->INB(IDE_SELECT_REG))
735 pmac_ide_do_update_timings(drive);
738 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
741 * Calculate KeyLargo ATA/66 UDMA timings
744 set_timings_udma_ata4(u32 *timings, u8 speed)
746 unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;
748 if (speed > XFER_UDMA_4)
751 rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
752 wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
753 addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);
755 *timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
756 (wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
757 (rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
758 (addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) |
760 #ifdef IDE_PMAC_DEBUG
761 printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
762 speed & 0xf, *timings);
769 * Calculate Kauai ATA/100 UDMA timings
772 set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
774 struct ide_timing *t = ide_timing_find_mode(speed);
777 if (speed > XFER_UDMA_5 || t == NULL)
779 tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
782 *ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK) | tr;
783 *ultra_timings = (*ultra_timings) | TR_100_UDMAREG_UDMA_EN;
789 * Calculate MDMA timings for all cells
792 set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
793 u8 speed, int drive_cycle_time)
795 int cycleTime, accessTime, recTime;
796 unsigned accessTicks, recTicks;
797 struct mdma_timings_t* tm = NULL;
800 /* Get default cycle time for mode */
801 switch(speed & 0xf) {
802 case 0: cycleTime = 480; break;
803 case 1: cycleTime = 150; break;
804 case 2: cycleTime = 120; break;
808 /* Adjust for drive */
809 if (drive_cycle_time && drive_cycle_time > cycleTime)
810 cycleTime = drive_cycle_time;
811 /* OHare limits according to some old Apple sources */
812 if ((intf_type == controller_ohare) && (cycleTime < 150))
814 /* Get the proper timing array for this controller */
816 case controller_un_ata6:
817 case controller_k2_ata6:
819 case controller_kl_ata4:
820 tm = mdma_timings_66;
822 case controller_kl_ata3:
823 tm = mdma_timings_33k;
826 tm = mdma_timings_33;
830 /* Lookup matching access & recovery times */
833 if (tm[i+1].cycleTime < cycleTime)
839 cycleTime = tm[i].cycleTime;
840 accessTime = tm[i].accessTime;
841 recTime = tm[i].recoveryTime;
843 #ifdef IDE_PMAC_DEBUG
844 printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n",
845 drive->name, cycleTime, accessTime, recTime);
849 case controller_un_ata6:
850 case controller_k2_ata6: {
852 u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
855 *timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
856 *timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
859 case controller_kl_ata4:
861 accessTicks = SYSCLK_TICKS_66(accessTime);
862 accessTicks = min(accessTicks, 0x1fU);
863 accessTicks = max(accessTicks, 0x1U);
864 recTicks = SYSCLK_TICKS_66(recTime);
865 recTicks = min(recTicks, 0x1fU);
866 recTicks = max(recTicks, 0x3U);
867 /* Clear out mdma bits and disable udma */
868 *timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) |
869 (accessTicks << TR_66_MDMA_ACCESS_SHIFT) |
870 (recTicks << TR_66_MDMA_RECOVERY_SHIFT);
872 case controller_kl_ata3:
873 /* 33Mhz cell on KeyLargo */
874 accessTicks = SYSCLK_TICKS(accessTime);
875 accessTicks = max(accessTicks, 1U);
876 accessTicks = min(accessTicks, 0x1fU);
877 accessTime = accessTicks * IDE_SYSCLK_NS;
878 recTicks = SYSCLK_TICKS(recTime);
879 recTicks = max(recTicks, 1U);
880 recTicks = min(recTicks, 0x1fU);
881 *timings = ((*timings) & ~TR_33_MDMA_MASK) |
882 (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
883 (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
886 /* 33Mhz cell on others */
888 int origAccessTime = accessTime;
889 int origRecTime = recTime;
891 accessTicks = SYSCLK_TICKS(accessTime);
892 accessTicks = max(accessTicks, 1U);
893 accessTicks = min(accessTicks, 0x1fU);
894 accessTime = accessTicks * IDE_SYSCLK_NS;
895 recTicks = SYSCLK_TICKS(recTime);
896 recTicks = max(recTicks, 2U) - 1;
897 recTicks = min(recTicks, 0x1fU);
898 recTime = (recTicks + 1) * IDE_SYSCLK_NS;
899 if ((accessTicks > 1) &&
900 ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) &&
901 ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) {
905 *timings = ((*timings) & ~TR_33_MDMA_MASK) |
906 (accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
907 (recTicks << TR_33_MDMA_RECOVERY_SHIFT);
909 *timings |= TR_33_MDMA_HALFTICK;
912 #ifdef IDE_PMAC_DEBUG
913 printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n",
914 drive->name, speed & 0xf, *timings);
918 #endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
921 * Speedproc. This function is called by the core to set any of the standard
922 * timing (PIO, MDMA or UDMA) to both the drive and the controller.
923 * You may notice we don't use this function on normal "dma check" operation,
924 * our dedicated function is more precise as it uses the drive provided
925 * cycle time value. We should probably fix this one to deal with that too...
928 pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
930 int unit = (drive->select.b.unit & 0x01);
932 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
933 u32 *timings, *timings2;
938 timings = &pmif->timings[unit];
939 timings2 = &pmif->timings[unit+2];
942 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
944 if (pmif->kind != controller_un_ata6 &&
945 pmif->kind != controller_k2_ata6)
949 if (HWIF(drive)->udma_four == 0)
954 if (pmif->kind == controller_kl_ata4)
955 ret = set_timings_udma_ata4(timings, speed);
956 else if (pmif->kind == controller_un_ata6
957 || pmif->kind == controller_k2_ata6)
958 ret = set_timings_udma_ata6(timings, timings2, speed);
965 ret = set_timings_mdma(drive, pmif->kind, timings, timings2, speed, 0);
971 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
977 pmac_ide_tuneproc(drive, speed & 0x07);
985 ret = pmac_ide_do_setfeature(drive, speed);
989 pmac_ide_do_update_timings(drive);
990 drive->current_speed = speed;
996 * Blast some well known "safe" values to the timing registers at init or
997 * wakeup from sleep time, before we do real calculation
1000 sanitize_timings(pmac_ide_hwif_t *pmif)
1002 unsigned int value, value2 = 0;
1004 switch(pmif->kind) {
1005 case controller_un_ata6:
1006 case controller_k2_ata6:
1008 value2 = 0x00002921;
1010 case controller_kl_ata4:
1013 case controller_kl_ata3:
1016 case controller_heathrow:
1017 case controller_ohare:
1022 pmif->timings[0] = pmif->timings[1] = value;
1023 pmif->timings[2] = pmif->timings[3] = value2;
/* Return the MMIO register base address of IDE interface `index`.
 * No bounds checking is done on `index` — callers must pass a valid
 * hwif number. */
1026 unsigned long __pmac
1027 pmac_ide_get_base(int index)
1029 return pmac_ide[index].regbase;
/* Scan our interface table to recognize whether `base` is the register
 * base of one of the pmac IDE hwifs. (Return statements are elided in
 * this chunk — presumably returns an index/flag on match; confirm
 * against the full source.) */
1033 pmac_ide_check_base(unsigned long base)
1037 for (ix = 0; ix < MAX_HWIFS; ++ix)
1038 if (base == pmac_ide[ix].regbase)
/* Map an MMIO register base back to the interrupt line of the matching
 * pmac IDE interface; linear scan over the static pmac_ide[] table. */
1044 pmac_ide_get_irq(unsigned long base)
1048 for (ix = 0; ix < MAX_HWIFS; ++ix)
1049 if (base == pmac_ide[ix].regbase)
1050 return pmac_ide[ix].irq;
1054 static int ide_majors[] __pmacdata = { 3, 22, 33, 34, 56, 57 };
/* Given the first `n` characters of an Open Firmware boot-device path,
 * find the pmac IDE interface whose device-tree node full_name matches
 * it exactly (prefix match of length n, then NUL) and return the block
 * device number built from ide_majors[] with minor 0 (master drive). */
1057 pmac_find_ide_boot(char *bootdevice, int n)
1062 * Look through the list of IDE interfaces for this one.
1064 for (i = 0; i < pmac_ide_count; ++i) {
1066 if (!pmac_ide[i].node || !pmac_ide[i].node->full_name)
1068 name = pmac_ide[i].node->full_name;
1069 if (memcmp(name, bootdevice, n) == 0 && name[n] == 0) {
1070 /* XXX should cope with the 2nd drive as well... */
1071 return MKDEV(ide_majors[i], 0);
1078 /* Suspend call back, should be called after the child devices
1079 * have actually been suspended
1082 pmac_ide_do_suspend(ide_hwif_t *hwif)
1084 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1086 /* We clear the timings */
1087 pmif->timings[0] = 0;
1088 pmif->timings[1] = 0;
1090 #ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
1091 /* Note: This code will be called for every hwif, thus we'll
1092 * try several time to stop the LED blinker timer, but that
1093 * should be harmless
1095 if (pmu_ide_blink_enabled) {
1096 unsigned long flags;
1098 /* Make sure we don't hit the PMU blink */
1099 spin_lock_irqsave(&pmu_blink_lock, flags);
1100 if (pmu_blink_ledstate)
1101 del_timer(&pmu_blink_timer);
1102 pmu_blink_ledstate = 0;
1103 spin_unlock_irqrestore(&pmu_blink_lock, flags);
1105 #endif /* CONFIG_BLK_DEV_IDE_PMAC_BLINK */
1107 /* The media bay will handle itself just fine */
1111 /* Disable the bus */
1112 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 0);
1117 /* Resume call back, should be called before the child devices
1121 pmac_ide_do_resume(ide_hwif_t *hwif)
1123 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1125 /* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
1126 if (!pmif->mediabay) {
1127 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
1128 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
1129 set_current_state(TASK_UNINTERRUPTIBLE);
1130 schedule_timeout(HZ/100);
1131 ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);
1132 set_current_state(TASK_UNINTERRUPTIBLE);
1133 schedule_timeout(IDE_WAKEUP_DELAY);
1136 /* Sanitize drive timings */
1137 sanitize_timings(pmif);
1143 * Setup, register & probe an IDE channel driven by this driver, this is
1144 * called by one of the 2 probe functions (macio or PCI). Note that a channel
1145 * that ends up being free of any device is not kept around by this driver
1146 * (it is kept in 2.4). This introduces an interface numbering change on some
1147 * rare machines unfortunately, but it's better this way.
1150 pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1152 struct device_node *np = pmif->node;
1156 pmif->broken_dma = pmif->broken_dma_warn = 0;
1157 if (device_is_compatible(np, "kauai-ata"))
1158 pmif->kind = controller_un_ata6;
1159 else if (device_is_compatible(np, "K2-UATA"))
1160 pmif->kind = controller_k2_ata6;
1161 else if (device_is_compatible(np, "keylargo-ata")) {
1162 if (strcmp(np->name, "ata-4") == 0)
1163 pmif->kind = controller_kl_ata4;
1165 pmif->kind = controller_kl_ata3;
1166 } else if (device_is_compatible(np, "heathrow-ata"))
1167 pmif->kind = controller_heathrow;
1169 pmif->kind = controller_ohare;
1170 pmif->broken_dma = 1;
1173 bidp = (int *)get_property(np, "AAPL,bus-id", NULL);
1174 pmif->aapl_bus_id = bidp ? *bidp : 0;
1176 /* Get cable type from device-tree */
1177 if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6
1178 || pmif->kind == controller_k2_ata6) {
1179 char* cable = get_property(np, "cable-type", NULL);
1180 if (cable && !strncmp(cable, "80-", 3))
1186 /* Make sure we have sane timings */
1187 sanitize_timings(pmif);
1189 #ifndef CONFIG_PPC64
1190 /* XXX FIXME: Media bay stuff need re-organizing */
1191 if (np->parent && np->parent->name
1192 && strcasecmp(np->parent->name, "media-bay") == 0) {
1193 #ifdef CONFIG_PMAC_PBOOK
1194 media_bay_set_ide_infos(np->parent, pmif->regbase, pmif->irq, hwif->index);
1195 #endif /* CONFIG_PMAC_PBOOK */
1198 pmif->aapl_bus_id = 1;
1199 } else if (pmif->kind == controller_ohare) {
1200 /* The code below is having trouble on some ohare machines
1201 * (timing related ?). Until I can put my hand on one of these
1202 * units, I keep the old way
1204 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
1208 /* This is necessary to enable IDE when net-booting */
1209 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
1210 ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
1211 set_current_state(TASK_UNINTERRUPTIBLE);
1212 schedule_timeout(HZ/100);
1213 ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
1214 set_current_state(TASK_UNINTERRUPTIBLE);
1215 schedule_timeout(IDE_WAKEUP_DELAY);
1218 /* Setup MMIO ops */
1219 default_hwif_mmiops(hwif);
1220 hwif->OUTBSYNC = pmac_outbsync;
1222 /* Tell common code _not_ to mess with resources */
1224 hwif->hwif_data = pmif;
1225 pmac_ide_init_hwif_ports(&hwif->hw, pmif->regbase, 0, &hwif->irq);
1226 memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports));
1227 hwif->chipset = ide_pmac;
1228 hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay;
1229 hwif->hold = pmif->mediabay;
1230 hwif->udma_four = pmif->cable_80;
1231 hwif->drives[0].unmask = 1;
1232 hwif->drives[1].unmask = 1;
1233 hwif->tuneproc = pmac_ide_tuneproc;
1234 if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6)
1235 hwif->selectproc = pmac_ide_kauai_selectproc;
1237 hwif->selectproc = pmac_ide_selectproc;
1238 hwif->speedproc = pmac_ide_tune_chipset;
1240 #ifdef CONFIG_BLK_DEV_IDE_PMAC_BLINK
1241 pmu_ide_blink_enabled = pmu_hd_blink_init();
1243 if (pmu_ide_blink_enabled)
1244 hwif->led_act = pmu_hd_kick_blink;
1247 printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
1248 hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
1249 pmif->mediabay ? " (mediabay)" : "", hwif->irq);
1251 #ifdef CONFIG_PMAC_PBOOK
1252 if (pmif->mediabay && check_media_bay_by_base(pmif->regbase, MB_CD) == 0)
1254 #endif /* CONFIG_PMAC_PBOOK */
1256 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1257 /* has a DBDMA controller channel */
1259 pmac_ide_setup_dma(pmif, hwif);
1260 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1262 /* We probe the hwif now */
1263 probe_hwif_init(hwif);
1265 /* The IDE core will have set hwif->present if devices are attached;
1266 * if none are, we discard the interface, unless we are on a media bay slot
1268 if (!hwif->present && !pmif->mediabay) {
1269 printk(KERN_INFO "ide%d: Bus empty, interface released.\n",
1271 default_hwif_iops(hwif);
1272 for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; ++i)
1273 hwif->io_ports[i] = 0;
1274 hwif->chipset = ide_unknown;
1283 * Attach to a macio probed interface
1285 static int __devinit
1286 pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_match *match)
1288 unsigned long base, regbase;
1291 pmac_ide_hwif_t *pmif;
1295 while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
1296 || pmac_ide[i].node != NULL))
1298 if (i >= MAX_HWIFS) {
1299 printk(KERN_ERR "ide-pmac: MacIO interface attach with no slot\n");
1300 printk(KERN_ERR " %s\n", mdev->ofdev.node->full_name);
1304 pmif = &pmac_ide[i];
1305 hwif = &ide_hwifs[i];
1307 if (mdev->ofdev.node->n_addrs == 0) {
1308 printk(KERN_WARNING "ide%d: no address for %s\n",
1309 i, mdev->ofdev.node->full_name);
1313 /* Request memory resource for IO ports */
1314 if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
1315 printk(KERN_ERR "ide%d: can't request mmio resource !\n", i);
1319 /* XXX This is bogus. Should be fixed in the registry by checking
1320 * the kind of host interrupt controller, a bit like gatwick
1321 * fixes in irq.c. That works well enough for the single case
1322 * where that happens though...
1324 if (macio_irq_count(mdev) == 0) {
1325 printk(KERN_WARNING "ide%d: no intrs for device %s, using 13\n",
1326 i, mdev->ofdev.node->full_name);
1329 irq = macio_irq(mdev, 0);
1331 base = (unsigned long)ioremap(macio_resource_start(mdev, 0), 0x400);
1334 hwif->pci_dev = mdev->bus->pdev;
1335 hwif->gendev.parent = &mdev->ofdev.dev;
1338 pmif->node = mdev->ofdev.node;
1339 pmif->regbase = regbase;
1341 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1342 if (macio_resource_count(mdev) >= 2) {
1343 if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
1344 printk(KERN_WARNING "ide%d: can't request DMA resource !\n", i);
1346 pmif->dma_regs = (volatile struct dbdma_regs*)
1347 ioremap(macio_resource_start(mdev, 1), 0x1000);
1349 pmif->dma_regs = NULL;
1350 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1351 dev_set_drvdata(&mdev->ofdev.dev, hwif);
1353 rc = pmac_ide_setup_device(pmif, hwif);
1355 /* The inteface is released to the common IDE layer */
1356 dev_set_drvdata(&mdev->ofdev.dev, NULL);
1357 iounmap((void *)base);
1359 iounmap((void *)pmif->dma_regs);
1360 memset(pmif, 0, sizeof(*pmif));
1361 macio_release_resource(mdev, 0);
1363 macio_release_resource(mdev, 1);
1370 pmac_ide_macio_suspend(struct macio_dev *mdev, u32 state)
1372 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1375 if (state != mdev->ofdev.dev.power_state && state >= 2) {
1376 rc = pmac_ide_do_suspend(hwif);
1378 mdev->ofdev.dev.power_state = state;
1385 pmac_ide_macio_resume(struct macio_dev *mdev)
1387 ide_hwif_t *hwif = (ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
1390 if (mdev->ofdev.dev.power_state != 0) {
1391 rc = pmac_ide_do_resume(hwif);
1393 mdev->ofdev.dev.power_state = 0;
1400 * Attach to a PCI probed interface
1402 static int __devinit
1403 pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1406 struct device_node *np;
1407 pmac_ide_hwif_t *pmif;
1409 unsigned long rbase, rlen;
1412 np = pci_device_to_OF_node(pdev);
1414 printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
1418 while (i < MAX_HWIFS && (ide_hwifs[i].io_ports[IDE_DATA_OFFSET] != 0
1419 || pmac_ide[i].node != NULL))
1421 if (i >= MAX_HWIFS) {
1422 printk(KERN_ERR "ide-pmac: PCI interface attach with no slot\n");
1423 printk(KERN_ERR " %s\n", np->full_name);
1427 pmif = &pmac_ide[i];
1428 hwif = &ide_hwifs[i];
1430 if (pci_enable_device(pdev)) {
1431 printk(KERN_WARNING "ide%i: Can't enable PCI device for %s\n",
1435 pci_set_master(pdev);
1437 if (pci_request_regions(pdev, "Kauai ATA")) {
1438 printk(KERN_ERR "ide%d: Cannot obtain PCI resources for %s\n",
1443 hwif->pci_dev = pdev;
1444 hwif->gendev.parent = &pdev->dev;
1448 rbase = pci_resource_start(pdev, 0);
1449 rlen = pci_resource_len(pdev, 0);
1451 base = (unsigned long) ioremap(rbase, rlen);
1452 pmif->regbase = base + 0x2000;
1453 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1454 pmif->dma_regs = (volatile struct dbdma_regs*)(base + 0x1000);
1455 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
1457 /* We use the OF node irq mapping */
1458 if (np->n_intrs == 0)
1459 pmif->irq = pdev->irq;
1461 pmif->irq = np->intrs[0].line;
1463 pci_set_drvdata(pdev, hwif);
1465 rc = pmac_ide_setup_device(pmif, hwif);
1467 /* The inteface is released to the common IDE layer */
1468 pci_set_drvdata(pdev, NULL);
1469 iounmap((void *)base);
1470 memset(pmif, 0, sizeof(*pmif));
1471 pci_release_regions(pdev);
1478 pmac_ide_pci_suspend(struct pci_dev *pdev, u32 state)
1480 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
1483 if (state != pdev->dev.power_state && state >= 2) {
1484 rc = pmac_ide_do_suspend(hwif);
1486 pdev->dev.power_state = state;
1493 pmac_ide_pci_resume(struct pci_dev *pdev)
1495 ide_hwif_t *hwif = (ide_hwif_t *)pci_get_drvdata(pdev);
1498 if (pdev->dev.power_state != 0) {
1499 rc = pmac_ide_do_resume(hwif);
1501 pdev->dev.power_state = 0;
1507 static struct of_match pmac_ide_macio_match[] =
1511 .type = OF_ANY_MATCH,
1512 .compatible = OF_ANY_MATCH
1516 .type = OF_ANY_MATCH,
1517 .compatible = OF_ANY_MATCH
1520 .name = OF_ANY_MATCH,
1522 .compatible = OF_ANY_MATCH
1525 .name = OF_ANY_MATCH,
1527 .compatible = OF_ANY_MATCH
1532 static struct macio_driver pmac_ide_macio_driver =
1535 .match_table = pmac_ide_macio_match,
1536 .probe = pmac_ide_macio_attach,
1537 .suspend = pmac_ide_macio_suspend,
1538 .resume = pmac_ide_macio_resume,
1541 static struct pci_device_id pmac_ide_pci_match[] = {
1542 { PCI_VENDOR_ID_APPLE, PCI_DEVIEC_ID_APPLE_UNI_N_ATA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1543 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1544 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1547 static struct pci_driver pmac_ide_pci_driver = {
1549 .id_table = pmac_ide_pci_match,
1550 .probe = pmac_ide_pci_attach,
1551 .suspend = pmac_ide_pci_suspend,
1552 .resume = pmac_ide_pci_resume,
1556 pmac_ide_probe(void)
1558 if (_machine != _MACH_Pmac)
1561 #ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
1562 pci_register_driver(&pmac_ide_pci_driver);
1563 macio_register_driver(&pmac_ide_macio_driver);
1565 macio_register_driver(&pmac_ide_macio_driver);
1566 pci_register_driver(&pmac_ide_pci_driver);
1570 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
1573 * This is very close to the generic ide-dma version of the function except
1574 * that we don't use the fields in the hwif but our own copies for sg_table
1575 * and friends. We build & map the sglist for a given request
1578 pmac_ide_build_sglist(ide_drive_t *drive, struct request *rq)
1580 ide_hwif_t *hwif = HWIF(drive);
1581 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1582 struct scatterlist *sg = pmif->sg_table;
1585 if (hwif->sg_dma_active)
1588 nents = blk_rq_map_sg(drive->queue, rq, sg);
1590 if (rq_data_dir(rq) == READ)
1591 pmif->sg_dma_direction = PCI_DMA_FROMDEVICE;
1593 pmif->sg_dma_direction = PCI_DMA_TODEVICE;
1595 return pci_map_sg(hwif->pci_dev, sg, nents, pmif->sg_dma_direction);
1599 * Same as above but for a "raw" taskfile request
1602 pmac_ide_raw_build_sglist(ide_drive_t *drive, struct request *rq)
1604 ide_hwif_t *hwif = HWIF(drive);
1605 pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1606 struct scatterlist *sg = pmif->sg_table;
1608 ide_task_t *args = rq->special;
1609 unsigned char *virt_addr = rq->buffer;
1610 int sector_count = rq->nr_sectors;
1612 if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
1613 pmif->sg_dma_direction = PCI_DMA_TODEVICE;
1615 pmif->sg_dma_direction = PCI_DMA_FROMDEVICE;
1617 if (sector_count > 128) {
1618 memset(&sg[nents], 0, sizeof(*sg));
1619 sg[nents].page = virt_to_page(virt_addr);
1620 sg[nents].offset = offset_in_page(virt_addr);
1621 sg[nents].length = 128 * SECTOR_SIZE;
1623 virt_addr = virt_addr + (128 * SECTOR_SIZE);
1624 sector_count -= 128;
1626 memset(&sg[nents], 0, sizeof(*sg));
1627 sg[nents].page = virt_to_page(virt_addr);
1628 sg[nents].offset = offset_in_page(virt_addr);
1629 sg[nents].length = sector_count * SECTOR_SIZE;
1632 return pci_map_sg(hwif->pci_dev, sg, nents, pmif->sg_dma_direction);
1636 * pmac_ide_build_dmatable builds the DBDMA command list
1637 * for a transfer and sets the DBDMA channel to point to it.
1640 pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
1642 struct dbdma_cmd *table;
1644 ide_hwif_t *hwif = HWIF(drive);
1645 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1646 volatile struct dbdma_regs *dma = pmif->dma_regs;
1647 struct scatterlist *sg;
1648 int wr = (rq_data_dir(rq) == WRITE);
1650 /* DMA table is already aligned */
1651 table = (struct dbdma_cmd *) pmif->dma_table_cpu;
1653 /* Make sure DMA controller is stopped (necessary ?) */
1654 writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
1655 while (readl(&dma->status) & RUN)
1659 if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE)
1660 pmif->sg_nents = i = pmac_ide_raw_build_sglist(drive, rq);
1662 pmif->sg_nents = i = pmac_ide_build_sglist(drive, rq);
1666 /* Build DBDMA commands list */
1667 sg = pmif->sg_table;
1668 while (i && sg_dma_len(sg)) {
1672 cur_addr = sg_dma_address(sg);
1673 cur_len = sg_dma_len(sg);
1675 if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
1676 if (pmif->broken_dma_warn == 0) {
1677 printk(KERN_WARNING "%s: DMA on non aligned address,"
1678 "switching to PIO on Ohare chipset\n", drive->name);
1679 pmif->broken_dma_warn = 1;
1681 goto use_pio_instead;
1684 unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;
1686 if (count++ >= MAX_DCMDS) {
1687 printk(KERN_WARNING "%s: DMA table too small\n",
1689 goto use_pio_instead;
1691 st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE);
1692 st_le16(&table->req_count, tc);
1693 st_le32(&table->phy_addr, cur_addr);
1695 table->xfer_status = 0;
1696 table->res_count = 0;
1705 /* convert the last command to an input/output last command */
1707 st_le16(&table[-1].command, wr? OUTPUT_LAST: INPUT_LAST);
1708 /* add the stop command to the end of the list */
1709 memset(table, 0, sizeof(struct dbdma_cmd));
1710 st_le16(&table->command, DBDMA_STOP);
1712 writel(pmif->dma_table_dma, &dma->cmdptr);
1716 printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);
1718 pci_unmap_sg(hwif->pci_dev,
1721 pmif->sg_dma_direction);
1722 hwif->sg_dma_active = 0;
1723 return 0; /* revert to PIO for this request */
1726 /* Teardown mappings after DMA has completed. */
1728 pmac_ide_destroy_dmatable (ide_drive_t *drive)
1730 struct pci_dev *dev = HWIF(drive)->pci_dev;
1731 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
1732 struct scatterlist *sg = pmif->sg_table;
1733 int nents = pmif->sg_nents;
1736 pci_unmap_sg(dev, sg, nents, pmif->sg_dma_direction);
1738 HWIF(drive)->sg_dma_active = 0;
1743 * Pick up best MDMA timing for the drive and apply it
1746 pmac_ide_mdma_enable(ide_drive_t *drive, u16 mode)
1748 ide_hwif_t *hwif = HWIF(drive);
1749 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1750 int drive_cycle_time;
1751 struct hd_driveid *id = drive->id;
1752 u32 *timings, *timings2;
1753 u32 timing_local[2];
1756 /* which drive is it ? */
1757 timings = &pmif->timings[drive->select.b.unit & 0x01];
1758 timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];
1760 /* Check if drive provide explicit cycle time */
1761 if ((id->field_valid & 2) && (id->eide_dma_time))
1762 drive_cycle_time = id->eide_dma_time;
1764 drive_cycle_time = 0;
1766 /* Copy timings to local image */
1767 timing_local[0] = *timings;
1768 timing_local[1] = *timings2;
1770 /* Calculate controller timings */
1771 ret = set_timings_mdma( drive, pmif->kind,
1779 /* Set feature on drive */
1780 printk(KERN_INFO "%s: Enabling MultiWord DMA %d\n", drive->name, mode & 0xf);
1781 ret = pmac_ide_do_setfeature(drive, mode);
1783 printk(KERN_WARNING "%s: Failed !\n", drive->name);
1787 /* Apply timings to controller */
1788 *timings = timing_local[0];
1789 *timings2 = timing_local[1];
1791 /* Set speed info in drive */
1792 drive->current_speed = mode;
1793 if (!drive->init_speed)
1794 drive->init_speed = mode;
1800 * Pick up best UDMA timing for the drive and apply it
1803 pmac_ide_udma_enable(ide_drive_t *drive, u16 mode)
1805 ide_hwif_t *hwif = HWIF(drive);
1806 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1807 u32 *timings, *timings2;
1808 u32 timing_local[2];
1811 /* which drive is it ? */
1812 timings = &pmif->timings[drive->select.b.unit & 0x01];
1813 timings2 = &pmif->timings[(drive->select.b.unit & 0x01) + 2];
1815 /* Copy timings to local image */
1816 timing_local[0] = *timings;
1817 timing_local[1] = *timings2;
1819 /* Calculate timings for interface */
1820 if (pmif->kind == controller_un_ata6 || pmif->kind == controller_k2_ata6)
1821 ret = set_timings_udma_ata6( &timing_local[0],
1825 ret = set_timings_udma_ata4(&timing_local[0], mode);
1829 /* Set feature on drive */
1830 printk(KERN_INFO "%s: Enabling Ultra DMA %d\n", drive->name, mode & 0x0f);
1831 ret = pmac_ide_do_setfeature(drive, mode);
1833 printk(KERN_WARNING "%s: Failed !\n", drive->name);
1837 /* Apply timings to controller */
1838 *timings = timing_local[0];
1839 *timings2 = timing_local[1];
1841 /* Set speed info in drive */
1842 drive->current_speed = mode;
1843 if (!drive->init_speed)
1844 drive->init_speed = mode;
1850 * Check what is the best DMA timing setting for the drive and
1851 * call appropriate functions to apply it.
1854 pmac_ide_dma_check(ide_drive_t *drive)
1856 struct hd_driveid *id = drive->id;
1857 ide_hwif_t *hwif = HWIF(drive);
1858 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1861 drive->using_dma = 0;
1863 if (drive->media == ide_floppy)
1865 if (((id->capability & 1) == 0) && !__ide_dma_good_drive(drive))
1867 if (__ide_dma_bad_drive(drive))
1874 if (pmif->kind == controller_kl_ata4 || pmif->kind == controller_un_ata6
1875 || pmif->kind == controller_k2_ata6) {
1877 if (pmif->cable_80) {
1878 map |= XFER_UDMA_66;
1879 if (pmif->kind == controller_un_ata6 ||
1880 pmif->kind == controller_k2_ata6)
1881 map |= XFER_UDMA_100;
1884 mode = ide_find_best_mode(drive, map);
1885 if (mode & XFER_UDMA)
1886 drive->using_dma = pmac_ide_udma_enable(drive, mode);
1887 else if (mode & XFER_MWDMA)
1888 drive->using_dma = pmac_ide_mdma_enable(drive, mode);
1889 hwif->OUTB(0, IDE_CONTROL_REG);
1890 /* Apply settings to controller */
1891 pmac_ide_do_update_timings(drive);
1897 * Prepare a DMA transfer. We build the DMA table, adjust the timings for
1898 * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
1901 pmac_ide_dma_start(ide_drive_t *drive, int reading)
1903 ide_hwif_t *hwif = HWIF(drive);
1904 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
1905 struct request *rq = HWGROUP(drive)->rq;
1906 u8 unit = (drive->select.b.unit & 0x01);
1911 ata4 = (pmif->kind == controller_kl_ata4);
1913 if (!pmac_ide_build_dmatable(drive, rq))
1916 /* Apple adds 60ns to wrDataSetup on reads */
1917 if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
1918 writel(pmif->timings[unit] + (reading ? 0x00800000UL : 0),
1919 (unsigned *)(IDE_DATA_REG+IDE_TIMING_CONFIG));
1920 (void)readl((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG));
1923 drive->waiting_for_dma = 1;
1929 * Start a DMA READ command
1932 pmac_ide_dma_read(ide_drive_t *drive)
1934 struct request *rq = HWGROUP(drive)->rq;
1935 u8 lba48 = (drive->addressing == 1) ? 1 : 0;
1936 task_ioreg_t command = WIN_NOP;
1938 if (pmac_ide_dma_start(drive, 1))
1941 if (drive->media != ide_disk)
1944 command = (lba48) ? WIN_READDMA_EXT : WIN_READDMA;
1947 command = (lba48) ? WIN_READ_EXT: WIN_READ;
1949 if (rq->flags & REQ_DRIVE_TASKFILE) {
1950 ide_task_t *args = rq->special;
1951 command = args->tfRegister[IDE_COMMAND_OFFSET];
1954 /* issue cmd to drive */
1955 ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, NULL);
1957 return pmac_ide_dma_begin(drive);
1961 * Start a DMA WRITE command
1964 pmac_ide_dma_write (ide_drive_t *drive)
1966 struct request *rq = HWGROUP(drive)->rq;
1967 u8 lba48 = (drive->addressing == 1) ? 1 : 0;
1968 task_ioreg_t command = WIN_NOP;
1970 if (pmac_ide_dma_start(drive, 0))
1973 if (drive->media != ide_disk)
1976 command = (lba48) ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
1978 command = (lba48) ? WIN_WRITE_EXT: WIN_WRITE;
1980 if (rq->flags & REQ_DRIVE_TASKFILE) {
1981 ide_task_t *args = rq->special;
1982 command = args->tfRegister[IDE_COMMAND_OFFSET];
1985 /* issue cmd to drive */
1986 ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, NULL);
1988 return pmac_ide_dma_begin(drive);
1992 * Kick the DMA controller into life after the DMA command has been issued
1996 pmac_ide_dma_begin (ide_drive_t *drive)
1998 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
1999 volatile struct dbdma_regs *dma;
2003 dma = pmif->dma_regs;
2005 writel((RUN << 16) | RUN, &dma->control);
2006 /* Make sure it gets to the controller right now */
2007 (void)readl(&dma->control);
2012 * After a DMA transfer, make sure the controller is stopped
2015 pmac_ide_dma_end (ide_drive_t *drive)
2017 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
2018 volatile struct dbdma_regs *dma;
2023 dma = pmif->dma_regs;
2025 drive->waiting_for_dma = 0;
2026 dstat = readl(&dma->status);
2027 writel(((RUN|WAKE|DEAD) << 16), &dma->control);
2028 pmac_ide_destroy_dmatable(drive);
2029 /* verify good dma status. we don't check for ACTIVE beeing 0. We should...
2030 * in theory, but with ATAPI decices doing buffer underruns, that would
2031 * cause us to disable DMA, which isn't what we want
2033 return (dstat & (RUN|DEAD)) != RUN;
2037 * Check out that the interrupt we got was for us. We can't always know this
2038 * for sure with those Apple interfaces (well, we could on the recent ones but
2039 * that's not implemented yet), on the other hand, we don't have shared interrupts
2040 * so it's not really a problem
2043 pmac_ide_dma_test_irq (ide_drive_t *drive)
2045 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
2046 volatile struct dbdma_regs *dma;
2047 unsigned long status, timeout;
2051 dma = pmif->dma_regs;
2053 /* We have to things to deal with here:
2055 * - The dbdma won't stop if the command was started
2056 * but completed with an error without transferring all
2057 * datas. This happens when bad blocks are met during
2058 * a multi-block transfer.
2060 * - The dbdma fifo hasn't yet finished flushing to
2061 * to system memory when the disk interrupt occurs.
2065 /* If ACTIVE is cleared, the STOP command have passed and
2066 * transfer is complete.
2068 status = readl(&dma->status);
2069 if (!(status & ACTIVE))
2071 if (!drive->waiting_for_dma)
2072 printk(KERN_WARNING "ide%d, ide_dma_test_irq \
2073 called while not waiting\n", HWIF(drive)->index);
2075 /* If dbdma didn't execute the STOP command yet, the
2076 * active bit is still set. We consider that we aren't
2077 * sharing interrupts (which is hopefully the case with
2078 * those controllers) and so we just try to flush the
2079 * channel for pending data in the fifo
2082 writel((FLUSH << 16) | FLUSH, &dma->control);
2086 status = readl(&dma->status);
2087 if ((status & FLUSH) == 0)
2089 if (++timeout > 100) {
2090 printk(KERN_WARNING "ide%d, ide_dma_test_irq \
2091 timeout flushing channel\n", HWIF(drive)->index);
2099 pmac_ide_dma_host_off (ide_drive_t *drive)
2105 pmac_ide_dma_host_on (ide_drive_t *drive)
2111 pmac_ide_dma_lostirq (ide_drive_t *drive)
2113 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
2114 volatile struct dbdma_regs *dma;
2115 unsigned long status;
2119 dma = pmif->dma_regs;
2121 status = readl(&dma->status);
2122 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
2127 * Allocate the data structures needed for using DMA with an interface
2128 * and fill the proper list of functions pointers
2131 pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
2133 /* We won't need pci_dev if we switch to generic consistent
2136 if (hwif->pci_dev == NULL)
2139 * Allocate space for the DBDMA commands.
2140 * The +2 is +1 for the stop command and +1 to allow for
2141 * aligning the start address to a multiple of 16 bytes.
2143 pmif->dma_table_cpu = (struct dbdma_cmd*)pci_alloc_consistent(
2145 (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
2146 &pmif->dma_table_dma);
2147 if (pmif->dma_table_cpu == NULL) {
2148 printk(KERN_ERR "%s: unable to allocate DMA command list\n",
2153 pmif->sg_table = kmalloc(sizeof(struct scatterlist) * MAX_DCMDS,
2155 if (pmif->sg_table == NULL) {
2156 pci_free_consistent( hwif->pci_dev,
2157 (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
2158 pmif->dma_table_cpu, pmif->dma_table_dma);
2161 hwif->ide_dma_off_quietly = &__ide_dma_off_quietly;
2162 hwif->ide_dma_on = &__ide_dma_on;
2163 hwif->ide_dma_check = &pmac_ide_dma_check;
2164 hwif->ide_dma_read = &pmac_ide_dma_read;
2165 hwif->ide_dma_write = &pmac_ide_dma_write;
2166 hwif->ide_dma_begin = &pmac_ide_dma_begin;
2167 hwif->ide_dma_end = &pmac_ide_dma_end;
2168 hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
2169 hwif->ide_dma_host_off = &pmac_ide_dma_host_off;
2170 hwif->ide_dma_host_on = &pmac_ide_dma_host_on;
2171 hwif->ide_dma_verbose = &__ide_dma_verbose;
2172 hwif->ide_dma_timeout = &__ide_dma_timeout;
2173 hwif->ide_dma_lostirq = &pmac_ide_dma_lostirq;
2175 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC_AUTO
2179 hwif->drives[0].autodma = hwif->autodma;
2180 hwif->drives[1].autodma = hwif->autodma;
2182 hwif->atapi_dma = 1;
2183 switch(pmif->kind) {
2184 case controller_un_ata6:
2185 case controller_k2_ata6:
2186 hwif->ultra_mask = pmif->cable_80 ? 0x3f : 0x07;
2187 hwif->mwdma_mask = 0x07;
2188 hwif->swdma_mask = 0x00;
2190 case controller_kl_ata4:
2191 hwif->ultra_mask = pmif->cable_80 ? 0x1f : 0x07;
2192 hwif->mwdma_mask = 0x07;
2193 hwif->swdma_mask = 0x00;
2196 hwif->ultra_mask = 0x00;
2197 hwif->mwdma_mask = 0x07;
2198 hwif->swdma_mask = 0x00;
2203 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */