/*
 *  linux/drivers/ide/ide-iops.c	Version 0.37	Mar 05, 2003
 *
 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2003		Red Hat <alan@redhat.com>
 *
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
34 * Conventional PIO operations for ATA devices
37 static u8 ide_inb (unsigned long port)
39 return (u8) inb(port);
42 static u16 ide_inw (unsigned long port)
44 return (u16) inw(port);
47 static void ide_insw (unsigned long port, void *addr, u32 count)
49 insw(port, addr, count);
52 static u32 ide_inl (unsigned long port)
54 return (u32) inl(port);
57 static void ide_insl (unsigned long port, void *addr, u32 count)
59 insl(port, addr, count);
62 static void ide_outb (u8 val, unsigned long port)
67 static void ide_outbsync (ide_drive_t *drive, u8 addr, unsigned long port)
72 static void ide_outw (u16 val, unsigned long port)
77 static void ide_outsw (unsigned long port, void *addr, u32 count)
79 outsw(port, addr, count);
82 static void ide_outl (u32 val, unsigned long port)
87 static void ide_outsl (unsigned long port, void *addr, u32 count)
89 outsl(port, addr, count);
92 void default_hwif_iops (ide_hwif_t *hwif)
94 hwif->OUTB = ide_outb;
95 hwif->OUTBSYNC = ide_outbsync;
96 hwif->OUTW = ide_outw;
97 hwif->OUTL = ide_outl;
98 hwif->OUTSW = ide_outsw;
99 hwif->OUTSL = ide_outsl;
103 hwif->INSW = ide_insw;
104 hwif->INSL = ide_insl;
107 EXPORT_SYMBOL(default_hwif_iops);
113 static u8 ide_no_inb(unsigned long port)
118 static u16 ide_no_inw (unsigned long port)
123 static void ide_no_insw (unsigned long port, void *addr, u32 count)
127 static u32 ide_no_inl (unsigned long port)
132 static void ide_no_insl (unsigned long port, void *addr, u32 count)
136 static void ide_no_outb (u8 val, unsigned long port)
140 static void ide_no_outbsync (ide_drive_t *drive, u8 addr, unsigned long port)
144 static void ide_no_outw (u16 val, unsigned long port)
148 static void ide_no_outsw (unsigned long port, void *addr, u32 count)
152 static void ide_no_outl (u32 val, unsigned long port)
156 static void ide_no_outsl (unsigned long port, void *addr, u32 count)
160 void removed_hwif_iops (ide_hwif_t *hwif)
162 hwif->OUTB = ide_no_outb;
163 hwif->OUTBSYNC = ide_no_outbsync;
164 hwif->OUTW = ide_no_outw;
165 hwif->OUTL = ide_no_outl;
166 hwif->OUTSW = ide_no_outsw;
167 hwif->OUTSL = ide_no_outsl;
168 hwif->INB = ide_no_inb;
169 hwif->INW = ide_no_inw;
170 hwif->INL = ide_no_inl;
171 hwif->INSW = ide_no_insw;
172 hwif->INSL = ide_no_insl;
175 EXPORT_SYMBOL(removed_hwif_iops);
178 * MMIO operations, typically used for SATA controllers
181 static u8 ide_mm_inb (unsigned long port)
183 return (u8) readb((void __iomem *) port);
186 static u16 ide_mm_inw (unsigned long port)
188 return (u16) readw((void __iomem *) port);
191 static void ide_mm_insw (unsigned long port, void *addr, u32 count)
193 __ide_mm_insw((void __iomem *) port, addr, count);
196 static u32 ide_mm_inl (unsigned long port)
198 return (u32) readl((void __iomem *) port);
201 static void ide_mm_insl (unsigned long port, void *addr, u32 count)
203 __ide_mm_insl((void __iomem *) port, addr, count);
206 static void ide_mm_outb (u8 value, unsigned long port)
208 writeb(value, (void __iomem *) port);
211 static void ide_mm_outbsync (ide_drive_t *drive, u8 value, unsigned long port)
213 writeb(value, (void __iomem *) port);
216 static void ide_mm_outw (u16 value, unsigned long port)
218 writew(value, (void __iomem *) port);
221 static void ide_mm_outsw (unsigned long port, void *addr, u32 count)
223 __ide_mm_outsw((void __iomem *) port, addr, count);
226 static void ide_mm_outl (u32 value, unsigned long port)
228 writel(value, (void __iomem *) port);
231 static void ide_mm_outsl (unsigned long port, void *addr, u32 count)
233 __ide_mm_outsl((void __iomem *) port, addr, count);
236 void default_hwif_mmiops (ide_hwif_t *hwif)
238 hwif->OUTB = ide_mm_outb;
239 /* Most systems will need to override OUTBSYNC, alas however
240 this one is controller specific! */
241 hwif->OUTBSYNC = ide_mm_outbsync;
242 hwif->OUTW = ide_mm_outw;
243 hwif->OUTL = ide_mm_outl;
244 hwif->OUTSW = ide_mm_outsw;
245 hwif->OUTSL = ide_mm_outsl;
246 hwif->INB = ide_mm_inb;
247 hwif->INW = ide_mm_inw;
248 hwif->INL = ide_mm_inl;
249 hwif->INSW = ide_mm_insw;
250 hwif->INSL = ide_mm_insl;
253 EXPORT_SYMBOL(default_hwif_mmiops);
255 void default_hwif_transport (ide_hwif_t *hwif)
257 hwif->ata_input_data = ata_input_data;
258 hwif->ata_output_data = ata_output_data;
259 hwif->atapi_input_bytes = atapi_input_bytes;
260 hwif->atapi_output_bytes = atapi_output_bytes;
263 EXPORT_SYMBOL(default_hwif_transport);
265 u32 ide_read_24 (ide_drive_t *drive)
267 u8 hcyl = HWIF(drive)->INB(IDE_HCYL_REG);
268 u8 lcyl = HWIF(drive)->INB(IDE_LCYL_REG);
269 u8 sect = HWIF(drive)->INB(IDE_SECTOR_REG);
270 return (hcyl<<16)|(lcyl<<8)|sect;
273 EXPORT_SYMBOL(ide_read_24);
275 void SELECT_DRIVE (ide_drive_t *drive)
277 if (HWIF(drive)->selectproc)
278 HWIF(drive)->selectproc(drive);
279 HWIF(drive)->OUTB(drive->select.all, IDE_SELECT_REG);
282 EXPORT_SYMBOL(SELECT_DRIVE);
284 void SELECT_INTERRUPT (ide_drive_t *drive)
286 if (HWIF(drive)->intrproc)
287 HWIF(drive)->intrproc(drive);
289 HWIF(drive)->OUTB(drive->ctl|2, IDE_CONTROL_REG);
292 void SELECT_MASK (ide_drive_t *drive, int mask)
294 if (HWIF(drive)->maskproc)
295 HWIF(drive)->maskproc(drive, mask);
298 void QUIRK_LIST (ide_drive_t *drive)
300 if (HWIF(drive)->quirkproc)
301 drive->quirk_list = HWIF(drive)->quirkproc(drive);
305 * Some localbus EIDE interfaces require a special access sequence
306 * when using 32-bit I/O instructions to transfer data. We call this
307 * the "vlb_sync" sequence, which consists of three successive reads
308 * of the sector count register location, with interrupts disabled
309 * to ensure that the reads all happen together.
311 void ata_vlb_sync (ide_drive_t *drive, unsigned long port)
313 (void) HWIF(drive)->INB(port);
314 (void) HWIF(drive)->INB(port);
315 (void) HWIF(drive)->INB(port);
319 * This is used for most PIO data transfers *from* the IDE interface
321 void ata_input_data (ide_drive_t *drive, void *buffer, u32 wcount)
323 ide_hwif_t *hwif = HWIF(drive);
324 u8 io_32bit = drive->io_32bit;
329 local_irq_save(flags);
330 ata_vlb_sync(drive, IDE_NSECTOR_REG);
331 hwif->INSL(IDE_DATA_REG, buffer, wcount);
332 local_irq_restore(flags);
334 hwif->INSL(IDE_DATA_REG, buffer, wcount);
336 hwif->INSW(IDE_DATA_REG, buffer, wcount<<1);
341 * This is used for most PIO data transfers *to* the IDE interface
343 void ata_output_data (ide_drive_t *drive, void *buffer, u32 wcount)
345 ide_hwif_t *hwif = HWIF(drive);
346 u8 io_32bit = drive->io_32bit;
351 local_irq_save(flags);
352 ata_vlb_sync(drive, IDE_NSECTOR_REG);
353 hwif->OUTSL(IDE_DATA_REG, buffer, wcount);
354 local_irq_restore(flags);
356 hwif->OUTSL(IDE_DATA_REG, buffer, wcount);
358 hwif->OUTSW(IDE_DATA_REG, buffer, wcount<<1);
363 * The following routines are mainly used by the ATAPI drivers.
365 * These routines will round up any request for an odd number of bytes,
366 * so if an odd bytecount is specified, be sure that there's at least one
367 * extra byte allocated for the buffer.
370 void atapi_input_bytes (ide_drive_t *drive, void *buffer, u32 bytecount)
372 ide_hwif_t *hwif = HWIF(drive);
375 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
376 if (MACH_IS_ATARI || MACH_IS_Q40) {
377 /* Atari has a byte-swapped IDE interface */
378 insw_swapw(IDE_DATA_REG, buffer, bytecount / 2);
381 #endif /* CONFIG_ATARI || CONFIG_Q40 */
382 hwif->ata_input_data(drive, buffer, bytecount / 4);
383 if ((bytecount & 0x03) >= 2)
384 hwif->INSW(IDE_DATA_REG, ((u8 *)buffer)+(bytecount & ~0x03), 1);
387 EXPORT_SYMBOL(atapi_input_bytes);
389 void atapi_output_bytes (ide_drive_t *drive, void *buffer, u32 bytecount)
391 ide_hwif_t *hwif = HWIF(drive);
394 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
395 if (MACH_IS_ATARI || MACH_IS_Q40) {
396 /* Atari has a byte-swapped IDE interface */
397 outsw_swapw(IDE_DATA_REG, buffer, bytecount / 2);
400 #endif /* CONFIG_ATARI || CONFIG_Q40 */
401 hwif->ata_output_data(drive, buffer, bytecount / 4);
402 if ((bytecount & 0x03) >= 2)
403 hwif->OUTSW(IDE_DATA_REG, ((u8*)buffer)+(bytecount & ~0x03), 1);
406 EXPORT_SYMBOL(atapi_output_bytes);
409 * Beginning of Taskfile OPCODE Library and feature sets.
411 void ide_fix_driveid (struct hd_driveid *id)
413 #ifndef __LITTLE_ENDIAN
418 id->config = __le16_to_cpu(id->config);
419 id->cyls = __le16_to_cpu(id->cyls);
420 id->reserved2 = __le16_to_cpu(id->reserved2);
421 id->heads = __le16_to_cpu(id->heads);
422 id->track_bytes = __le16_to_cpu(id->track_bytes);
423 id->sector_bytes = __le16_to_cpu(id->sector_bytes);
424 id->sectors = __le16_to_cpu(id->sectors);
425 id->vendor0 = __le16_to_cpu(id->vendor0);
426 id->vendor1 = __le16_to_cpu(id->vendor1);
427 id->vendor2 = __le16_to_cpu(id->vendor2);
428 stringcast = (u16 *)&id->serial_no[0];
429 for (i = 0; i < (20/2); i++)
430 stringcast[i] = __le16_to_cpu(stringcast[i]);
431 id->buf_type = __le16_to_cpu(id->buf_type);
432 id->buf_size = __le16_to_cpu(id->buf_size);
433 id->ecc_bytes = __le16_to_cpu(id->ecc_bytes);
434 stringcast = (u16 *)&id->fw_rev[0];
435 for (i = 0; i < (8/2); i++)
436 stringcast[i] = __le16_to_cpu(stringcast[i]);
437 stringcast = (u16 *)&id->model[0];
438 for (i = 0; i < (40/2); i++)
439 stringcast[i] = __le16_to_cpu(stringcast[i]);
440 id->dword_io = __le16_to_cpu(id->dword_io);
441 id->reserved50 = __le16_to_cpu(id->reserved50);
442 id->field_valid = __le16_to_cpu(id->field_valid);
443 id->cur_cyls = __le16_to_cpu(id->cur_cyls);
444 id->cur_heads = __le16_to_cpu(id->cur_heads);
445 id->cur_sectors = __le16_to_cpu(id->cur_sectors);
446 id->cur_capacity0 = __le16_to_cpu(id->cur_capacity0);
447 id->cur_capacity1 = __le16_to_cpu(id->cur_capacity1);
448 id->lba_capacity = __le32_to_cpu(id->lba_capacity);
449 id->dma_1word = __le16_to_cpu(id->dma_1word);
450 id->dma_mword = __le16_to_cpu(id->dma_mword);
451 id->eide_pio_modes = __le16_to_cpu(id->eide_pio_modes);
452 id->eide_dma_min = __le16_to_cpu(id->eide_dma_min);
453 id->eide_dma_time = __le16_to_cpu(id->eide_dma_time);
454 id->eide_pio = __le16_to_cpu(id->eide_pio);
455 id->eide_pio_iordy = __le16_to_cpu(id->eide_pio_iordy);
456 for (i = 0; i < 2; ++i)
457 id->words69_70[i] = __le16_to_cpu(id->words69_70[i]);
458 for (i = 0; i < 4; ++i)
459 id->words71_74[i] = __le16_to_cpu(id->words71_74[i]);
460 id->queue_depth = __le16_to_cpu(id->queue_depth);
461 for (i = 0; i < 4; ++i)
462 id->words76_79[i] = __le16_to_cpu(id->words76_79[i]);
463 id->major_rev_num = __le16_to_cpu(id->major_rev_num);
464 id->minor_rev_num = __le16_to_cpu(id->minor_rev_num);
465 id->command_set_1 = __le16_to_cpu(id->command_set_1);
466 id->command_set_2 = __le16_to_cpu(id->command_set_2);
467 id->cfsse = __le16_to_cpu(id->cfsse);
468 id->cfs_enable_1 = __le16_to_cpu(id->cfs_enable_1);
469 id->cfs_enable_2 = __le16_to_cpu(id->cfs_enable_2);
470 id->csf_default = __le16_to_cpu(id->csf_default);
471 id->dma_ultra = __le16_to_cpu(id->dma_ultra);
472 id->trseuc = __le16_to_cpu(id->trseuc);
473 id->trsEuc = __le16_to_cpu(id->trsEuc);
474 id->CurAPMvalues = __le16_to_cpu(id->CurAPMvalues);
475 id->mprc = __le16_to_cpu(id->mprc);
476 id->hw_config = __le16_to_cpu(id->hw_config);
477 id->acoustic = __le16_to_cpu(id->acoustic);
478 id->msrqs = __le16_to_cpu(id->msrqs);
479 id->sxfert = __le16_to_cpu(id->sxfert);
480 id->sal = __le16_to_cpu(id->sal);
481 id->spg = __le32_to_cpu(id->spg);
482 id->lba_capacity_2 = __le64_to_cpu(id->lba_capacity_2);
483 for (i = 0; i < 22; i++)
484 id->words104_125[i] = __le16_to_cpu(id->words104_125[i]);
485 id->last_lun = __le16_to_cpu(id->last_lun);
486 id->word127 = __le16_to_cpu(id->word127);
487 id->dlf = __le16_to_cpu(id->dlf);
488 id->csfo = __le16_to_cpu(id->csfo);
489 for (i = 0; i < 26; i++)
490 id->words130_155[i] = __le16_to_cpu(id->words130_155[i]);
491 id->word156 = __le16_to_cpu(id->word156);
492 for (i = 0; i < 3; i++)
493 id->words157_159[i] = __le16_to_cpu(id->words157_159[i]);
494 id->cfa_power = __le16_to_cpu(id->cfa_power);
495 for (i = 0; i < 14; i++)
496 id->words161_175[i] = __le16_to_cpu(id->words161_175[i]);
497 for (i = 0; i < 31; i++)
498 id->words176_205[i] = __le16_to_cpu(id->words176_205[i]);
499 for (i = 0; i < 48; i++)
500 id->words206_254[i] = __le16_to_cpu(id->words206_254[i]);
501 id->integrity_word = __le16_to_cpu(id->integrity_word);
503 # error "Please fix <asm/byteorder.h>"
508 EXPORT_SYMBOL(ide_fix_driveid);
510 void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
512 u8 *p = s, *end = &s[bytecount & ~1]; /* bytecount must be even */
515 /* convert from big-endian to host byte order */
516 for (p = end ; p != s;) {
517 unsigned short *pp = (unsigned short *) (p -= 2);
521 /* strip leading blanks */
522 while (s != end && *s == ' ')
524 /* compress internal blanks and strip trailing blanks */
525 while (s != end && *s) {
526 if (*s++ != ' ' || (s != end && *s && *s != ' '))
529 /* wipe out trailing garbage */
534 EXPORT_SYMBOL(ide_fixstring);
537 * Needed for PCI irq sharing
539 int drive_is_ready (ide_drive_t *drive)
541 ide_hwif_t *hwif = HWIF(drive);
544 if (drive->waiting_for_dma)
545 return hwif->ide_dma_test_irq(drive);
548 /* need to guarantee 400ns since last command was issued */
552 #ifdef CONFIG_IDEPCI_SHARE_IRQ
554 * We do a passive status test under shared PCI interrupts on
555 * cards that truly share the ATA side interrupt, but may also share
556 * an interrupt with another pci card/device. We make no assumptions
557 * about possible isa-pnp and pci-pnp issues yet.
560 stat = hwif->INB(IDE_ALTSTATUS_REG);
562 #endif /* CONFIG_IDEPCI_SHARE_IRQ */
563 /* Note: this may clear a pending IRQ!! */
564 stat = hwif->INB(IDE_STATUS_REG);
566 if (stat & BUSY_STAT)
567 /* drive busy: definitely not interrupting */
570 /* drive ready: *might* be interrupting */
574 EXPORT_SYMBOL(drive_is_ready);
577 * Global for All, and taken from ide-pmac.c. Can be called
578 * with spinlock held & IRQs disabled, so don't schedule !
580 int wait_for_ready (ide_drive_t *drive, int timeout)
582 ide_hwif_t *hwif = HWIF(drive);
586 stat = hwif->INB(IDE_STATUS_REG);
587 if (!(stat & BUSY_STAT)) {
588 if (drive->ready_stat == 0)
590 else if ((stat & drive->ready_stat)||(stat & ERR_STAT))
595 if ((stat & ERR_STAT) || timeout <= 0) {
596 if (stat & ERR_STAT) {
597 printk(KERN_ERR "%s: wait_for_ready, "
598 "error status: %x\n", drive->name, stat);
605 EXPORT_SYMBOL(wait_for_ready);
608 * This routine busy-waits for the drive status to be not "busy".
609 * It then checks the status for all of the "good" bits and none
610 * of the "bad" bits, and if all is okay it returns 0. All other
611 * cases return 1 after invoking ide_error() -- caller should just return.
613 * This routine should get fixed to not hog the cpu during extra long waits..
614 * That could be done by busy-waiting for the first jiffy or two, and then
615 * setting a timer to wake up at half second intervals thereafter,
616 * until timeout is achieved, before timing out.
618 int ide_wait_stat (ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
620 ide_hwif_t *hwif = HWIF(drive);
625 /* bail early if we've exceeded max_failures */
626 if (drive->max_failures && (drive->failures > drive->max_failures)) {
627 *startstop = ide_stopped;
631 udelay(1); /* spec allows drive 400ns to assert "BUSY" */
632 if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
633 local_irq_set(flags);
635 while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
636 if (time_after(jiffies, timeout)) {
638 * One last read after the timeout in case
639 * heavy interrupt load made us not make any
640 * progress during the timeout..
642 stat = hwif->INB(IDE_STATUS_REG);
643 if (!(stat & BUSY_STAT))
646 local_irq_restore(flags);
647 *startstop = DRIVER(drive)->error(drive, "status timeout", stat);
651 local_irq_restore(flags);
654 * Allow status to settle, then read it again.
655 * A few rare drives vastly violate the 400ns spec here,
656 * so we'll wait up to 10usec for a "good" status
657 * rather than expensively fail things immediately.
658 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
660 for (i = 0; i < 10; i++) {
662 if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), good, bad))
665 *startstop = DRIVER(drive)->error(drive, "status error", stat);
669 EXPORT_SYMBOL(ide_wait_stat);
672 * All hosts that use the 80c ribbon must use!
673 * The name is derived from upper byte of word 93 and the 80c ribbon.
675 u8 eighty_ninty_three (ide_drive_t *drive)
677 if(HWIF(drive)->udma_four == 0)
679 if (!(drive->id->hw_config & 0x6000))
681 #ifndef CONFIG_IDEDMA_IVB
682 if(!(drive->id->hw_config & 0x4000))
684 #endif /* CONFIG_IDEDMA_IVB */
688 EXPORT_SYMBOL(eighty_ninty_three);
690 int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
692 if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
693 (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
694 (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
695 #ifndef CONFIG_IDEDMA_IVB
696 if ((drive->id->hw_config & 0x6000) == 0) {
697 #else /* !CONFIG_IDEDMA_IVB */
698 if (((drive->id->hw_config & 0x2000) == 0) ||
699 ((drive->id->hw_config & 0x4000) == 0)) {
700 #endif /* CONFIG_IDEDMA_IVB */
701 printk("%s: Speed warnings UDMA 3/4/5 is not "
702 "functional.\n", drive->name);
705 if (!HWIF(drive)->udma_four) {
706 printk("%s: Speed warnings UDMA 3/4/5 is not "
716 * Backside of HDIO_DRIVE_CMD call of SETFEATURES_XFER.
717 * 1 : Safe to update drive->id DMA registers.
718 * 0 : OOPs not allowed.
720 int set_transfer (ide_drive_t *drive, ide_task_t *args)
722 if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
723 (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) &&
724 (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) &&
725 (drive->id->dma_ultra ||
726 drive->id->dma_mword ||
727 drive->id->dma_1word))
#ifdef CONFIG_BLK_DEV_IDEDMA
/*
 *	ide_auto_reduce_xfer - step down one UDMA mode after CRC errors
 *	@drive: drive that accumulated crc_count errors
 *
 *	Clears the CRC counter and returns the next slower UDMA mode,
 *	or PIO4 when already at (or below) UDMA0, since pre-UDMA DMA
 *	modes have no CRC protection.
 */
static u8 ide_auto_reduce_xfer (ide_drive_t *drive)
{
	if (!drive->crc_count)
		return drive->current_speed;
	drive->crc_count = 0;

	switch(drive->current_speed) {
		case XFER_UDMA_7:	return XFER_UDMA_6;
		case XFER_UDMA_6:	return XFER_UDMA_5;
		case XFER_UDMA_5:	return XFER_UDMA_4;
		case XFER_UDMA_4:	return XFER_UDMA_3;
		case XFER_UDMA_3:	return XFER_UDMA_2;
		case XFER_UDMA_2:	return XFER_UDMA_1;
		case XFER_UDMA_1:	return XFER_UDMA_0;
			/*
			 * OOPS we do not goto non Ultra DMA modes
			 * without iCRC's available we force
			 * the system to PIO and make the user
			 * invoke the ATA-1 ATA-2 DMA modes.
			 */
		case XFER_UDMA_0:
		default:		return XFER_PIO_4;
	}
}
#endif /* CONFIG_BLK_DEV_IDEDMA */
763 int ide_driveid_update (ide_drive_t *drive)
765 ide_hwif_t *hwif = HWIF(drive);
766 struct hd_driveid *id;
768 id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
772 taskfile_lib_get_identify(drive, (char *)&id);
776 drive->id->dma_ultra = id->dma_ultra;
777 drive->id->dma_mword = id->dma_mword;
778 drive->id->dma_1word = id->dma_1word;
779 /* anything more ? */
785 * Re-read drive->id for possible DMA mode
786 * change (copied from ide-probe.c)
788 unsigned long timeout, flags;
790 SELECT_MASK(drive, 1);
792 hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
794 hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
795 timeout = jiffies + WAIT_WORSTCASE;
797 if (time_after(jiffies, timeout)) {
798 SELECT_MASK(drive, 0);
799 return 0; /* drive timed-out */
801 msleep(50); /* give drive a breather */
802 } while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
803 msleep(50); /* wait for IRQ and DRQ_STAT */
804 if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
805 SELECT_MASK(drive, 0);
806 printk("%s: CHECK for good STATUS\n", drive->name);
809 local_irq_save(flags);
810 SELECT_MASK(drive, 0);
811 id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
813 local_irq_restore(flags);
816 ata_input_data(drive, id, SECTOR_WORDS);
817 (void) hwif->INB(IDE_STATUS_REG); /* clear drive IRQ */
819 local_irq_restore(flags);
822 drive->id->dma_ultra = id->dma_ultra;
823 drive->id->dma_mword = id->dma_mword;
824 drive->id->dma_1word = id->dma_1word;
825 /* anything more ? */
834 * Similar to ide_wait_stat(), except it never calls ide_error internally.
835 * This is a kludge to handle the new ide_config_drive_speed() function,
836 * and should not otherwise be used anywhere. Eventually, the tuneproc's
837 * should be updated to return ide_startstop_t, in which case we can get
838 * rid of this abomination again. :) -ml
840 * It is gone..........
842 * const char *msg == consider adding for verbose errors.
844 * Beware. If we timed out from a series of CRC errors and the timer
845 * expiry caused a switch to PIO mode and we take an IRQ as the drive times
846 * out about the same moment we may be entering this function with a
849 int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
851 ide_hwif_t *hwif = HWIF(drive);
855 // while (HWGROUP(drive)->busy)
858 #ifdef CONFIG_BLK_DEV_IDEDMA
859 if (hwif->ide_dma_check) /* check if host supports DMA */
860 hwif->ide_dma_host_off(drive);
864 * Don't use ide_wait_cmd here - it will
865 * attempt to set_geometry and recalibrate, We can't
866 * do that here as we may be in the IRQ handler already
868 * Select the drive, and issue the SETFEATURES command in
871 disable_irq_nosync(hwif->irq);
874 * We race against the running IRQ here if
875 * this is called from non IRQ context. If we use
876 * disable_irq() we hang on the error path. Instead we
877 * must let the core code know the hwif is doing a polling
883 SELECT_MASK(drive, 0);
886 hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
887 hwif->OUTB(speed, IDE_NSECTOR_REG);
888 hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
889 hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
890 /* The status bits are not valid for 400nS */
893 /* Drive status is now valid which means we can allow interrupts
894 to occur as they will see the drive as busy and will not
895 interfere erroneously. IRQ's for this drive will also be off
896 providing control and quirks allow for it */
898 if ((IDE_CONTROL_REG) && drive->quirk_list == 2)
899 hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
903 * Tell the interrupt layer that we are doing polled recovery.
904 * Eventually this should use the same mechanism do_reset does
911 * Wait for drive to become non-BUSY
913 if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
914 unsigned long timeout;
916 /* spin_unlock_irq(&ide_lock); */
917 timeout = jiffies + WAIT_CMD;
918 while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
919 if (time_after(jiffies, timeout))
922 /* spin_lock_irq(&ide_lock); */
928 * Allow status to settle, then read it again.
929 * A few rare drives vastly violate the 400ns spec here,
930 * so we'll wait up to 10usec for a "good" status
931 * rather than expensively fail things immediately.
932 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
934 for (i = 0; i < 10; i++) {
936 if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
942 SELECT_MASK(drive, 0);
944 enable_irq(hwif->irq);
947 (void) ide_dump_status(drive, "set_drive_speed_status", stat);
951 drive->id->dma_ultra &= ~0xFF00;
952 drive->id->dma_mword &= ~0x0F00;
953 drive->id->dma_1word &= ~0x0F00;
955 #ifdef CONFIG_BLK_DEV_IDEDMA
956 if (speed >= XFER_SW_DMA_0)
957 hwif->ide_dma_host_on(drive);
958 else if (hwif->ide_dma_check) /* check if host supports DMA */
959 hwif->ide_dma_off_quietly(drive);
963 case XFER_UDMA_7: drive->id->dma_ultra |= 0x8080; break;
964 case XFER_UDMA_6: drive->id->dma_ultra |= 0x4040; break;
965 case XFER_UDMA_5: drive->id->dma_ultra |= 0x2020; break;
966 case XFER_UDMA_4: drive->id->dma_ultra |= 0x1010; break;
967 case XFER_UDMA_3: drive->id->dma_ultra |= 0x0808; break;
968 case XFER_UDMA_2: drive->id->dma_ultra |= 0x0404; break;
969 case XFER_UDMA_1: drive->id->dma_ultra |= 0x0202; break;
970 case XFER_UDMA_0: drive->id->dma_ultra |= 0x0101; break;
971 case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
972 case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
973 case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
974 case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
975 case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
976 case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
979 if (!drive->init_speed)
980 drive->init_speed = speed;
981 drive->current_speed = speed;
985 EXPORT_SYMBOL(ide_config_drive_speed);
989 * This should get invoked any time we exit the driver to
990 * wait for an interrupt response from a drive. handler() points
991 * at the appropriate code to handle the next interrupt, and a
992 * timer is started to prevent us from waiting forever in case
993 * something goes wrong (see the ide_timer_expiry() handler later on).
995 * See also ide_execute_command
997 static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
998 unsigned int timeout, ide_expiry_t *expiry)
1000 ide_hwgroup_t *hwgroup = HWGROUP(drive);
1002 if (hwgroup->handler != NULL) {
1003 printk(KERN_CRIT "%s: ide_set_handler: handler not null; "
1005 drive->name, hwgroup->handler, handler);
1007 hwgroup->handler = handler;
1008 hwgroup->expiry = expiry;
1009 hwgroup->timer.expires = jiffies + timeout;
1010 add_timer(&hwgroup->timer);
1013 void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
1014 unsigned int timeout, ide_expiry_t *expiry)
1016 unsigned long flags;
1017 spin_lock_irqsave(&ide_lock, flags);
1018 __ide_set_handler(drive, handler, timeout, expiry);
1019 spin_unlock_irqrestore(&ide_lock, flags);
1022 EXPORT_SYMBOL(ide_set_handler);
1025 * ide_execute_command - execute an IDE command
1026 * @drive: IDE drive to issue the command against
1027 * @command: command byte to write
1028 * @handler: handler for next phase
1029 * @timeout: timeout for command
1030 * @expiry: handler to run on timeout
1032 * Helper function to issue an IDE command. This handles the
1033 * atomicity requirements, command timing and ensures that the
1034 * handler and IRQ setup do not race. All IDE command kick off
1035 * should go via this function or do equivalent locking.
1038 void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *handler, unsigned timeout, ide_expiry_t *expiry)
1040 unsigned long flags;
1041 ide_hwgroup_t *hwgroup = HWGROUP(drive);
1042 ide_hwif_t *hwif = HWIF(drive);
1044 spin_lock_irqsave(&ide_lock, flags);
1046 if(hwgroup->handler)
1048 hwgroup->handler = handler;
1049 hwgroup->expiry = expiry;
1050 hwgroup->timer.expires = jiffies + timeout;
1051 add_timer(&hwgroup->timer);
1052 hwif->OUTBSYNC(drive, cmd, IDE_COMMAND_REG);
1053 /* Drive takes 400nS to respond, we must avoid the IRQ being
1054 serviced before that.
1056 FIXME: we could skip this delay with care on non shared
1060 spin_unlock_irqrestore(&ide_lock, flags);
1063 EXPORT_SYMBOL(ide_execute_command);
1067 static ide_startstop_t do_reset1 (ide_drive_t *, int);
1070 * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
1071 * during an atapi drive reset operation. If the drive has not yet responded,
1072 * and we have not yet hit our maximum waiting time, then the timer is restarted
1075 static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
1077 ide_hwgroup_t *hwgroup = HWGROUP(drive);
1078 ide_hwif_t *hwif = HWIF(drive);
1081 SELECT_DRIVE(drive);
1084 if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
1085 printk("%s: ATAPI reset complete\n", drive->name);
1087 if (time_before(jiffies, hwgroup->poll_timeout)) {
1088 if (HWGROUP(drive)->handler != NULL)
1090 ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
1091 /* continue polling */
1094 /* end of polling */
1095 hwgroup->poll_timeout = 0;
1096 printk("%s: ATAPI reset timed-out, status=0x%02x\n",
1098 /* do it the old fashioned way */
1099 return do_reset1(drive, 1);
1102 hwgroup->poll_timeout = 0;
1107 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
1108 * during an ide reset operation. If the drives have not yet responded,
1109 * and we have not yet hit our maximum waiting time, then the timer is restarted
1112 static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
1114 ide_hwgroup_t *hwgroup = HWGROUP(drive);
1115 ide_hwif_t *hwif = HWIF(drive);
1118 if (hwif->reset_poll != NULL) {
1119 if (hwif->reset_poll(drive)) {
1120 printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
1121 hwif->name, drive->name);
1126 if (!OK_STAT(tmp = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
1127 if (time_before(jiffies, hwgroup->poll_timeout)) {
1128 if (HWGROUP(drive)->handler != NULL)
1130 ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
1131 /* continue polling */
1134 printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
1137 printk("%s: reset: ", hwif->name);
1138 if ((tmp = hwif->INB(IDE_ERROR_REG)) == 1) {
1139 printk("success\n");
1140 drive->failures = 0;
1144 switch (tmp & 0x7f) {
1145 case 1: printk("passed");
1147 case 2: printk("formatter device error");
1149 case 3: printk("sector buffer error");
1151 case 4: printk("ECC circuitry error");
1153 case 5: printk("controlling MPU error");
1155 default:printk("error (0x%02x?)", tmp);
1158 printk("; slave: failed");
1162 hwgroup->poll_timeout = 0; /* done polling */
1166 static void check_dma_crc(ide_drive_t *drive)
1168 #ifdef CONFIG_BLK_DEV_IDEDMA
1169 if (drive->crc_count) {
1170 (void) HWIF(drive)->ide_dma_off_quietly(drive);
1171 ide_set_xfer_rate(drive, ide_auto_reduce_xfer(drive));
1172 if (drive->current_speed >= XFER_SW_DMA_0)
1173 (void) HWIF(drive)->ide_dma_on(drive);
1175 (void)__ide_dma_off(drive);
1179 void pre_reset (ide_drive_t *drive)
1181 DRIVER(drive)->pre_reset(drive);
1183 if (!drive->keep_settings) {
1184 if (drive->using_dma) {
1185 check_dma_crc(drive);
1188 drive->io_32bit = 0;
1192 if (drive->using_dma)
1193 check_dma_crc(drive);
1195 if (HWIF(drive)->pre_reset != NULL)
1196 HWIF(drive)->pre_reset(drive);
1201 * do_reset1() attempts to recover a confused drive by resetting it.
1202 * Unfortunately, resetting a disk drive actually resets all devices on
1203 * the same interface, so it can really be thought of as resetting the
1204 * interface rather than resetting the drive.
1206 * ATAPI devices have their own reset mechanism which allows them to be
1207 * individually reset without clobbering other devices on the same interface.
1209 * Unfortunately, the IDE interface does not generate an interrupt to let
1210 * us know when the reset operation has finished, so we must poll for this.
1211 * Equally poor, though, is the fact that this may a very long time to complete,
1212 * (up to 30 seconds worstcase). So, instead of busy-waiting here for it,
1213 * we set a timer to poll at 50ms intervals.
1215 static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1218 unsigned long flags;
1220 ide_hwgroup_t *hwgroup;
1222 spin_lock_irqsave(&ide_lock, flags);
1224 hwgroup = HWGROUP(drive);
1226 /* We must not reset with running handlers */
1227 if(hwgroup->handler != NULL)
1230 /* For an ATAPI device, first try an ATAPI SRST. */
1231 if (drive->media != ide_disk && !do_not_try_atapi) {
1233 SELECT_DRIVE(drive);
1235 hwif->OUTBSYNC(drive, WIN_SRST, IDE_COMMAND_REG);
1237 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1238 __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
1239 spin_unlock_irqrestore(&ide_lock, flags);
1244 * First, reset any device state data we were maintaining
1245 * for any of the drives on this interface.
1247 for (unit = 0; unit < MAX_DRIVES; ++unit)
1248 pre_reset(&hwif->drives[unit]);
1250 #if OK_TO_RESET_CONTROLLER
1251 if (!IDE_CONTROL_REG) {
1252 spin_unlock_irqrestore(&ide_lock, flags);
1257 * Note that we also set nIEN while resetting the device,
1258 * to mask unwanted interrupts from the interface during the reset.
1259 * However, due to the design of PC hardware, this will cause an
1260 * immediate interrupt due to the edge transition it produces.
1261 * This single interrupt gives us a "fast poll" for drives that
1262 * recover from reset very quickly, saving us the first 50ms wait time.
1264 /* set SRST and nIEN */
1265 hwif->OUTBSYNC(drive, drive->ctl|6,IDE_CONTROL_REG);
1266 /* more than enough time */
1268 if (drive->quirk_list == 2) {
1269 /* clear SRST and nIEN */
1270 hwif->OUTBSYNC(drive, drive->ctl, IDE_CONTROL_REG);
1272 /* clear SRST, leave nIEN */
1273 hwif->OUTBSYNC(drive, drive->ctl|2, IDE_CONTROL_REG);
1275 /* more than enough time */
1277 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1278 __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
1281 * Some weird controller like resetting themselves to a strange
1282 * state when the disks are reset this way. At least, the Winbond
1283 * 553 documentation says that
1285 if (hwif->resetproc != NULL) {
1286 hwif->resetproc(drive);
1289 #endif /* OK_TO_RESET_CONTROLLER */
1291 spin_unlock_irqrestore(&ide_lock, flags);
1296 * ide_do_reset() is the entry point to the drive/interface reset code.
1299 ide_startstop_t ide_do_reset (ide_drive_t *drive)
1301 return do_reset1(drive, 0);
1304 EXPORT_SYMBOL(ide_do_reset);
1307 * ide_wait_not_busy() waits for the currently selected device on the hwif
1308 * to report a non-busy status, see comments in probe_hwif().
1310 int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1316 * Turn this into a schedule() sleep once I'm sure
1317 * about locking issues (2.5 work ?).
1320 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
1321 if ((stat & BUSY_STAT) == 0)
1324 * Assume a value of 0xff means nothing is connected to
1325 * the interface and it doesn't implement the pull-down
1334 EXPORT_SYMBOL_GPL(ide_wait_not_busy);