1 /*
2  * linux/drivers/ide/ide-iops.c Version 0.37    Mar 05, 2003
3  *
4  *  Copyright (C) 2000-2002     Andre Hedrick <andre@linux-ide.org>
5  *  Copyright (C) 2003          Red Hat <alan@redhat.com>
6  *
7  */
8
9 #include <linux/config.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/kernel.h>
14 #include <linux/timer.h>
15 #include <linux/mm.h>
16 #include <linux/interrupt.h>
17 #include <linux/major.h>
18 #include <linux/errno.h>
19 #include <linux/genhd.h>
20 #include <linux/blkpg.h>
21 #include <linux/slab.h>
22 #include <linux/pci.h>
23 #include <linux/delay.h>
24 #include <linux/hdreg.h>
25 #include <linux/ide.h>
26 #include <linux/bitops.h>
27
28 #include <asm/byteorder.h>
29 #include <asm/irq.h>
30 #include <asm/uaccess.h>
31 #include <asm/io.h>
32
33 /*
34  *      Conventional PIO operations for ATA devices
35  */
36
37 static u8 ide_inb (unsigned long port)
38 {
39         return (u8) inb(port);
40 }
41
42 static u16 ide_inw (unsigned long port)
43 {
44         return (u16) inw(port);
45 }
46
47 static void ide_insw (unsigned long port, void *addr, u32 count)
48 {
49         insw(port, addr, count);
50 }
51
52 static u32 ide_inl (unsigned long port)
53 {
54         return (u32) inl(port);
55 }
56
57 static void ide_insl (unsigned long port, void *addr, u32 count)
58 {
59         insl(port, addr, count);
60 }
61
62 static void ide_outb (u8 val, unsigned long port)
63 {
64         outb(val, port);
65 }
66
67 static void ide_outbsync (ide_drive_t *drive, u8 addr, unsigned long port)
68 {
69         outb(addr, port);
70 }
71
72 static void ide_outw (u16 val, unsigned long port)
73 {
74         outw(val, port);
75 }
76
77 static void ide_outsw (unsigned long port, void *addr, u32 count)
78 {
79         outsw(port, addr, count);
80 }
81
82 static void ide_outl (u32 val, unsigned long port)
83 {
84         outl(val, port);
85 }
86
87 static void ide_outsl (unsigned long port, void *addr, u32 count)
88 {
89         outsl(port, addr, count);
90 }
91
92 void default_hwif_iops (ide_hwif_t *hwif)
93 {
94         hwif->OUTB      = ide_outb;
95         hwif->OUTBSYNC  = ide_outbsync;
96         hwif->OUTW      = ide_outw;
97         hwif->OUTL      = ide_outl;
98         hwif->OUTSW     = ide_outsw;
99         hwif->OUTSL     = ide_outsl;
100         hwif->INB       = ide_inb;
101         hwif->INW       = ide_inw;
102         hwif->INL       = ide_inl;
103         hwif->INSW      = ide_insw;
104         hwif->INSL      = ide_insl;
105 }
106
107 EXPORT_SYMBOL(default_hwif_iops);
108
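#if 0
/*
 * Illustrative sketch (not compiled): a hypothetical legacy host driver
 * would normally install all of the conventional port-I/O accessors above
 * in one call and then override only what its hardware needs.  The
 * my_delayed_inb() helper and its extra settle delay are assumptions made
 * for this example only, not part of any real chipset driver.
 */
static u8 my_delayed_inb(unsigned long port)
{
        u8 val = inb(port);

        udelay(1);                      /* hypothetical post-read settle time */
        return val;
}

static void my_host_init_iops(ide_hwif_t *hwif)
{
        default_hwif_iops(hwif);        /* install the conventional PIO ops */
        hwif->INB = my_delayed_inb;     /* then override a single accessor */
}
#endif
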
109 /*
110  *      Interface removed
111  */
112
113 static u8 ide_no_inb(unsigned long port)
114 {
115         return 0xFF;
116 }
117
118 static u16 ide_no_inw (unsigned long port)
119 {
120         return 0xFFFF;
121 }
122
123 static void ide_no_insw (unsigned long port, void *addr, u32 count)
124 {
125 }
126
127 static u32 ide_no_inl (unsigned long port)
128 {
129         return 0xFFFFFFFF;
130 }
131
132 static void ide_no_insl (unsigned long port, void *addr, u32 count)
133 {
134 }
135
136 static void ide_no_outb (u8 val, unsigned long port)
137 {
138 }
139
140 static void ide_no_outbsync (ide_drive_t *drive, u8 addr, unsigned long port)
141 {
142 }
143
144 static void ide_no_outw (u16 val, unsigned long port)
145 {
146 }
147
148 static void ide_no_outsw (unsigned long port, void *addr, u32 count)
149 {
150 }
151
152 static void ide_no_outl (u32 val, unsigned long port)
153 {
154 }
155
156 static void ide_no_outsl (unsigned long port, void *addr, u32 count)
157 {
158 }
159
160 void removed_hwif_iops (ide_hwif_t *hwif)
161 {
162         hwif->OUTB      = ide_no_outb;
163         hwif->OUTBSYNC  = ide_no_outbsync;
164         hwif->OUTW      = ide_no_outw;
165         hwif->OUTL      = ide_no_outl;
166         hwif->OUTSW     = ide_no_outsw;
167         hwif->OUTSL     = ide_no_outsl;
168         hwif->INB       = ide_no_inb;
169         hwif->INW       = ide_no_inw;
170         hwif->INL       = ide_no_inl;
171         hwif->INSW      = ide_no_insw;
172         hwif->INSL      = ide_no_insl;
173 }
174
175 EXPORT_SYMBOL(removed_hwif_iops);
176
177 /*
178  *      MMIO operations, typically used for SATA controllers
179  */
180
181 static u8 ide_mm_inb (unsigned long port)
182 {
183         return (u8) readb((void __iomem *) port);
184 }
185
186 static u16 ide_mm_inw (unsigned long port)
187 {
188         return (u16) readw((void __iomem *) port);
189 }
190
191 static void ide_mm_insw (unsigned long port, void *addr, u32 count)
192 {
193         __ide_mm_insw((void __iomem *) port, addr, count);
194 }
195
196 static u32 ide_mm_inl (unsigned long port)
197 {
198         return (u32) readl((void __iomem *) port);
199 }
200
201 static void ide_mm_insl (unsigned long port, void *addr, u32 count)
202 {
203         __ide_mm_insl((void __iomem *) port, addr, count);
204 }
205
206 static void ide_mm_outb (u8 value, unsigned long port)
207 {
208         writeb(value, (void __iomem *) port);
209 }
210
211 static void ide_mm_outbsync (ide_drive_t *drive, u8 value, unsigned long port)
212 {
213         writeb(value, (void __iomem *) port);
214 }
215
216 static void ide_mm_outw (u16 value, unsigned long port)
217 {
218         writew(value, (void __iomem *) port);
219 }
220
221 static void ide_mm_outsw (unsigned long port, void *addr, u32 count)
222 {
223         __ide_mm_outsw((void __iomem *) port, addr, count);
224 }
225
226 static void ide_mm_outl (u32 value, unsigned long port)
227 {
228         writel(value, (void __iomem *) port);
229 }
230
231 static void ide_mm_outsl (unsigned long port, void *addr, u32 count)
232 {
233         __ide_mm_outsl((void __iomem *) port, addr, count);
234 }
235
236 void default_hwif_mmiops (ide_hwif_t *hwif)
237 {
238         hwif->OUTB      = ide_mm_outb;
239         /* Most systems will need to override OUTBSYNC; alas,
240            this one is controller specific! */
241         hwif->OUTBSYNC  = ide_mm_outbsync;
242         hwif->OUTW      = ide_mm_outw;
243         hwif->OUTL      = ide_mm_outl;
244         hwif->OUTSW     = ide_mm_outsw;
245         hwif->OUTSL     = ide_mm_outsl;
246         hwif->INB       = ide_mm_inb;
247         hwif->INW       = ide_mm_inw;
248         hwif->INL       = ide_mm_inl;
249         hwif->INSW      = ide_mm_insw;
250         hwif->INSL      = ide_mm_insl;
251 }
252
253 EXPORT_SYMBOL(default_hwif_mmiops);
254
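#if 0
/*
 * Illustrative sketch (not compiled): a hypothetical memory-mapped (e.g.
 * SATA) host driver installs the MMIO accessors above and, as the comment
 * in default_hwif_mmiops() warns, overrides OUTBSYNC with a variant that
 * forces the posted write out to the controller.  my_mmio_outbsync() is an
 * assumption for this example only.
 */
static void my_mmio_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
{
        writeb(value, (void __iomem *) port);
        (void) readb((void __iomem *) port);    /* flush the posted write */
}

static void my_host_init_mmiops(ide_hwif_t *hwif)
{
        default_hwif_mmiops(hwif);
        hwif->OUTBSYNC = my_mmio_outbsync;
}
#endif
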
255 void default_hwif_transport (ide_hwif_t *hwif)
256 {
257         hwif->ata_input_data            = ata_input_data;
258         hwif->ata_output_data           = ata_output_data;
259         hwif->atapi_input_bytes         = atapi_input_bytes;
260         hwif->atapi_output_bytes        = atapi_output_bytes;
261 }
262
263 EXPORT_SYMBOL(default_hwif_transport);
264
265 u32 ide_read_24 (ide_drive_t *drive)
266 {
267         u8 hcyl = HWIF(drive)->INB(IDE_HCYL_REG);
268         u8 lcyl = HWIF(drive)->INB(IDE_LCYL_REG);
269         u8 sect = HWIF(drive)->INB(IDE_SECTOR_REG);
270         return (hcyl<<16)|(lcyl<<8)|sect;
271 }
272
273 EXPORT_SYMBOL(ide_read_24);
274
275 void SELECT_DRIVE (ide_drive_t *drive)
276 {
277         if (HWIF(drive)->selectproc)
278                 HWIF(drive)->selectproc(drive);
279         HWIF(drive)->OUTB(drive->select.all, IDE_SELECT_REG);
280 }
281
282 EXPORT_SYMBOL(SELECT_DRIVE);
283
284 void SELECT_INTERRUPT (ide_drive_t *drive)
285 {
286         if (HWIF(drive)->intrproc)
287                 HWIF(drive)->intrproc(drive);
288         else
289                 HWIF(drive)->OUTB(drive->ctl|2, IDE_CONTROL_REG);
290 }
291
292 void SELECT_MASK (ide_drive_t *drive, int mask)
293 {
294         if (HWIF(drive)->maskproc)
295                 HWIF(drive)->maskproc(drive, mask);
296 }
297
298 void QUIRK_LIST (ide_drive_t *drive)
299 {
300         if (HWIF(drive)->quirkproc)
301                 drive->quirk_list = HWIF(drive)->quirkproc(drive);
302 }
303
304 /*
305  * Some localbus EIDE interfaces require a special access sequence
306  * when using 32-bit I/O instructions to transfer data.  We call this
307  * the "vlb_sync" sequence, which consists of three successive reads
308  * of the sector count register location, with interrupts disabled
309  * to ensure that the reads all happen together.
310  */
311 void ata_vlb_sync (ide_drive_t *drive, unsigned long port)
312 {
313         (void) HWIF(drive)->INB(port);
314         (void) HWIF(drive)->INB(port);
315         (void) HWIF(drive)->INB(port);
316 }
317
318 /*
319  * This is used for most PIO data transfers *from* the IDE interface
320  */
321 void ata_input_data (ide_drive_t *drive, void *buffer, u32 wcount)
322 {
323         ide_hwif_t *hwif        = HWIF(drive);
324         u8 io_32bit             = drive->io_32bit;
325
326         if (io_32bit) {
327                 if (io_32bit & 2) {
328                         unsigned long flags;
329                         local_irq_save(flags);
330                         ata_vlb_sync(drive, IDE_NSECTOR_REG);
331                         hwif->INSL(IDE_DATA_REG, buffer, wcount);
332                         local_irq_restore(flags);
333                 } else
334                         hwif->INSL(IDE_DATA_REG, buffer, wcount);
335         } else {
336                 hwif->INSW(IDE_DATA_REG, buffer, wcount<<1);
337         }
338 }
339
340 /*
341  * This is used for most PIO data transfers *to* the IDE interface
342  */
343 void ata_output_data (ide_drive_t *drive, void *buffer, u32 wcount)
344 {
345         ide_hwif_t *hwif        = HWIF(drive);
346         u8 io_32bit             = drive->io_32bit;
347
348         if (io_32bit) {
349                 if (io_32bit & 2) {
350                         unsigned long flags;
351                         local_irq_save(flags);
352                         ata_vlb_sync(drive, IDE_NSECTOR_REG);
353                         hwif->OUTSL(IDE_DATA_REG, buffer, wcount);
354                         local_irq_restore(flags);
355                 } else
356                         hwif->OUTSL(IDE_DATA_REG, buffer, wcount);
357         } else {
358                 hwif->OUTSW(IDE_DATA_REG, buffer, wcount<<1);
359         }
360 }
361
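#if 0
/*
 * Illustrative sketch (not compiled): reading one 512-byte sector of PIO
 * data through the transport hook installed by default_hwif_transport().
 * The count argument is in 32-bit words, so a full sector is SECTOR_WORDS
 * (512 / 4), exactly as ide_driveid_update() uses later in this file.
 * example_read_one_sector() is an assumption for this example only.
 */
static void example_read_one_sector(ide_drive_t *drive, void *buf)
{
        HWIF(drive)->ata_input_data(drive, buf, SECTOR_WORDS);
}
#endif
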
362 /*
363  * The following routines are mainly used by the ATAPI drivers.
364  *
365  * These routines will round up any request for an odd number of bytes,
366  * so if an odd bytecount is specified, be sure that there's at least one
367  * extra byte allocated for the buffer.
368  */
369
370 void atapi_input_bytes (ide_drive_t *drive, void *buffer, u32 bytecount)
371 {
372         ide_hwif_t *hwif = HWIF(drive);
373
374         ++bytecount;
375 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
376         if (MACH_IS_ATARI || MACH_IS_Q40) {
377                 /* Atari has a byte-swapped IDE interface */
378                 insw_swapw(IDE_DATA_REG, buffer, bytecount / 2);
379                 return;
380         }
381 #endif /* CONFIG_ATARI || CONFIG_Q40 */
382         hwif->ata_input_data(drive, buffer, bytecount / 4);
383         if ((bytecount & 0x03) >= 2)
384                 hwif->INSW(IDE_DATA_REG, ((u8 *)buffer)+(bytecount & ~0x03), 1);
385 }
386
387 EXPORT_SYMBOL(atapi_input_bytes);
388
389 void atapi_output_bytes (ide_drive_t *drive, void *buffer, u32 bytecount)
390 {
391         ide_hwif_t *hwif = HWIF(drive);
392
393         ++bytecount;
394 #if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
395         if (MACH_IS_ATARI || MACH_IS_Q40) {
396                 /* Atari has a byte-swapped IDE interface */
397                 outsw_swapw(IDE_DATA_REG, buffer, bytecount / 2);
398                 return;
399         }
400 #endif /* CONFIG_ATARI || CONFIG_Q40 */
401         hwif->ata_output_data(drive, buffer, bytecount / 4);
402         if ((bytecount & 0x03) >= 2)
403                 hwif->OUTSW(IDE_DATA_REG, ((u8*)buffer)+(bytecount & ~0x03), 1);
404 }
405
406 EXPORT_SYMBOL(atapi_output_bytes);
407
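#if 0
/*
 * Illustrative sketch (not compiled): as the comment above warns, an odd
 * bytecount is rounded up, so this (hypothetical) caller allocates one
 * spare byte for the transfer buffer.  example_atapi_pio_read() is an
 * assumption for this example only.
 */
static int example_atapi_pio_read(ide_drive_t *drive, unsigned int len)
{
        u8 *buf = kmalloc(len + 1, GFP_ATOMIC); /* room for the round-up */

        if (buf == NULL)
                return -ENOMEM;
        atapi_input_bytes(drive, buf, len);
        /* ... hand the first "len" bytes to the higher layer, then ... */
        kfree(buf);
        return 0;
}
#endif
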
408 /*
409  * Beginning of Taskfile OPCODE Library and feature sets.
410  */
411 void ide_fix_driveid (struct hd_driveid *id)
412 {
413 #ifndef __LITTLE_ENDIAN
414 # ifdef __BIG_ENDIAN
415         int i;
416         u16 *stringcast;
417
418         id->config         = __le16_to_cpu(id->config);
419         id->cyls           = __le16_to_cpu(id->cyls);
420         id->reserved2      = __le16_to_cpu(id->reserved2);
421         id->heads          = __le16_to_cpu(id->heads);
422         id->track_bytes    = __le16_to_cpu(id->track_bytes);
423         id->sector_bytes   = __le16_to_cpu(id->sector_bytes);
424         id->sectors        = __le16_to_cpu(id->sectors);
425         id->vendor0        = __le16_to_cpu(id->vendor0);
426         id->vendor1        = __le16_to_cpu(id->vendor1);
427         id->vendor2        = __le16_to_cpu(id->vendor2);
428         stringcast = (u16 *)&id->serial_no[0];
429         for (i = 0; i < (20/2); i++)
430                 stringcast[i] = __le16_to_cpu(stringcast[i]);
431         id->buf_type       = __le16_to_cpu(id->buf_type);
432         id->buf_size       = __le16_to_cpu(id->buf_size);
433         id->ecc_bytes      = __le16_to_cpu(id->ecc_bytes);
434         stringcast = (u16 *)&id->fw_rev[0];
435         for (i = 0; i < (8/2); i++)
436                 stringcast[i] = __le16_to_cpu(stringcast[i]);
437         stringcast = (u16 *)&id->model[0];
438         for (i = 0; i < (40/2); i++)
439                 stringcast[i] = __le16_to_cpu(stringcast[i]);
440         id->dword_io       = __le16_to_cpu(id->dword_io);
441         id->reserved50     = __le16_to_cpu(id->reserved50);
442         id->field_valid    = __le16_to_cpu(id->field_valid);
443         id->cur_cyls       = __le16_to_cpu(id->cur_cyls);
444         id->cur_heads      = __le16_to_cpu(id->cur_heads);
445         id->cur_sectors    = __le16_to_cpu(id->cur_sectors);
446         id->cur_capacity0  = __le16_to_cpu(id->cur_capacity0);
447         id->cur_capacity1  = __le16_to_cpu(id->cur_capacity1);
448         id->lba_capacity   = __le32_to_cpu(id->lba_capacity);
449         id->dma_1word      = __le16_to_cpu(id->dma_1word);
450         id->dma_mword      = __le16_to_cpu(id->dma_mword);
451         id->eide_pio_modes = __le16_to_cpu(id->eide_pio_modes);
452         id->eide_dma_min   = __le16_to_cpu(id->eide_dma_min);
453         id->eide_dma_time  = __le16_to_cpu(id->eide_dma_time);
454         id->eide_pio       = __le16_to_cpu(id->eide_pio);
455         id->eide_pio_iordy = __le16_to_cpu(id->eide_pio_iordy);
456         for (i = 0; i < 2; ++i)
457                 id->words69_70[i] = __le16_to_cpu(id->words69_70[i]);
458         for (i = 0; i < 4; ++i)
459                 id->words71_74[i] = __le16_to_cpu(id->words71_74[i]);
460         id->queue_depth    = __le16_to_cpu(id->queue_depth);
461         for (i = 0; i < 4; ++i)
462                 id->words76_79[i] = __le16_to_cpu(id->words76_79[i]);
463         id->major_rev_num  = __le16_to_cpu(id->major_rev_num);
464         id->minor_rev_num  = __le16_to_cpu(id->minor_rev_num);
465         id->command_set_1  = __le16_to_cpu(id->command_set_1);
466         id->command_set_2  = __le16_to_cpu(id->command_set_2);
467         id->cfsse          = __le16_to_cpu(id->cfsse);
468         id->cfs_enable_1   = __le16_to_cpu(id->cfs_enable_1);
469         id->cfs_enable_2   = __le16_to_cpu(id->cfs_enable_2);
470         id->csf_default    = __le16_to_cpu(id->csf_default);
471         id->dma_ultra      = __le16_to_cpu(id->dma_ultra);
472         id->trseuc         = __le16_to_cpu(id->trseuc);
473         id->trsEuc         = __le16_to_cpu(id->trsEuc);
474         id->CurAPMvalues   = __le16_to_cpu(id->CurAPMvalues);
475         id->mprc           = __le16_to_cpu(id->mprc);
476         id->hw_config      = __le16_to_cpu(id->hw_config);
477         id->acoustic       = __le16_to_cpu(id->acoustic);
478         id->msrqs          = __le16_to_cpu(id->msrqs);
479         id->sxfert         = __le16_to_cpu(id->sxfert);
480         id->sal            = __le16_to_cpu(id->sal);
481         id->spg            = __le32_to_cpu(id->spg);
482         id->lba_capacity_2 = __le64_to_cpu(id->lba_capacity_2);
483         for (i = 0; i < 22; i++)
484                 id->words104_125[i]   = __le16_to_cpu(id->words104_125[i]);
485         id->last_lun       = __le16_to_cpu(id->last_lun);
486         id->word127        = __le16_to_cpu(id->word127);
487         id->dlf            = __le16_to_cpu(id->dlf);
488         id->csfo           = __le16_to_cpu(id->csfo);
489         for (i = 0; i < 26; i++)
490                 id->words130_155[i] = __le16_to_cpu(id->words130_155[i]);
491         id->word156        = __le16_to_cpu(id->word156);
492         for (i = 0; i < 3; i++)
493                 id->words157_159[i] = __le16_to_cpu(id->words157_159[i]);
494         id->cfa_power      = __le16_to_cpu(id->cfa_power);
495         for (i = 0; i < 14; i++)
496                 id->words161_175[i] = __le16_to_cpu(id->words161_175[i]);
497         for (i = 0; i < 31; i++)
498                 id->words176_205[i] = __le16_to_cpu(id->words176_205[i]);
499         for (i = 0; i < 48; i++)
500                 id->words206_254[i] = __le16_to_cpu(id->words206_254[i]);
501         id->integrity_word  = __le16_to_cpu(id->integrity_word);
502 # else
503 #  error "Please fix <asm/byteorder.h>"
504 # endif
505 #endif
506 }
507
508 EXPORT_SYMBOL(ide_fix_driveid);
509
510 void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
511 {
512         u8 *p = s, *end = &s[bytecount & ~1]; /* bytecount must be even */
513
514         if (byteswap) {
515                 /* convert from big-endian to host byte order */
516                 for (p = end ; p != s;) {
517                         unsigned short *pp = (unsigned short *) (p -= 2);
518                         *pp = ntohs(*pp);
519                 }
520         }
521         /* strip leading blanks */
522         while (s != end && *s == ' ')
523                 ++s;
524         /* compress internal blanks and strip trailing blanks */
525         while (s != end && *s) {
526                 if (*s++ != ' ' || (s != end && *s && *s != ' '))
527                         *p++ = *(s-1);
528         }
529         /* wipe out trailing garbage */
530         while (p != end)
531                 *p++ = '\0';
532 }
533
534 EXPORT_SYMBOL(ide_fixstring);
535
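#if 0
/*
 * Illustrative sketch (not compiled): the usual probe-time sequence
 * (compare ide-probe.c) is to read the raw IDENTIFY block, byte-swap it
 * with ide_fix_driveid() and then clean up the ATA strings in place with
 * ide_fixstring().  The "bswap" flag is an assumption for this example.
 */
static void example_fixup_identify(struct hd_driveid *id, int bswap)
{
        ide_fix_driveid(id);
        ide_fixstring(id->model,     sizeof(id->model),     bswap);
        ide_fixstring(id->fw_rev,    sizeof(id->fw_rev),    bswap);
        ide_fixstring(id->serial_no, sizeof(id->serial_no), bswap);
}
#endif
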
536 /*
537  * Needed for PCI irq sharing
538  */
539 int drive_is_ready (ide_drive_t *drive)
540 {
541         ide_hwif_t *hwif        = HWIF(drive);
542         u8 stat                 = 0;
543
544         if (drive->waiting_for_dma)
545                 return hwif->ide_dma_test_irq(drive);
546
547 #if 0
548         /* need to guarantee 400ns since last command was issued */
549         udelay(1);
550 #endif
551
552 #ifdef CONFIG_IDEPCI_SHARE_IRQ
553         /*
554          * We do a passive status test under shared PCI interrupts on
555          * cards that truly share the ATA side interrupt, but may also share
556          * an interrupt with another pci card/device.  We make no assumptions
557          * about possible isa-pnp and pci-pnp issues yet.
558          */
559         if (IDE_CONTROL_REG)
560                 stat = hwif->INB(IDE_ALTSTATUS_REG);
561         else
562 #endif /* CONFIG_IDEPCI_SHARE_IRQ */
563                 /* Note: this may clear a pending IRQ!! */
564                 stat = hwif->INB(IDE_STATUS_REG);
565
566         if (stat & BUSY_STAT)
567                 /* drive busy:  definitely not interrupting */
568                 return 0;
569
570         /* drive ready: *might* be interrupting */
571         return 1;
572 }
573
574 EXPORT_SYMBOL(drive_is_ready);
575
576 /*
577  * Global for all, taken from ide-pmac.c. Can be called
578  * with a spinlock held and IRQs disabled, so don't schedule!
579  */
580 int wait_for_ready (ide_drive_t *drive, int timeout)
581 {
582         ide_hwif_t *hwif        = HWIF(drive);
583         u8 stat                 = 0;
584
585         while(--timeout) {
586                 stat = hwif->INB(IDE_STATUS_REG);
587                 if (!(stat & BUSY_STAT)) {
588                         if (drive->ready_stat == 0)
589                                 break;
590                         else if ((stat & drive->ready_stat)||(stat & ERR_STAT))
591                                 break;
592                 }
593                 mdelay(1);
594         }
595         if ((stat & ERR_STAT) || timeout <= 0) {
596                 if (stat & ERR_STAT) {
597                         printk(KERN_ERR "%s: wait_for_ready, "
598                                 "error status: %x\n", drive->name, stat);
599                 }
600                 return 1;
601         }
602         return 0;
603 }
604
605 EXPORT_SYMBOL(wait_for_ready);
606
607 /*
608  * This routine busy-waits for the drive status to be not "busy".
609  * It then checks the status for all of the "good" bits and none
610  * of the "bad" bits, and if all is okay it returns 0.  All other
611  * cases return 1 after invoking ide_error() -- caller should just return.
612  *
613  * This routine should be fixed to not hog the CPU during extra-long waits.
614  * That could be done by busy-waiting for the first jiffy or two, and then
615  * setting a timer to wake up at half-second intervals thereafter,
616  * until the timeout is reached.
617  */
618 int ide_wait_stat (ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
619 {
620         ide_hwif_t *hwif = HWIF(drive);
621         u8 stat;
622         int i;
623         unsigned long flags;
624  
625         /* bail early if we've exceeded max_failures */
626         if (drive->max_failures && (drive->failures > drive->max_failures)) {
627                 *startstop = ide_stopped;
628                 return 1;
629         }
630
631         udelay(1);      /* spec allows drive 400ns to assert "BUSY" */
632         if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
633                 local_irq_set(flags);
634                 timeout += jiffies;
635                 while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
636                         if (time_after(jiffies, timeout)) {
637                                 /*
638                                  * One last read after the timeout in case
639                                  * heavy interrupt load made us not make any
640                                  * progress during the timeout..
641                                  */
642                                 stat = hwif->INB(IDE_STATUS_REG);
643                                 if (!(stat & BUSY_STAT))
644                                         break;
645
646                                 local_irq_restore(flags);
647                                 *startstop = DRIVER(drive)->error(drive, "status timeout", stat);
648                                 return 1;
649                         }
650                 }
651                 local_irq_restore(flags);
652         }
653         /*
654          * Allow status to settle, then read it again.
655          * A few rare drives vastly violate the 400ns spec here,
656          * so we'll wait up to 10usec for a "good" status
657          * rather than expensively fail things immediately.
658          * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
659          */
660         for (i = 0; i < 10; i++) {
661                 udelay(1);
662                 if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), good, bad))
663                         return 0;
664         }
665         *startstop = DRIVER(drive)->error(drive, "status error", stat);
666         return 1;
667 }
668
669 EXPORT_SYMBOL(ide_wait_stat);
670
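#if 0
/*
 * Illustrative sketch (not compiled): the typical calling convention used
 * by the protocol drivers.  If ide_wait_stat() returns non-zero it has
 * already invoked the error path, so the caller simply propagates the
 * ide_startstop_t it was handed back.  example_issue_step() is an
 * assumption for this example only.
 */
static ide_startstop_t example_issue_step(ide_drive_t *drive)
{
        ide_startstop_t startstop;

        if (ide_wait_stat(&startstop, drive, DRIVE_READY, BAD_W_STAT,
                          WAIT_READY))
                return startstop;
        /* ... drive is ready: program the taskfile here ... */
        return ide_started;
}
#endif
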
671 /*
672  *  All hosts that use the 80-conductor (80c) ribbon must use this check.
673  *  The name is derived from the upper byte of word 93 and the 80c ribbon.
674  */
675 u8 eighty_ninty_three (ide_drive_t *drive)
676 {
677         if(HWIF(drive)->udma_four == 0)
678                 return 0;
679         if (!(drive->id->hw_config & 0x6000))
680                 return 0;
681 #ifndef CONFIG_IDEDMA_IVB
682         if(!(drive->id->hw_config & 0x4000))
683                 return 0;
684 #endif /* CONFIG_IDEDMA_IVB */
685         return 1;
686 }
687
688 EXPORT_SYMBOL(eighty_ninty_three);
689
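#if 0
/*
 * Illustrative sketch (not compiled): a hypothetical host driver capping
 * the transfer mode it is willing to program.  UDMA modes above 2 require
 * the 80-conductor cable, which is exactly what eighty_ninty_three()
 * reports.  example_max_udma() is an assumption for this example only.
 */
static u8 example_max_udma(ide_drive_t *drive)
{
        return eighty_ninty_three(drive) ? XFER_UDMA_5 : XFER_UDMA_2;
}
#endif
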
690 int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
691 {
692         if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
693             (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
694             (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
695 #ifndef CONFIG_IDEDMA_IVB
696                 if ((drive->id->hw_config & 0x6000) == 0) {
697 #else /* !CONFIG_IDEDMA_IVB */
698                 if (((drive->id->hw_config & 0x2000) == 0) ||
699                     ((drive->id->hw_config & 0x4000) == 0)) {
700 #endif /* CONFIG_IDEDMA_IVB */
701                         printk("%s: Speed warnings UDMA 3/4/5 is not "
702                                 "functional.\n", drive->name);
703                         return 1;
704                 }
705                 if (!HWIF(drive)->udma_four) {
706                         printk("%s: Speed warnings UDMA 3/4/5 is not "
707                                 "functional.\n",
708                                 HWIF(drive)->name);
709                         return 1;
710                 }
711         }
712         return 0;
713 }
714
715 /*
716  * Backside of an HDIO_DRIVE_CMD call of SETFEATURES_XFER.
717  * 1 : Safe to update the drive->id DMA registers.
718  * 0 : Not safe; the update is not allowed.
719  */
720 int set_transfer (ide_drive_t *drive, ide_task_t *args)
721 {
722         if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
723             (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) &&
724             (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) &&
725             (drive->id->dma_ultra ||
726              drive->id->dma_mword ||
727              drive->id->dma_1word))
728                 return 1;
729
730         return 0;
731 }
732
733 #ifdef CONFIG_BLK_DEV_IDEDMA
734 static u8 ide_auto_reduce_xfer (ide_drive_t *drive)
735 {
736         if (!drive->crc_count)
737                 return drive->current_speed;
738         drive->crc_count = 0;
739
740         switch(drive->current_speed) {
741                 case XFER_UDMA_7:       return XFER_UDMA_6;
742                 case XFER_UDMA_6:       return XFER_UDMA_5;
743                 case XFER_UDMA_5:       return XFER_UDMA_4;
744                 case XFER_UDMA_4:       return XFER_UDMA_3;
745                 case XFER_UDMA_3:       return XFER_UDMA_2;
746                 case XFER_UDMA_2:       return XFER_UDMA_1;
747                 case XFER_UDMA_1:       return XFER_UDMA_0;
748                         /*
749                          * We do not drop down to the non-Ultra DMA modes:
750                          * without iCRC available we force the system to
751                          * PIO and leave it to the user to invoke the
752                          * ATA-1/ATA-2 DMA modes explicitly.
753                          */
754                 case XFER_UDMA_0:
755                 default:                return XFER_PIO_4;
756         }
757 }
758 #endif /* CONFIG_BLK_DEV_IDEDMA */
759
760 /*
761  * Update the drive's cached identify data (DMA mode words) after a transfer-mode change.
762  */
763 int ide_driveid_update (ide_drive_t *drive)
764 {
765         ide_hwif_t *hwif        = HWIF(drive);
766         struct hd_driveid *id;
767 #if 0
768         id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
769         if (!id)
770                 return 0;
771
772         taskfile_lib_get_identify(drive, (char *)&id);
773
774         ide_fix_driveid(id);
775         if (id) {
776                 drive->id->dma_ultra = id->dma_ultra;
777                 drive->id->dma_mword = id->dma_mword;
778                 drive->id->dma_1word = id->dma_1word;
779                 /* anything more ? */
780                 kfree(id);
781         }
782         return 1;
783 #else
784         /*
785          * Re-read drive->id for possible DMA mode
786          * change (copied from ide-probe.c)
787          */
788         unsigned long timeout, flags;
789
790         SELECT_MASK(drive, 1);
791         if (IDE_CONTROL_REG)
792                 hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
793         msleep(50);
794         hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
795         timeout = jiffies + WAIT_WORSTCASE;
796         do {
797                 if (time_after(jiffies, timeout)) {
798                         SELECT_MASK(drive, 0);
799                         return 0;       /* drive timed-out */
800                 }
801                 msleep(50);     /* give drive a breather */
802         } while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
803         msleep(50);     /* wait for IRQ and DRQ_STAT */
804         if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
805                 SELECT_MASK(drive, 0);
806                 printk("%s: CHECK for good STATUS\n", drive->name);
807                 return 0;
808         }
809         local_irq_save(flags);
810         SELECT_MASK(drive, 0);
811         id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
812         if (!id) {
813                 local_irq_restore(flags);
814                 return 0;
815         }
816         ata_input_data(drive, id, SECTOR_WORDS);
817         (void) hwif->INB(IDE_STATUS_REG);       /* clear drive IRQ */
818         local_irq_enable();
819         local_irq_restore(flags);
820         ide_fix_driveid(id);
821         if (id) {
822                 drive->id->dma_ultra = id->dma_ultra;
823                 drive->id->dma_mword = id->dma_mword;
824                 drive->id->dma_1word = id->dma_1word;
825                 /* anything more ? */
826                 kfree(id);
827         }
828
829         return 1;
830 #endif
831 }
832
833 /*
834  * Similar to ide_wait_stat(), except it never calls ide_error internally.
835  * This is a kludge to handle the new ide_config_drive_speed() function,
836  * and should not otherwise be used anywhere.  Eventually, the tuneproc's
837  * should be updated to return ide_startstop_t, in which case we can get
838  * rid of this abomination again.  :)   -ml
839  *
840  * It is gone..........
841  *
842  * const char *msg == consider adding for verbose errors.
843  *
844  * Beware. If we timed out from a series of CRC errors and the timer
845  * expiry caused a switch to PIO mode, and we take an IRQ as the drive times
846  * out at about the same moment, then we may be entering this function with a
847  * pending interrupt.
848  */
849 int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
850 {
851         ide_hwif_t *hwif        = HWIF(drive);
852         int     i, error        = 1;
853         u8 stat;
854
855 //      while (HWGROUP(drive)->busy)
856 //              msleep(50);
857
858 #ifdef CONFIG_BLK_DEV_IDEDMA
859         if (hwif->ide_dma_check)         /* check if host supports DMA */
860                 hwif->ide_dma_host_off(drive);
861 #endif
862
863         /*
864          * Don't use ide_wait_cmd here - it will
865          * attempt to set_geometry and recalibrate. We can't
866          * do that here as we may be in the IRQ handler already.
867          *
868          * Select the drive, and issue the SETFEATURES command in
869          * polled mode.
870          */
871         disable_irq_nosync(hwif->irq);
872         
873         /*
874          *      We race against the running IRQ here if
875          *      this is called from non IRQ context. If we use
876          *      disable_irq() we hang on the error path. Instead we
877          *      must let the core code know the hwif is doing a polling
878          *      recovery.
879          */
880          
881         udelay(1);
882         SELECT_DRIVE(drive);
883         SELECT_MASK(drive, 0);
884         udelay(1);
885         if (IDE_CONTROL_REG)
886                 hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
887         hwif->OUTB(speed, IDE_NSECTOR_REG);
888         hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
889         hwif->OUTBSYNC(drive, WIN_SETFEATURES, IDE_COMMAND_REG);
890         /* The status bits are not valid for 400nS */
891         udelay(1);
892         
893         /* Drive status is now valid, which means we can allow interrupts
894            to occur: they will see the drive as busy and will not
895            interfere erroneously. IRQs for this drive will also be off,
896            provided the control register and quirk handling allow for it */
897            
898         if ((IDE_CONTROL_REG) && drive->quirk_list == 2)
899                 hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
900         udelay(1);
901         
902         /*
903          * Tell the interrupt layer that we are doing polled recovery.
904          * Eventually this should use the same mechanism do_reset does
905          * internally.
906          */
907          
908         hwif->polling = 1;
909         
910         /*
911          * Wait for drive to become non-BUSY
912          */
913         if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
914                 unsigned long timeout;
915                 /* FIXME */
916 /*              spin_unlock_irq(&ide_lock); */
917                 timeout = jiffies + WAIT_CMD;
918                 while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
919                         if (time_after(jiffies, timeout))
920                                 break;
921                 }
922 /*              spin_lock_irq(&ide_lock); */
923         }
924         
925         hwif->polling = 0;
926
927         /*
928          * Allow status to settle, then read it again.
929          * A few rare drives vastly violate the 400ns spec here,
930          * so we'll wait up to 10usec for a "good" status
931          * rather than expensively fail things immediately.
932          * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
933          */
934         for (i = 0; i < 10; i++) {
935                 udelay(1);
936                 if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
937                         error = 0;
938                         break;
939                 }
940         }
941
942         SELECT_MASK(drive, 0);
943
944         enable_irq(hwif->irq);
945
946         if (error) {
947                 (void) ide_dump_status(drive, "set_drive_speed_status", stat);
948                 return error;
949         }
950
951         drive->id->dma_ultra &= ~0xFF00;
952         drive->id->dma_mword &= ~0x0F00;
953         drive->id->dma_1word &= ~0x0F00;
954
955 #ifdef CONFIG_BLK_DEV_IDEDMA
956         if (speed >= XFER_SW_DMA_0)
957                 hwif->ide_dma_host_on(drive);
958         else if (hwif->ide_dma_check)   /* check if host supports DMA */
959                 hwif->ide_dma_off_quietly(drive);
960 #endif
961
962         switch(speed) {
963                 case XFER_UDMA_7:   drive->id->dma_ultra |= 0x8080; break;
964                 case XFER_UDMA_6:   drive->id->dma_ultra |= 0x4040; break;
965                 case XFER_UDMA_5:   drive->id->dma_ultra |= 0x2020; break;
966                 case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
967                 case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
968                 case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
969                 case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
970                 case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
971                 case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
972                 case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
973                 case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
974                 case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
975                 case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
976                 case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
977                 default: break;
978         }
979         if (!drive->init_speed)
980                 drive->init_speed = speed;
981         drive->current_speed = speed;
982         return error;
983 }
984
985 EXPORT_SYMBOL(ide_config_drive_speed);
986
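#if 0
/*
 * Illustrative sketch (not compiled): a chipset "speedproc" typically
 * programs its own timing registers and then calls
 * ide_config_drive_speed() to issue the SETFEATURES command to the drive.
 * my_chipset_program_timings() and example_speedproc() are assumptions
 * for this example only.
 */
static void my_chipset_program_timings(ide_drive_t *drive, u8 speed)
{
        /* hypothetical hardware-specific timing setup would go here */
}

static int example_speedproc(ide_drive_t *drive, u8 speed)
{
        my_chipset_program_timings(drive, speed);
        return ide_config_drive_speed(drive, speed);
}
#endif
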
987
988 /*
989  * This should get invoked any time we exit the driver to
990  * wait for an interrupt response from a drive.  handler() points
991  * at the appropriate code to handle the next interrupt, and a
992  * timer is started to prevent us from waiting forever in case
993  * something goes wrong (see the ide_timer_expiry() handler later on).
994  *
995  * See also ide_execute_command
996  */
997 static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
998                       unsigned int timeout, ide_expiry_t *expiry)
999 {
1000         ide_hwgroup_t *hwgroup = HWGROUP(drive);
1001
1002         if (hwgroup->handler != NULL) {
1003                 printk(KERN_CRIT "%s: ide_set_handler: handler not null; "
1004                         "old=%p, new=%p\n",
1005                         drive->name, hwgroup->handler, handler);
1006         }
1007         hwgroup->handler        = handler;
1008         hwgroup->expiry         = expiry;
1009         hwgroup->timer.expires  = jiffies + timeout;
1010         add_timer(&hwgroup->timer);
1011 }
1012
1013 void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
1014                       unsigned int timeout, ide_expiry_t *expiry)
1015 {
1016         unsigned long flags;
1017         spin_lock_irqsave(&ide_lock, flags);
1018         __ide_set_handler(drive, handler, timeout, expiry);
1019         spin_unlock_irqrestore(&ide_lock, flags);
1020 }
1021
1022 EXPORT_SYMBOL(ide_set_handler);
1023  
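#if 0
/*
 * Illustrative sketch (not compiled): a polling-style handler re-arming
 * itself with ide_set_handler(), in the same way the reset poll functions
 * later in this file do.  example_poll() is an assumption for this
 * example only.
 */
static ide_startstop_t example_poll(ide_drive_t *drive)
{
        u8 stat = HWIF(drive)->INB(IDE_STATUS_REG);

        if (stat & BUSY_STAT) {
                /* still busy: come back in 50ms */
                ide_set_handler(drive, &example_poll, HZ/20, NULL);
                return ide_started;
        }
        return ide_stopped;
}
#endif
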
1024 /**
1025  *      ide_execute_command     -       execute an IDE command
1026  *      @drive: IDE drive to issue the command against
1027  *      @command: command byte to write
1028  *      @handler: handler for next phase
1029  *      @timeout: timeout for command
1030  *      @expiry:  handler to run on timeout
1031  *
1032  *      Helper function to issue an IDE command. This handles the
1033  *      atomicity requirements, command timing and ensures that the 
1034  *      handler and IRQ setup do not race. All IDE command kick off
1035  *      should go via this function or do equivalent locking.
1036  */
1037  
1038 void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *handler, unsigned timeout, ide_expiry_t *expiry)
1039 {
1040         unsigned long flags;
1041         ide_hwgroup_t *hwgroup = HWGROUP(drive);
1042         ide_hwif_t *hwif = HWIF(drive);
1043         
1044         spin_lock_irqsave(&ide_lock, flags);
1045         
1046         if(hwgroup->handler)
1047                 BUG();
1048         hwgroup->handler        = handler;
1049         hwgroup->expiry         = expiry;
1050         hwgroup->timer.expires  = jiffies + timeout;
1051         add_timer(&hwgroup->timer);
1052         hwif->OUTBSYNC(drive, cmd, IDE_COMMAND_REG);
1053         /* Drive takes 400nS to respond, we must avoid the IRQ being
1054            serviced before that. 
1055            
1056            FIXME: we could skip this delay with care on non shared
1057            devices 
1058         */
1059         ndelay(400);
1060         spin_unlock_irqrestore(&ide_lock, flags);
1061 }
1062
1063 EXPORT_SYMBOL(ide_execute_command);
1064
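#if 0
/*
 * Illustrative sketch (not compiled): loading the control register by hand
 * and then kicking the command off through ide_execute_command(), which
 * takes care of the handler/timer/IRQ ordering described above.
 * example_intr() and example_issue_identify() are assumptions for this
 * example only.
 */
static ide_startstop_t example_intr(ide_drive_t *drive)
{
        (void) HWIF(drive)->INB(IDE_STATUS_REG);        /* ack the IRQ */
        return ide_stopped;
}

static void example_issue_identify(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
        ide_execute_command(drive, WIN_IDENTIFY, &example_intr,
                            WAIT_CMD, NULL);
}
#endif
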
1065
1066 /* needed below */
1067 static ide_startstop_t do_reset1 (ide_drive_t *, int);
1068
1069 /*
1070  * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
1071  * during an atapi drive reset operation. If the drive has not yet responded,
1072  * and we have not yet hit our maximum waiting time, then the timer is restarted
1073  * for another 50ms.
1074  */
1075 static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
1076 {
1077         ide_hwgroup_t *hwgroup  = HWGROUP(drive);
1078         ide_hwif_t *hwif        = HWIF(drive);
1079         u8 stat;
1080
1081         SELECT_DRIVE(drive);
1082         udelay (10);
1083
1084         if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
1085                 printk("%s: ATAPI reset complete\n", drive->name);
1086         } else {
1087                 if (time_before(jiffies, hwgroup->poll_timeout)) {
1088                         if (HWGROUP(drive)->handler != NULL)
1089                                 BUG();
1090                         ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
1091                         /* continue polling */
1092                         return ide_started;
1093                 }
1094                 /* end of polling */
1095                 hwgroup->poll_timeout = 0;
1096                 printk("%s: ATAPI reset timed-out, status=0x%02x\n",
1097                                 drive->name, stat);
1098                 /* do it the old fashioned way */
1099                 return do_reset1(drive, 1);
1100         }
1101         /* done polling */
1102         hwgroup->poll_timeout = 0;
1103         return ide_stopped;
1104 }
1105
1106 /*
1107  * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
1108  * during an ide reset operation. If the drives have not yet responded,
1109  * and we have not yet hit our maximum waiting time, then the timer is restarted
1110  * for another 50ms.
1111  */
1112 static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
1113 {
1114         ide_hwgroup_t *hwgroup  = HWGROUP(drive);
1115         ide_hwif_t *hwif        = HWIF(drive);
1116         u8 tmp;
1117
1118         if (hwif->reset_poll != NULL) {
1119                 if (hwif->reset_poll(drive)) {
1120                         printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
1121                                 hwif->name, drive->name);
1122                         return ide_stopped;
1123                 }
1124         }
1125
1126         if (!OK_STAT(tmp = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
1127                 if (time_before(jiffies, hwgroup->poll_timeout)) {
1128                         if (HWGROUP(drive)->handler != NULL)
1129                                 BUG();
1130                         ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
1131                         /* continue polling */
1132                         return ide_started;
1133                 }
1134                 printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
1135                 drive->failures++;
1136         } else  {
1137                 printk("%s: reset: ", hwif->name);
1138                 if ((tmp = hwif->INB(IDE_ERROR_REG)) == 1) {
1139                         printk("success\n");
1140                         drive->failures = 0;
1141                 } else {
1142                         drive->failures++;
1143                         printk("master: ");
1144                         switch (tmp & 0x7f) {
1145                                 case 1: printk("passed");
1146                                         break;
1147                                 case 2: printk("formatter device error");
1148                                         break;
1149                                 case 3: printk("sector buffer error");
1150                                         break;
1151                                 case 4: printk("ECC circuitry error");
1152                                         break;
1153                                 case 5: printk("controlling MPU error");
1154                                         break;
1155                                 default:printk("error (0x%02x?)", tmp);
1156                         }
1157                         if (tmp & 0x80)
1158                                 printk("; slave: failed");
1159                         printk("\n");
1160                 }
1161         }
1162         hwgroup->poll_timeout = 0;      /* done polling */
1163         return ide_stopped;
1164 }
1165
1166 static void check_dma_crc(ide_drive_t *drive)
1167 {
1168 #ifdef CONFIG_BLK_DEV_IDEDMA
1169         if (drive->crc_count) {
1170                 (void) HWIF(drive)->ide_dma_off_quietly(drive);
1171                 ide_set_xfer_rate(drive, ide_auto_reduce_xfer(drive));
1172                 if (drive->current_speed >= XFER_SW_DMA_0)
1173                         (void) HWIF(drive)->ide_dma_on(drive);
1174         } else
1175                 (void)__ide_dma_off(drive);
1176 #endif
1177 }
1178
1179 void pre_reset (ide_drive_t *drive)
1180 {
1181         DRIVER(drive)->pre_reset(drive);
1182
1183         if (!drive->keep_settings) {
1184                 if (drive->using_dma) {
1185                         check_dma_crc(drive);
1186                 } else {
1187                         drive->unmask = 0;
1188                         drive->io_32bit = 0;
1189                 }
1190                 return;
1191         }
1192         if (drive->using_dma)
1193                 check_dma_crc(drive);
1194
1195         if (HWIF(drive)->pre_reset != NULL)
1196                 HWIF(drive)->pre_reset(drive);
1197
1198 }
1199
1200 /*
1201  * do_reset1() attempts to recover a confused drive by resetting it.
1202  * Unfortunately, resetting a disk drive actually resets all devices on
1203  * the same interface, so it can really be thought of as resetting the
1204  * interface rather than resetting the drive.
1205  *
1206  * ATAPI devices have their own reset mechanism which allows them to be
1207  * individually reset without clobbering other devices on the same interface.
1208  *
1209  * Unfortunately, the IDE interface does not generate an interrupt to let
1210  * us know when the reset operation has finished, so we must poll for this.
1211  * Equally poor, though, is the fact that this may take a very long time to complete,
1212  * (up to 30 seconds worstcase).  So, instead of busy-waiting here for it,
1213  * we set a timer to poll at 50ms intervals.
1214  */
1215 static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
1216 {
1217         unsigned int unit;
1218         unsigned long flags;
1219         ide_hwif_t *hwif;
1220         ide_hwgroup_t *hwgroup;
1221         
1222         spin_lock_irqsave(&ide_lock, flags);
1223         hwif = HWIF(drive);
1224         hwgroup = HWGROUP(drive);
1225
1226         /* We must not reset with running handlers */
1227         if(hwgroup->handler != NULL)
1228                 BUG();
1229
1230         /* For an ATAPI device, first try an ATAPI SRST. */
1231         if (drive->media != ide_disk && !do_not_try_atapi) {
1232                 pre_reset(drive);
1233                 SELECT_DRIVE(drive);
1234                 udelay (20);
1235                 hwif->OUTBSYNC(drive, WIN_SRST, IDE_COMMAND_REG);
1236                 ndelay(400);
1237                 hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1238                 __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
1239                 spin_unlock_irqrestore(&ide_lock, flags);
1240                 return ide_started;
1241         }
1242
1243         /*
1244          * First, reset any device state data we were maintaining
1245          * for any of the drives on this interface.
1246          */
1247         for (unit = 0; unit < MAX_DRIVES; ++unit)
1248                 pre_reset(&hwif->drives[unit]);
1249
1250 #if OK_TO_RESET_CONTROLLER
1251         if (!IDE_CONTROL_REG) {
1252                 spin_unlock_irqrestore(&ide_lock, flags);
1253                 return ide_stopped;
1254         }
1255
1256         /*
1257          * Note that we also set nIEN while resetting the device,
1258          * to mask unwanted interrupts from the interface during the reset.
1259          * However, due to the design of PC hardware, this will cause an
1260          * immediate interrupt due to the edge transition it produces.
1261          * This single interrupt gives us a "fast poll" for drives that
1262          * recover from reset very quickly, saving us the first 50ms wait time.
1263          */
1264         /* set SRST and nIEN */
1265         hwif->OUTBSYNC(drive, drive->ctl|6,IDE_CONTROL_REG);
1266         /* more than enough time */
1267         udelay(10);
1268         if (drive->quirk_list == 2) {
1269                 /* clear SRST and nIEN */
1270                 hwif->OUTBSYNC(drive, drive->ctl, IDE_CONTROL_REG);
1271         } else {
1272                 /* clear SRST, leave nIEN */
1273                 hwif->OUTBSYNC(drive, drive->ctl|2, IDE_CONTROL_REG);
1274         }
1275         /* more than enough time */
1276         udelay(10);
1277         hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
1278         __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
1279
1280         /*
1281          * Some weird controllers like to reset themselves to a strange
1282          * state when the disks are reset this way. At least, the Winbond
1283          * 553 documentation says so.
1284          */
1285         if (hwif->resetproc != NULL) {
1286                 hwif->resetproc(drive);
1287         }
1288         
1289 #endif  /* OK_TO_RESET_CONTROLLER */
1290
1291         spin_unlock_irqrestore(&ide_lock, flags);
1292         return ide_started;
1293 }
1294
1295 /*
1296  * ide_do_reset() is the entry point to the drive/interface reset code.
1297  */
1298
1299 ide_startstop_t ide_do_reset (ide_drive_t *drive)
1300 {
1301         return do_reset1(drive, 0);
1302 }
1303
1304 EXPORT_SYMBOL(ide_do_reset);
1305
1306 /*
1307  * ide_wait_not_busy() waits for the currently selected device on the hwif
1308  * to report a non-busy status, see comments in probe_hwif().
1309  */
1310 int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
1311 {
1312         u8 stat = 0;
1313
1314         while(timeout--) {
1315                 /*
1316                  * Turn this into a schedule() sleep once I'm sure
1317                  * about locking issues (2.5 work ?).
1318                  */
1319                 mdelay(1);
1320                 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
1321                 if ((stat & BUSY_STAT) == 0)
1322                         return 0;
1323                 /*
1324                  * Assume a value of 0xff means nothing is connected to
1325                  * the interface and it doesn't implement the pull-down
1326                  * resistor on D7.
1327                  */
1328                 if (stat == 0xff)
1329                         return -ENODEV;
1330         }
1331         return -EBUSY;
1332 }
1333
1334 EXPORT_SYMBOL_GPL(ide_wait_not_busy);
1335