4 * Basic PIO and command management functionality.
6 * This code was split off from ide.c. See ide.c for history and original
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * For the avoidance of doubt the "preferred form" of this code is one which
20 * is in an open non patent encumbered format. Where cryptographic key signing
21 * forms part of the process of creating an executable the information
22 * including keys needed to generate an equivalently functional executable
23 * are deemed to be part of the source code.
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/string.h>
31 #include <linux/kernel.h>
32 #include <linux/timer.h>
34 #include <linux/interrupt.h>
35 #include <linux/major.h>
36 #include <linux/errno.h>
37 #include <linux/genhd.h>
38 #include <linux/blkpg.h>
39 #include <linux/slab.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/delay.h>
43 #include <linux/ide.h>
44 #include <linux/completion.h>
45 #include <linux/reboot.h>
46 #include <linux/cdrom.h>
47 #include <linux/seq_file.h>
48 #include <linux/device.h>
49 #include <linux/kmod.h>
51 #include <asm/byteorder.h>
53 #include <asm/uaccess.h>
55 #include <asm/bitops.h>
57 static void ide_fill_flush_cmd(ide_drive_t *drive, struct request *rq)
62 * reuse cdb space for ata command
64 memset(buf, 0, sizeof(rq->cmd));
66 rq->flags |= REQ_DRIVE_TASK | REQ_STARTED;
68 rq->buffer[0] = WIN_FLUSH_CACHE;
70 if (ide_id_has_flush_cache_ext(drive->id) &&
71 (drive->capacity64 >= (1UL << 28)))
72 rq->buffer[0] = WIN_FLUSH_CACHE_EXT;
76 * preempt pending requests, and store this cache flush for immediate
79 static struct request *ide_queue_flush_cmd(ide_drive_t *drive,
80 struct request *rq, int post)
82 struct request *flush_rq = &HWGROUP(drive)->wrq;
85 * write cache disabled, clear the barrier bit and treat it like
89 rq->flags |= REQ_BAR_PREFLUSH;
93 ide_init_drive_cmd(flush_rq);
94 ide_fill_flush_cmd(drive, flush_rq);
96 flush_rq->special = rq;
97 flush_rq->nr_sectors = rq->nr_sectors;
100 drive->doing_barrier = 1;
101 flush_rq->flags |= REQ_BAR_PREFLUSH;
102 blkdev_dequeue_request(rq);
104 flush_rq->flags |= REQ_BAR_POSTFLUSH;
106 __elv_add_request(drive->queue, flush_rq, ELEVATOR_INSERT_FRONT, 0);
107 HWGROUP(drive)->rq = NULL;
111 static int __ide_end_request(ide_drive_t *drive, struct request *rq,
112 int uptodate, int nr_sectors)
116 BUG_ON(!(rq->flags & REQ_STARTED));
119 * if failfast is set on a request, override number of sectors and
120 * complete the whole request right now
122 if (blk_noretry_request(rq) && end_io_error(uptodate))
123 nr_sectors = rq->hard_nr_sectors;
125 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
129 * decide whether to reenable DMA -- 3 is a random magic for now,
130 * if we DMA timeout more than 3 times, just stay in PIO
132 if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
134 HWGROUP(drive)->hwif->ide_dma_on(drive);
137 if (!end_that_request_first(rq, uptodate, nr_sectors)) {
138 add_disk_randomness(rq->rq_disk);
140 if (blk_rq_tagged(rq))
141 blk_queue_end_tag(drive->queue, rq);
143 blkdev_dequeue_request(rq);
144 HWGROUP(drive)->rq = NULL;
145 end_that_request_last(rq);
152 * ide_end_request - complete an IDE I/O
153 * @drive: IDE device for the I/O
155 * @nr_sectors: number of sectors completed
157 * This is our end_request wrapper function. We complete the I/O,
158 * update the random number input and dequeue the request, which,
159 * if it was tagged, may be out of order.
162 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
168 spin_lock_irqsave(&ide_lock, flags);
169 rq = HWGROUP(drive)->rq;
172 nr_sectors = rq->hard_cur_sectors;
174 if (!blk_barrier_rq(rq) || !drive->wcache)
175 ret = __ide_end_request(drive, rq, uptodate, nr_sectors);
177 struct request *flush_rq = &HWGROUP(drive)->wrq;
179 flush_rq->nr_sectors -= nr_sectors;
180 if (!flush_rq->nr_sectors) {
181 ide_queue_flush_cmd(drive, rq, 1);
186 spin_unlock_irqrestore(&ide_lock, flags);
189 EXPORT_SYMBOL(ide_end_request);
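/*
 * Illustrative sketch (not part of the original file): how a subdriver's
 * PIO completion handler typically uses ide_end_request().  The handler
 * name and the status test are hypothetical; real drivers check
 * command-specific state before deciding how much of the request is done.
 */
static ide_startstop_t example_rw_intr(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = HWIF(drive)->INB(IDE_STATUS_REG);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return DRIVER(drive)->error(drive, "example_rw_intr", stat);

	/* good completion: retire the sectors of the current chunk */
	ide_end_request(drive, 1, rq->hard_cur_sectors);
	return ide_stopped;
}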
192 * ide_end_dequeued_request - complete an IDE I/O
193 * @drive: IDE device for the I/O
195 * @nr_sectors: number of sectors completed
197 * Complete an I/O that is no longer on the request queue. This
198 * typically occurs when we pull the request and issue a REQUEST_SENSE.
199 * We must still finish the old request but we must not tamper with the
200 * queue in the meantime.
202 * NOTE: This path does not handle barrier, but barrier is not supported
206 int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
207 int uptodate, int nr_sectors)
212 spin_lock_irqsave(&ide_lock, flags);
214 BUG_ON(!(rq->flags & REQ_STARTED));
217 * if failfast is set on a request, override number of sectors and
218 * complete the whole request right now
220 if (blk_noretry_request(rq) && end_io_error(uptodate))
221 nr_sectors = rq->hard_nr_sectors;
223 if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
227 * decide whether to reenable DMA -- 3 is a random magic for now,
228 * if we DMA timeout more than 3 times, just stay in PIO
230 if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
232 HWGROUP(drive)->hwif->ide_dma_on(drive);
235 if (!end_that_request_first(rq, uptodate, nr_sectors)) {
236 add_disk_randomness(rq->rq_disk);
237 if (blk_rq_tagged(rq))
238 blk_queue_end_tag(drive->queue, rq);
239 end_that_request_last(rq);
242 spin_unlock_irqrestore(&ide_lock, flags);
245 EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
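/*
 * Illustrative sketch (not part of the original file): an ATAPI-style
 * driver that pulled a failed request off the queue (to issue a
 * REQUEST_SENSE first) finishes it later with ide_end_dequeued_request()
 * rather than ide_end_request(), because the request is no longer the one
 * at the head of the queue.  The function name is hypothetical.
 */
static void example_finish_failed_rq(ide_drive_t *drive, struct request *failed_rq)
{
	/* mark the whole dequeued request as failed */
	ide_end_dequeued_request(drive, failed_rq, 0, failed_rq->hard_nr_sectors);
}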
248 * ide_complete_pm_request - end the current Power Management request
249 * @drive: target drive
252 * This function cleans up the current PM request and stops the queue
255 static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
260 printk("%s: completing PM request, %s\n", drive->name,
261 blk_pm_suspend_request(rq) ? "suspend" : "resume");
263 spin_lock_irqsave(&ide_lock, flags);
264 if (blk_pm_suspend_request(rq)) {
265 blk_stop_queue(drive->queue);
268 blk_start_queue(drive->queue);
270 blkdev_dequeue_request(rq);
271 HWGROUP(drive)->rq = NULL;
272 end_that_request_last(rq);
273 spin_unlock_irqrestore(&ide_lock, flags);
277 * FIXME: probably move this somewhere else, name is bad too :)
279 u64 ide_get_error_location(ide_drive_t *drive, char *args)
290 if (ide_id_has_flush_cache_ext(drive->id)) {
291 low = (hcyl << 16) | (lcyl << 8) | sect;
292 HWIF(drive)->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
293 high = ide_read_24(drive);
295 u8 cur = HWIF(drive)->INB(IDE_SELECT_REG);
297 low = (hcyl << 16) | (lcyl << 8) | sect;
299 low = hcyl * drive->head * drive->sect;
300 low += lcyl * drive->sect;
305 sector = ((u64) high << 24) | low;
308 EXPORT_SYMBOL(ide_get_error_location);
310 static void ide_complete_barrier(ide_drive_t *drive, struct request *rq,
313 struct request *real_rq = rq->special;
314 int good_sectors, bad_sectors;
318 if (blk_barrier_postflush(rq)) {
320 * this completes the barrier write
322 __ide_end_request(drive, real_rq, 1, real_rq->hard_nr_sectors);
323 drive->doing_barrier = 0;
326 * just indicate that we did the pre flush
328 real_rq->flags |= REQ_BAR_PREFLUSH;
329 elv_requeue_request(drive->queue, real_rq);
332 * all is fine, return
338 * we need to end real_rq, but it's not on the queue currently.
339 * put it back on the queue, so we don't have to special case
340 * anything else for completing it
342 if (!blk_barrier_postflush(rq))
343 elv_requeue_request(drive->queue, real_rq);
346 * drive aborted flush command, assume FLUSH_CACHE_* doesn't
347 * work and disable barrier support
349 if (error & ABRT_ERR) {
350 printk(KERN_ERR "%s: barrier support doesn't work\n", drive->name);
351 __ide_end_request(drive, real_rq, -EOPNOTSUPP, real_rq->hard_nr_sectors);
352 blk_queue_ordered(drive->queue, 0);
353 blk_queue_issue_flush_fn(drive->queue, NULL);
356 * find out what part of the request failed
359 if (blk_barrier_postflush(rq)) {
360 sector = ide_get_error_location(drive, rq->buffer);
362 if ((sector >= real_rq->hard_sector) &&
363 (sector < real_rq->hard_sector + real_rq->hard_nr_sectors))
364 good_sectors = sector - real_rq->hard_sector;
366 sector = real_rq->hard_sector;
368 bad_sectors = real_rq->hard_nr_sectors - good_sectors;
370 __ide_end_request(drive, real_rq, 1, good_sectors);
372 __ide_end_request(drive, real_rq, 0, bad_sectors);
374 printk(KERN_ERR "%s: failed barrier write: "
375 "sector=%Lx(good=%d/bad=%d)\n",
376 drive->name, (unsigned long long)sector,
377 good_sectors, bad_sectors);
380 drive->doing_barrier = 0;
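/*
 * Illustrative sketch (not part of the original file): the mirror image of
 * the blk_queue_ordered()/blk_queue_issue_flush_fn() disabling above.  A
 * disk subdriver that finds a usable write cache would advertise barrier
 * support roughly like this.  The non-zero "enable" flag and the function
 * name are assumptions, and a real driver would also install an
 * issue_flush_fn for the queue.
 */
static void example_enable_barriers(ide_drive_t *drive)
{
	if (drive->wcache)
		blk_queue_ordered(drive->queue, 1);
}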
384 * ide_end_drive_cmd - end an explicit drive command
389 * Clean up after success/failure of an explicit drive command.
390 * These get thrown onto the queue so they are synchronized with
391 * real I/O operations on the drive.
393 * In LBA48 mode we have to read the register set twice to get
394 * all the extra information out.
397 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
399 ide_hwif_t *hwif = HWIF(drive);
403 spin_lock_irqsave(&ide_lock, flags);
404 rq = HWGROUP(drive)->rq;
405 spin_unlock_irqrestore(&ide_lock, flags);
407 if (rq->flags & REQ_DRIVE_CMD) {
408 u8 *args = (u8 *) rq->buffer;
410 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
415 args[2] = hwif->INB(IDE_NSECTOR_REG);
417 } else if (rq->flags & REQ_DRIVE_TASK) {
418 u8 *args = (u8 *) rq->buffer;
420 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
425 args[2] = hwif->INB(IDE_NSECTOR_REG);
426 args[3] = hwif->INB(IDE_SECTOR_REG);
427 args[4] = hwif->INB(IDE_LCYL_REG);
428 args[5] = hwif->INB(IDE_HCYL_REG);
429 args[6] = hwif->INB(IDE_SELECT_REG);
431 } else if (rq->flags & REQ_DRIVE_TASKFILE) {
432 ide_task_t *args = (ide_task_t *) rq->special;
434 rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT);
437 if (args->tf_in_flags.b.data) {
438 u16 data = hwif->INW(IDE_DATA_REG);
439 args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF;
440 args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF;
442 args->tfRegister[IDE_ERROR_OFFSET] = err;
443 /* be sure we're looking at the low order bits */
444 hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG);
445 args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
446 args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
447 args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
448 args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
449 args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG);
450 args->tfRegister[IDE_STATUS_OFFSET] = stat;
452 if (drive->addressing == 1) {
453 hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
454 args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG);
455 args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG);
456 args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG);
457 args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG);
458 args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG);
461 } else if (blk_pm_request(rq)) {
463 printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
464 drive->name, rq->pm->pm_step, stat, err);
466 DRIVER(drive)->complete_power_step(drive, rq, stat, err);
467 if (rq->pm->pm_step == ide_pm_state_completed)
468 ide_complete_pm_request(drive, rq);
472 spin_lock_irqsave(&ide_lock, flags);
473 blkdev_dequeue_request(rq);
475 if (blk_barrier_preflush(rq) || blk_barrier_postflush(rq))
476 ide_complete_barrier(drive, rq, err);
478 HWGROUP(drive)->rq = NULL;
479 end_that_request_last(rq);
480 spin_unlock_irqrestore(&ide_lock, flags);
483 EXPORT_SYMBOL(ide_end_drive_cmd);
486 * try_to_flush_leftover_data - flush junk
487 * @drive: drive to flush
489 * try_to_flush_leftover_data() is invoked in response to a drive
490 * unexpectedly having its DRQ_STAT bit set. As an alternative to
491 * resetting the drive, this routine tries to clear the condition
492 * by reading a sector's worth of data from the drive. Of course,
493 * this may not help if the drive is *waiting* for data from *us*.
495 void try_to_flush_leftover_data (ide_drive_t *drive)
497 int i = (drive->mult_count ? drive->mult_count : 1) * SECTOR_WORDS;
499 if (drive->media != ide_disk)
503 u32 wcount = (i > 16) ? 16 : i;
506 HWIF(drive)->ata_input_data(drive, buffer, wcount);
510 EXPORT_SYMBOL(try_to_flush_leftover_data);
513 * FIXME Add an ATAPI error
517 * ide_error - handle an error on the IDE
518 * @drive: drive the error occurred on
519 * @msg: message to report
522 * ide_error() takes action based on the error returned by the drive.
523 * For normal I/O that may well include retries. We deal with
524 * both new-style (taskfile) and old style command handling here.
525 * In the case of taskfile command handling there is work left to
529 ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
535 err = ide_dump_status(drive, msg, stat);
536 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
540 /* retry only "normal" I/O: */
541 if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) {
543 ide_end_drive_cmd(drive, stat, err);
546 if (rq->flags & REQ_DRIVE_TASKFILE) {
548 ide_end_drive_cmd(drive, stat, err);
552 if (stat & BUSY_STAT || ((stat & WRERR_STAT) && !drive->nowerr)) {
553 /* other bits are useless when BUSY */
554 rq->errors |= ERROR_RESET;
556 if (drive->media != ide_disk)
559 if (stat & ERR_STAT) {
560 /* err has different meaning on cdrom and tape */
561 if (err == ABRT_ERR) {
562 if (drive->select.b.lba &&
563 (hwif->INB(IDE_COMMAND_REG) == WIN_SPECIFY))
564 /* some newer drives don't
565 * support WIN_SPECIFY
568 } else if ((err & BAD_CRC) == BAD_CRC) {
570 /* UDMA crc error -- just retry the operation */
571 } else if (err & (BBD_ERR | ECC_ERR)) {
572 /* retries won't help these */
573 rq->errors = ERROR_MAX;
574 } else if (err & TRK0_ERR) {
575 /* help it find track zero */
576 rq->errors |= ERROR_RECAL;
580 if ((stat & DRQ_STAT) && rq_data_dir(rq) != WRITE)
581 try_to_flush_leftover_data(drive);
583 if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT)) {
585 hwif->OUTB(WIN_IDLEIMMEDIATE,IDE_COMMAND_REG);
587 if (rq->errors >= ERROR_MAX) {
588 DRIVER(drive)->end_request(drive, 0, 0);
590 if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
592 return ide_do_reset(drive);
594 if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
595 drive->special.b.recalibrate = 1;
601 EXPORT_SYMBOL(ide_error);
604 * ide_abort - abort pending IDE operations
605 * @drive: drive the error occurred on
606 * @msg: message to report
608 * ide_abort kills and cleans up when we are about to do a
609 * host initiated reset on active commands. Longer term we
610 * want handlers to have sensible abort handling themselves
612 * This differs fundamentally from ide_error because in
613 * this case the command is doing just fine when we
617 ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg)
622 if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
626 /* retry only "normal" I/O: */
627 if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK)) {
629 ide_end_drive_cmd(drive, BUSY_STAT, 0);
632 if (rq->flags & REQ_DRIVE_TASKFILE) {
634 ide_end_drive_cmd(drive, BUSY_STAT, 0);
638 rq->errors |= ERROR_RESET;
639 DRIVER(drive)->end_request(drive, 0, 0);
643 EXPORT_SYMBOL(ide_abort);
646 * ide_cmd - issue a simple drive command
647 * @drive: drive the command is for
649 * @nsect: value for the sector count register
650 * @handler: handler for the command completion
652 * Issue a simple drive command with interrupts enabled.
653 * The drive must be selected beforehand.
656 void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, ide_handler_t *handler)
658 ide_hwif_t *hwif = HWIF(drive);
660 hwif->OUTB(drive->ctl,IDE_CONTROL_REG); /* clear nIEN */
661 SELECT_MASK(drive,0);
662 hwif->OUTB(nsect,IDE_NSECTOR_REG);
663 ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
666 EXPORT_SYMBOL(ide_cmd);
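/*
 * Illustrative sketch (not part of the original file): ide_cmd() pairs a
 * command byte with the handler that will run on the completion interrupt.
 * A minimal "set multiple mode" sequence, modelled loosely on what the
 * disk subdriver does from its special-command path, might look like this.
 * Names are hypothetical; no request needs completing here because the
 * command is issued on behalf of drive->special handling.
 */
static ide_startstop_t example_set_multmode_intr(ide_drive_t *drive)
{
	u8 stat = HWIF(drive)->INB(IDE_STATUS_REG);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		drive->mult_count = drive->mult_req;
	else
		drive->mult_req = drive->mult_count = 0;
	return ide_stopped;
}

static ide_startstop_t example_set_multmode(ide_drive_t *drive)
{
	ide_cmd(drive, WIN_SETMULT, drive->mult_req, &example_set_multmode_intr);
	return ide_started;
}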
669 * drive_cmd_intr - drive command completion interrupt
670 * @drive: drive the completion interrupt occurred on
672 * drive_cmd_intr() is invoked on completion of a special DRIVE_CMD.
673 * We do any necessary data reading and then wait for the drive to
674 * go non busy. At that point we may read the error data and complete
678 ide_startstop_t drive_cmd_intr (ide_drive_t *drive)
680 struct request *rq = HWGROUP(drive)->rq;
681 ide_hwif_t *hwif = HWIF(drive);
682 u8 *args = (u8 *) rq->buffer;
683 u8 stat = hwif->INB(IDE_STATUS_REG);
687 if ((stat & DRQ_STAT) && args && args[3]) {
688 u8 io_32bit = drive->io_32bit;
690 hwif->ata_input_data(drive, &args[4], args[3] * SECTOR_WORDS);
691 drive->io_32bit = io_32bit;
692 while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
696 if (!OK_STAT(stat, READY_STAT, BAD_STAT) && DRIVER(drive) != NULL)
697 return DRIVER(drive)->error(drive, "drive_cmd", stat);
698 /* calls ide_end_drive_cmd */
699 ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
703 EXPORT_SYMBOL(drive_cmd_intr);
706 * do_special - issue some special commands
707 * @drive: drive the command is for
709 * do_special() is used to issue WIN_SPECIFY, WIN_RESTORE, and WIN_SETMULT
710 * commands to a drive. It used to do much more, but has been scaled
714 ide_startstop_t do_special (ide_drive_t *drive)
716 special_t *s = &drive->special;
719 printk("%s: do_special: 0x%02x\n", drive->name, s->all);
723 if (HWIF(drive)->tuneproc != NULL)
724 HWIF(drive)->tuneproc(drive, drive->tune_req);
728 return DRIVER(drive)->special(drive);
731 EXPORT_SYMBOL(do_special);
734 * execute_drive_cmd - issue special drive command
735 * @drive: the drive to issue the command on
736 * @rq: the request structure holding the command
738 * execute_drive_cmd() issues a special drive command, usually
739 * initiated by ioctl() from the external hdparm program. The
740 * command can be a drive command, drive task or taskfile
741 * operation. Weirdly you can call it with NULL to wait for
742 * all commands to finish. Don't do this as that is due to change
745 ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq)
747 ide_hwif_t *hwif = HWIF(drive);
748 if (rq->flags & REQ_DRIVE_TASKFILE) {
749 ide_task_t *args = rq->special;
754 hwif->data_phase = args->data_phase;
756 if (args->tf_out_flags.all != 0)
757 return flagged_taskfile(drive, args);
758 return do_rw_taskfile(drive, args);
759 } else if (rq->flags & REQ_DRIVE_TASK) {
760 u8 *args = rq->buffer;
766 printk("%s: DRIVE_TASK_CMD ", drive->name);
767 printk("cmd=0x%02x ", args[0]);
768 printk("fr=0x%02x ", args[1]);
769 printk("ns=0x%02x ", args[2]);
770 printk("sc=0x%02x ", args[3]);
771 printk("lcyl=0x%02x ", args[4]);
772 printk("hcyl=0x%02x ", args[5]);
773 printk("sel=0x%02x\n", args[6]);
775 hwif->OUTB(args[1], IDE_FEATURE_REG);
776 hwif->OUTB(args[3], IDE_SECTOR_REG);
777 hwif->OUTB(args[4], IDE_LCYL_REG);
778 hwif->OUTB(args[5], IDE_HCYL_REG);
779 sel = (args[6] & ~0x10);
780 if (drive->select.b.unit)
782 hwif->OUTB(sel, IDE_SELECT_REG);
783 ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
785 } else if (rq->flags & REQ_DRIVE_CMD) {
786 u8 *args = rq->buffer;
791 printk("%s: DRIVE_CMD ", drive->name);
792 printk("cmd=0x%02x ", args[0]);
793 printk("sc=0x%02x ", args[1]);
794 printk("fr=0x%02x ", args[2]);
795 printk("xx=0x%02x\n", args[3]);
797 if (args[0] == WIN_SMART) {
798 hwif->OUTB(0x4f, IDE_LCYL_REG);
799 hwif->OUTB(0xc2, IDE_HCYL_REG);
800 hwif->OUTB(args[2],IDE_FEATURE_REG);
801 hwif->OUTB(args[1],IDE_SECTOR_REG);
802 ide_cmd(drive, args[0], args[3], &drive_cmd_intr);
805 hwif->OUTB(args[2],IDE_FEATURE_REG);
806 ide_cmd(drive, args[0], args[1], &drive_cmd_intr);
812 * NULL is actually a valid way of waiting for
813 * all current requests to be flushed from the queue.
816 printk("%s: DRIVE_CMD (null)\n", drive->name);
818 ide_end_drive_cmd(drive,
819 hwif->INB(IDE_STATUS_REG),
820 hwif->INB(IDE_ERROR_REG));
824 EXPORT_SYMBOL(execute_drive_cmd);
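/*
 * Illustrative sketch (not part of the original file): the REQ_DRIVE_CMD
 * buffer layout consumed above is args[0]=command, args[1]=sector count
 * register, args[2]=feature register, args[3]=number of 512-byte sectors
 * of data the command returns (read back into args[4..] by
 * drive_cmd_intr()).  Fetching an identify page synchronously could look
 * roughly like this; the function name is hypothetical and buf must be at
 * least 4 + SECTOR_WORDS * 4 bytes long.
 */
static int example_get_identify(ide_drive_t *drive, u8 *buf)
{
	struct request rq;

	memset(buf, 0, 4 + SECTOR_WORDS * 4);
	ide_init_drive_cmd(&rq);
	rq.buffer = buf;
	buf[0] = (drive->media == ide_disk) ? WIN_IDENTIFY : WIN_PIDENTIFY;
	buf[3] = 1;	/* one sector of response data */
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}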
827 * start_request - start of I/O and command issuing for IDE
829 * start_request() initiates handling of a new I/O request. It
830 * accepts commands and I/O (read/write) requests. It also does
831 * the final remapping for weird stuff like EZDrive. Once
832 * device mapper can work at the sector level the EZDrive stuff can go away
834 * FIXME: this function needs a rename
837 ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
839 ide_startstop_t startstop;
842 BUG_ON(!(rq->flags & REQ_STARTED));
845 printk("%s: start_request: current=0x%08lx\n",
846 HWIF(drive)->name, (unsigned long) rq);
849 /* bail early if we've exceeded max_failures */
850 if (drive->max_failures && (drive->failures > drive->max_failures)) {
855 * bail early if we've sent a device to sleep, however how to wake
856 * this needs to be a masked flag. FIXME for proper operations.
858 if (drive->suspend_reset)
862 if (blk_fs_request(rq) &&
863 (drive->media == ide_disk || drive->media == ide_floppy)) {
864 block += drive->sect0;
866 /* Yecch - this will shift the entire interval,
867 possibly killing some innocent following sector */
868 if (block == 0 && drive->remap_0_to_1 == 1)
869 block = 1; /* redirect MBR access to EZ-Drive partn table */
871 if (blk_pm_suspend_request(rq) &&
872 rq->pm->pm_step == ide_pm_state_start_suspend)
873 /* Mark drive blocked when starting the suspend sequence. */
875 else if (blk_pm_resume_request(rq) &&
876 rq->pm->pm_step == ide_pm_state_start_resume) {
878 * The first thing we do on wakeup is to wait for BSY bit to
879 * go away (with a looong timeout) as a drive on this hwif may
880 * just be POSTing itself.
881 * We do that before even selecting as the "other" device on
882 * the bus may be broken enough to walk on our toes at this
887 printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
889 rc = ide_wait_not_busy(HWIF(drive), 35000);
891 printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
893 HWIF(drive)->OUTB(8, HWIF(drive)->io_ports[IDE_CONTROL_OFFSET]);
894 rc = ide_wait_not_busy(HWIF(drive), 10000);
896 printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
900 if (ide_wait_stat(&startstop, drive, drive->ready_stat, BUSY_STAT|DRQ_STAT, WAIT_READY)) {
901 printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
904 if (!drive->special.all) {
905 if (rq->flags & (REQ_DRIVE_CMD | REQ_DRIVE_TASK))
906 return execute_drive_cmd(drive, rq);
907 else if (rq->flags & REQ_DRIVE_TASKFILE)
908 return execute_drive_cmd(drive, rq);
909 else if (blk_pm_request(rq)) {
911 printk("%s: start_power_step(step: %d)\n",
912 drive->name, rq->pm->pm_step);
914 startstop = DRIVER(drive)->start_power_step(drive, rq);
915 if (startstop == ide_stopped &&
916 rq->pm->pm_step == ide_pm_state_completed)
917 ide_complete_pm_request(drive, rq);
920 return (DRIVER(drive)->do_request(drive, rq, block));
922 return do_special(drive);
924 DRIVER(drive)->end_request(drive, 0, 0);
928 EXPORT_SYMBOL(start_request);
931 * ide_stall_queue - pause an IDE device
932 * @drive: drive to stall
933 * @timeout: time to stall for (jiffies)
935 * ide_stall_queue() can be used by a drive to give excess bandwidth back
936 * to the hwgroup by sleeping for timeout jiffies.
939 void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
941 if (timeout > WAIT_WORSTCASE)
942 timeout = WAIT_WORSTCASE;
943 drive->sleep = timeout + jiffies;
946 EXPORT_SYMBOL(ide_stall_queue);
948 #define WAKEUP(drive) ((drive)->service_start + 2 * (drive)->service_time)
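/*
 * Worked example for the WAKEUP() heuristic above (illustrative, not from
 * the original source): if a drive began its last request at jiffies=1000
 * and that request took service_time=12 jiffies, WAKEUP() predicts it will
 * want attention again around 1000 + 2*12 = 1024.  choose_drive() below
 * prefers the drive whose predicted wakeup time is earliest.
 */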
951 * choose_drive - select a drive to service
952 * @hwgroup: hardware group to select on
954 * choose_drive() selects the next drive which will be serviced.
955 * This is necessary because the IDE layer can't issue commands
956 * to both drives on the same cable, unlike SCSI.
959 static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
961 ide_drive_t *drive, *best;
965 drive = hwgroup->drive;
968 * drive is doing pre-flush, ordered write, post-flush sequence. even
969 * though that is 3 requests, it must be seen as a single transaction.
970 * we must not preempt this drive until that is complete
972 if (drive->doing_barrier) {
974 * small race where queue could get replugged during
975 * the 3-request flush cycle, just yank the plug since
976 * we want it to finish asap
978 blk_remove_plug(drive->queue);
983 if ((!drive->sleep || time_after_eq(jiffies, drive->sleep))
984 && !elv_queue_empty(drive->queue)) {
986 || (drive->sleep && (!best->sleep || 0 < (signed long)(best->sleep - drive->sleep)))
987 || (!best->sleep && 0 < (signed long)(WAKEUP(best) - WAKEUP(drive))))
989 if (!blk_queue_plugged(drive->queue))
993 } while ((drive = drive->next) != hwgroup->drive);
994 if (best && best->nice1 && !best->sleep && best != hwgroup->drive && best->service_time > WAIT_MIN_SLEEP) {
995 long t = (signed long)(WAKEUP(best) - jiffies);
996 if (t >= WAIT_MIN_SLEEP) {
998 * We *may* have some time to spare, but first let's see if
999 * someone can potentially benefit from our nice mood today..
1004 /* FIXME: use time_before */
1005 && 0 < (signed long)(WAKEUP(drive) - (jiffies - best->service_time))
1006 && 0 < (signed long)((jiffies + t) - WAKEUP(drive)))
1008 ide_stall_queue(best, min_t(long, t, 10 * WAIT_MIN_SLEEP));
1011 } while ((drive = drive->next) != best);
1018 * Issue a new request to a drive from hwgroup
1019 * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
1021 * A hwgroup is a serialized group of IDE interfaces. Usually there is
1022 * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
1023 * may have both interfaces in a single hwgroup to "serialize" access.
1024 * Or possibly multiple ISA interfaces can share a common IRQ by being grouped
1025 * together into one hwgroup for serialized access.
1027 * Note also that several hwgroups can end up sharing a single IRQ,
1028 * possibly along with many other devices. This is especially common in
1029 * PCI-based systems with off-board IDE controller cards.
1031 * The IDE driver uses the single global ide_lock spinlock to protect
1032 * access to the request queues, and to protect the hwgroup->busy flag.
1034 * The first thread into the driver for a particular hwgroup sets the
1035 * hwgroup->busy flag to indicate that this hwgroup is now active,
1036 * and then initiates processing of the top request from the request queue.
1038 * Other threads attempting entry notice the busy setting, and will simply
1039 * queue their new requests and exit immediately. Note that hwgroup->busy
1040 * remains set even when the driver is merely awaiting the next interrupt.
1041 * Thus, the meaning is "this hwgroup is busy processing a request".
1043 * When processing of a request completes, the completing thread or IRQ-handler
1044 * will start the next request from the queue. If no more work remains,
1045 * the driver will clear the hwgroup->busy flag and exit.
1047 * The ide_lock (spinlock) is used to protect all access to the
1048 * hwgroup->busy flag, but is otherwise not needed for most processing in
1049 * the driver. This makes the driver much friendlier to shared IRQs
1050 * than previous designs, while remaining 100% (?) SMP safe and capable.
1052 /* --BenH: made non-static as ide-pmac.c uses it to kick the hwgroup back
1053 * into life on wakeup from machine sleep.
1055 void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
1060 ide_startstop_t startstop;
1062 /* for atari only: POSSIBLY BROKEN HERE(?) */
1063 ide_get_lock(ide_intr, hwgroup);
1065 /* caller must own ide_lock */
1066 BUG_ON(!irqs_disabled());
1068 while (!hwgroup->busy) {
1070 drive = choose_drive(hwgroup);
1071 if (drive == NULL) {
1072 unsigned long sleep = 0;
1074 drive = hwgroup->drive;
1076 if (drive->sleep && (!sleep || 0 < (signed long)(sleep - drive->sleep)))
1077 sleep = drive->sleep;
1078 } while ((drive = drive->next) != hwgroup->drive);
1081 * Take a short snooze, and then wake up this hwgroup again.
1082 * This gives other hwgroups on the same IRQ a chance to
1083 * play fairly with us, just in case there are big differences
1084 * in relative throughputs.. don't want to hog the cpu too much.
1086 if (time_before(sleep, jiffies + WAIT_MIN_SLEEP))
1087 sleep = jiffies + WAIT_MIN_SLEEP;
1089 if (timer_pending(&hwgroup->timer))
1090 printk(KERN_CRIT "ide_set_handler: timer already active\n");
1092 /* so that ide_timer_expiry knows what to do */
1093 hwgroup->sleeping = 1;
1094 mod_timer(&hwgroup->timer, sleep);
1095 /* we purposely leave hwgroup->busy==1
1098 /* Ugly, but how can we sleep for the lock
1099 * otherwise? perhaps from tq_disk?
1102 /* for atari only */
1107 /* no more work for this hwgroup (for now) */
1111 if (hwgroup->hwif->sharing_irq &&
1112 hwif != hwgroup->hwif &&
1113 hwif->io_ports[IDE_CONTROL_OFFSET]) {
1114 /* set nIEN for previous hwif */
1115 SELECT_INTERRUPT(drive);
1117 hwgroup->hwif = hwif;
1118 hwgroup->drive = drive;
1120 drive->service_start = jiffies;
1122 if (blk_queue_plugged(drive->queue)) {
1123 printk(KERN_ERR "ide: huh? queue was plugged!\n");
1128 * we know that the queue isn't empty, but this can happen
1129 * if the q->prep_rq_fn() decides to kill a request
1131 rq = elv_next_request(drive->queue);
1138 * if rq is a barrier write, issue pre cache flush if not
1141 if (blk_barrier_rq(rq) && !blk_barrier_preflush(rq))
1142 rq = ide_queue_flush_cmd(drive, rq, 0);
1145 * Sanity: don't accept a request that isn't a PM request
1146 * if we are currently power managed. This is very important as
1147 * blk_stop_queue() doesn't prevent the elv_next_request()
1148 * above to return us whatever is in the queue. Since we call
1149 * ide_do_request() ourselves, we end up taking requests while
1150 * the queue is blocked...
1152 * We let requests forced at head of queue with ide-preempt
1153 * though. I hope that doesn't happen too much, hopefully not
1154 * unless the subdriver triggers such a thing in its own PM
1157 if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) {
1158 /* We clear busy, there should be no pending ATA command at this point. */
1166 * Some systems have trouble with IDE IRQs arriving while
1167 * the driver is still setting things up. So, here we disable
1168 * the IRQ used by this interface while the request is being started.
1169 * This may look bad at first, but pretty much the same thing
1170 * happens anyway when any interrupt comes in, IDE or otherwise
1171 * -- the kernel masks the IRQ while it is being handled.
1173 if (hwif->irq != masked_irq)
1174 disable_irq_nosync(hwif->irq);
1175 spin_unlock(&ide_lock);
1177 /* allow other IRQs while we start this request */
1178 startstop = start_request(drive, rq);
1179 spin_lock_irq(&ide_lock);
1180 if (hwif->irq != masked_irq)
1181 enable_irq(hwif->irq);
1182 if (startstop == ide_stopped)
1187 EXPORT_SYMBOL(ide_do_request);
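/*
 * Illustrative sketch (not part of the original file): the kind of re-kick
 * mentioned in the ide-pmac note above ide_do_request() -- platform resume
 * code waking a hwgroup back up.  ide_do_request() must be entered under
 * ide_lock with IRQs disabled; a busy hwgroup simply ignores the kick.
 * The function name is hypothetical.
 */
static void example_kick_hwgroup(ide_drive_t *drive)
{
	unsigned long flags;

	spin_lock_irqsave(&ide_lock, flags);
	ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
	spin_unlock_irqrestore(&ide_lock, flags);
}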
1190 * Block layer request_fn: passes the drive's hwgroup on to ide_do_request()
1192 void do_ide_request(request_queue_t *q)
1194 ide_drive_t *drive = q->queuedata;
1196 ide_do_request(HWGROUP(drive), IDE_NO_IRQ);
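/*
 * Illustrative sketch (not part of the original file): do_ide_request() is
 * the request_fn that the probe code hands to the block layer, with
 * ide_lock doubling as the queue lock and queuedata pointing back at the
 * drive.  The function name is hypothetical.
 */
static request_queue_t *example_init_queue(ide_drive_t *drive)
{
	request_queue_t *q = blk_init_queue(do_ide_request, &ide_lock);

	if (q)
		q->queuedata = drive;
	return q;
}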
1200 * un-busy the hwgroup etc, and clear any pending DMA status. we want to
1201 * retry the current request in pio mode instead of risking tossing it
1204 static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
1206 ide_hwif_t *hwif = HWIF(drive);
1208 ide_startstop_t ret = ide_stopped;
1211 * end current dma transaction
1215 printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
1216 (void)HWIF(drive)->ide_dma_end(drive);
1217 ret = DRIVER(drive)->error(drive, "dma timeout error",
1218 hwif->INB(IDE_STATUS_REG));
1220 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
1221 (void) hwif->ide_dma_timeout(drive);
1225 * disable dma for now, but remember that we did so because of
1226 * a timeout -- we'll reenable after we finish this next request
1227 * (or rather the first chunk of it) in pio.
1230 drive->state = DMA_PIO_RETRY;
1231 (void) hwif->ide_dma_off_quietly(drive);
1234 * un-busy drive etc (hwgroup->busy is cleared on return) and
1235 * make sure request is sane
1237 rq = HWGROUP(drive)->rq;
1238 HWGROUP(drive)->rq = NULL;
1241 rq->sector = rq->bio->bi_sector;
1242 rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
1243 rq->hard_cur_sectors = rq->current_nr_sectors;
1251 * ide_timer_expiry - handle lack of an IDE interrupt
1252 * @data: timer callback magic (hwgroup)
1254 * An IDE command has timed out before the expected drive return
1255 * occurred. At this point we attempt to clean up the current
1256 * mess. If the current handler includes an expiry handler then
1257 * we invoke the expiry handler, and providing it is happy the
1258 * work is done. If that fails we apply generic recovery rules
1259 * invoking the handler and checking the drive DMA status. We
1260 * have an excessively incestuous relationship with the DMA
1261 * logic that wants cleaning up.
1264 void ide_timer_expiry (unsigned long data)
1266 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
1267 ide_handler_t *handler;
1268 ide_expiry_t *expiry;
1269 unsigned long flags;
1270 unsigned long wait = -1;
1272 spin_lock_irqsave(&ide_lock, flags);
1274 if ((handler = hwgroup->handler) == NULL) {
1276 * Either a marginal timeout occurred
1277 * (got the interrupt just as timer expired),
1278 * or we were "sleeping" to give other devices a chance.
1279 * Either way, we don't really want to complain about anything.
1281 if (hwgroup->sleeping) {
1282 hwgroup->sleeping = 0;
1286 ide_drive_t *drive = hwgroup->drive;
1288 printk(KERN_ERR "ide_timer_expiry: hwgroup->drive was NULL\n");
1289 hwgroup->handler = NULL;
1292 ide_startstop_t startstop = ide_stopped;
1293 if (!hwgroup->busy) {
1294 hwgroup->busy = 1; /* paranoia */
1295 printk(KERN_ERR "%s: ide_timer_expiry: hwgroup->busy was 0 ??\n", drive->name);
1297 if ((expiry = hwgroup->expiry) != NULL) {
1299 if ((wait = expiry(drive)) > 0) {
1301 hwgroup->timer.expires = jiffies + wait;
1302 add_timer(&hwgroup->timer);
1303 spin_unlock_irqrestore(&ide_lock, flags);
1307 hwgroup->handler = NULL;
1309 * We need to simulate a real interrupt when invoking
1310 * the handler() function, which means we need to
1311 * globally mask the specific IRQ:
1313 spin_unlock(&ide_lock);
1315 #if DISABLE_IRQ_NOSYNC
1316 disable_irq_nosync(hwif->irq);
1318 /* disable_irq_nosync ?? */
1319 disable_irq(hwif->irq);
1320 #endif /* DISABLE_IRQ_NOSYNC */
1322 * as if we were handling an interrupt */
1323 local_irq_disable();
1324 if (hwgroup->poll_timeout != 0) {
1325 startstop = handler(drive);
1326 } else if (drive_is_ready(drive)) {
1327 if (drive->waiting_for_dma)
1328 (void) hwgroup->hwif->ide_dma_lostirq(drive);
1329 (void)ide_ack_intr(hwif);
1330 printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
1331 startstop = handler(drive);
1333 if (drive->waiting_for_dma) {
1334 startstop = ide_dma_timeout_retry(drive, wait);
1337 DRIVER(drive)->error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
1339 drive->service_time = jiffies - drive->service_start;
1340 spin_lock_irq(&ide_lock);
1341 enable_irq(hwif->irq);
1342 if (startstop == ide_stopped)
1346 ide_do_request(hwgroup, IDE_NO_IRQ);
1347 spin_unlock_irqrestore(&ide_lock, flags);
1350 EXPORT_SYMBOL(ide_timer_expiry);
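/*
 * Illustrative sketch (not part of the original file): how a handler
 * cooperates with ide_timer_expiry() by supplying an expiry callback
 * (typically installed together with the handler, e.g. via
 * ide_set_handler()).  Returning a positive value asks for that many more
 * jiffies before the timeout is treated as fatal; returning 0 lets the
 * normal timeout path run.  The function name is hypothetical.
 */
static int example_expiry(ide_drive_t *drive)
{
	u8 stat = HWIF(drive)->INB(IDE_STATUS_REG);

	if (stat & BUSY_STAT)
		return HZ / 10;		/* still busy: check again in ~100ms */
	return 0;			/* give up and run the error path */
}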
1353 * unexpected_intr - handle an unexpected IDE interrupt
1354 * @irq: interrupt line
1355 * @hwgroup: hwgroup being processed
1357 * There's nothing really useful we can do with an unexpected interrupt,
1358 * other than reading the status register (to clear it), and logging it.
1359 * There should be no way that an irq can happen before we're ready for it,
1360 * so we needn't worry much about losing an "important" interrupt here.
1362 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
1363 * the drive enters "idle", "standby", or "sleep" mode, so if the status
1364 * looks "good", we just ignore the interrupt completely.
1366 * This routine assumes __cli() is in effect when called.
1368 * If an unexpected interrupt happens on irq15 while we are handling irq14
1369 * and if the two interfaces are "serialized" (CMD640), then it looks like
1370 * we could screw up by interfering with a new request being set up for
1373 * In reality, this is a non-issue. The new command is not sent unless
1374 * the drive is ready to accept one, in which case we know the drive is
1375 * not trying to interrupt us. And ide_set_handler() is always invoked
1376 * before completing the issuance of any new drive command, so we will not
1377 * be accidentally invoked as a result of any valid command completion
1380 * Note that we must walk the entire hwgroup here. We know which hwif
1381 * is doing the current command, but we don't know which hwif burped
1385 static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
1388 ide_hwif_t *hwif = hwgroup->hwif;
1391 * handle the unexpected interrupt
1394 if (hwif->irq == irq) {
1395 stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
1396 if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
1397 /* Try to not flood the console with msgs */
1398 static unsigned long last_msgtime, count;
1400 if (time_after(jiffies, last_msgtime + HZ)) {
1401 last_msgtime = jiffies;
1402 printk(KERN_ERR "%s%s: unexpected interrupt, "
1403 "status=0x%02x, count=%ld\n",
1405 (hwif->next==hwgroup->hwif) ? "" : "(?)", stat, count);
1409 } while ((hwif = hwif->next) != hwgroup->hwif);
1413 * ide_intr - default IDE interrupt handler
1414 * @irq: interrupt number
1415 * @dev_id: hwif group
1416 * @regs: unused weirdness from the kernel irq layer
1418 * This is the default IRQ handler for the IDE layer. You should
1419 * not need to override it. If you do be aware it is subtle in
1422 * hwgroup->hwif is the interface in the group currently performing
1423 * a command. hwgroup->drive is the drive and hwgroup->handler is
1424 * the IRQ handler to call. As we issue a command the handlers
1425 * step through multiple states, reassigning the handler to the
1426 * next step in the process. Unlike a smart SCSI controller IDE
1427 * expects the main processor to sequence the various transfer
1428 * stages. We also manage a poll timer to catch up with most
1429 * timeout situations. There are still a few where the handlers
1430 * don't ever decide to give up.
1432 * The handler eventually returns ide_stopped to indicate the
1433 * request completed. At this point we issue the next request
1434 * on the hwgroup and the process begins again.
1437 irqreturn_t ide_intr (int irq, void *dev_id, struct pt_regs *regs)
1439 unsigned long flags;
1440 ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
1443 ide_handler_t *handler;
1444 ide_startstop_t startstop;
1446 spin_lock_irqsave(&ide_lock, flags);
1447 hwif = hwgroup->hwif;
1449 if (!ide_ack_intr(hwif)) {
1450 spin_unlock_irqrestore(&ide_lock, flags);
1454 if ((handler = hwgroup->handler) == NULL ||
1455 hwgroup->poll_timeout != 0) {
1457 * Not expecting an interrupt from this drive.
1458 * That means this could be:
1459 * (1) an interrupt from another PCI device
1460 * sharing the same PCI INT# as us.
1461 * or (2) a drive just entered sleep or standby mode,
1462 * and is interrupting to let us know.
1463 * or (3) a spurious interrupt of unknown origin.
1465 * For PCI, we cannot tell the difference,
1466 * so in that case we just ignore it and hope it goes away.
1468 * FIXME: unexpected_intr should be hwif-> then we can
1469 * remove all the ifdef PCI crap
1471 #ifdef CONFIG_BLK_DEV_IDEPCI
1472 if (hwif->pci_dev && !hwif->pci_dev->vendor)
1473 #endif /* CONFIG_BLK_DEV_IDEPCI */
1476 * Probably not a shared PCI interrupt,
1477 * so we can safely try to do something about it:
1479 unexpected_intr(irq, hwgroup);
1480 #ifdef CONFIG_BLK_DEV_IDEPCI
1483 * Whack the status register, just in case
1484 * we have a leftover pending IRQ.
1486 (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
1487 #endif /* CONFIG_BLK_DEV_IDEPCI */
1489 spin_unlock_irqrestore(&ide_lock, flags);
1492 drive = hwgroup->drive;
1495 * This should NEVER happen, and there isn't much
1496 * we could do about it here.
1498 * [Note - this can occur if the drive is hot unplugged]
1500 spin_unlock_irqrestore(&ide_lock, flags);
1503 if (!drive_is_ready(drive)) {
1505 * This happens regularly when we share a PCI IRQ with
1506 * another device. Unfortunately, it can also happen
1507 * with some buggy drives that trigger the IRQ before
1508 * their status register is up to date. Hopefully we have
1509 * enough advance overhead that the latter isn't a problem.
1511 spin_unlock_irqrestore(&ide_lock, flags);
1514 if (!hwgroup->busy) {
1515 hwgroup->busy = 1; /* paranoia */
1516 printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
1518 hwgroup->handler = NULL;
1519 del_timer(&hwgroup->timer);
1520 spin_unlock(&ide_lock);
1524 /* service this interrupt, may set handler for next interrupt */
1525 startstop = handler(drive);
1526 spin_lock_irq(&ide_lock);
1529 * Note that handler() may have set things up for another
1530 * interrupt to occur soon, but it cannot happen until
1531 * we exit from this routine, because it will be the
1532 * same irq as is currently being serviced here, and Linux
1533 * won't allow another of the same (on any CPU) until we return.
1535 drive->service_time = jiffies - drive->service_start;
1536 if (startstop == ide_stopped) {
1537 if (hwgroup->handler == NULL) { /* paranoia */
1539 ide_do_request(hwgroup, hwif->irq);
1541 printk(KERN_ERR "%s: ide_intr: huh? expected NULL handler "
1542 "on exit\n", drive->name);
1545 spin_unlock_irqrestore(&ide_lock, flags);
1549 EXPORT_SYMBOL(ide_intr);
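/*
 * Illustrative sketch (not part of the original file): ide_intr() is the
 * handler the probe code registers with request_irq(), passing the hwgroup
 * as dev_id.  The IRQ flags shown are a plausible choice for a shared IDE
 * interrupt, not necessarily the exact ones the probe code uses, and the
 * function name is hypothetical.
 */
static int example_hook_irq(ide_hwif_t *hwif, ide_hwgroup_t *hwgroup)
{
	return request_irq(hwif->irq, &ide_intr, SA_INTERRUPT | SA_SHIRQ,
			   hwif->name, hwgroup);
}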
1552 * ide_init_drive_cmd - initialize a drive command request
1553 * @rq: request object
1555 * Initialize a request before we fill it in and send it down to
1556 * ide_do_drive_cmd. Commands must be set up by this function. Right
1557 * now it doesn't do a lot, but if that changes abusers will have a
1561 void ide_init_drive_cmd (struct request *rq)
1563 memset(rq, 0, sizeof(*rq));
1564 rq->flags = REQ_DRIVE_CMD;
1568 EXPORT_SYMBOL(ide_init_drive_cmd);
1571 * ide_do_drive_cmd - issue IDE special command
1572 * @drive: device to issue command
1573 * @rq: request to issue
1574 * @action: action for processing
1576 * This function issues a special IDE device request
1577 * onto the request queue.
1579 * If action is ide_wait, then the rq is queued at the end of the
1580 * request queue, and the function sleeps until it has been processed.
1581 * This is for use when invoked from an ioctl handler.
1583 * If action is ide_preempt, then the rq is queued at the head of
1584 * the request queue, displacing the currently-being-processed
1585 * request and this function returns immediately without waiting
1586 * for the new rq to be completed. This is VERY DANGEROUS, and is
1587 * intended for careful use by the ATAPI tape/cdrom driver code.
1589 * If action is ide_next, then the rq is queued immediately after
1590 * the currently-being-processed-request (if any), and the function
1591 * returns without waiting for the new rq to be completed. As above,
1592 * this is VERY DANGEROUS, and is intended for careful use by the
1593 * ATAPI tape/cdrom driver code.
1595 * If action is ide_end, then the rq is queued at the end of the
1596 * request queue, and the function returns immediately without waiting
1597 * for the new rq to be completed. This is again intended for careful
1598 * use by the ATAPI tape/cdrom driver code.
1601 int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t action)
1603 unsigned long flags;
1604 ide_hwgroup_t *hwgroup = HWGROUP(drive);
1605 DECLARE_COMPLETION(wait);
1606 int where = ELEVATOR_INSERT_BACK, err;
1607 int must_wait = (action == ide_wait || action == ide_head_wait);
1609 #ifdef CONFIG_BLK_DEV_PDC4030
1611 * FIXME: there should be a drive or hwif->special
1612 * handler that points here by default, not hacks
1613 * in the ide-io.c code
1615 * FIXME2: That code breaks power management if used with
1616 * this chipset, that really doesn't belong here !
1618 if (HWIF(drive)->chipset == ide_pdc4030 && rq->buffer != NULL)
1619 return -ENOSYS; /* special drive cmds not supported */
1622 rq->rq_status = RQ_ACTIVE;
1624 rq->rq_disk = drive->disk;
1627 * we need to hold an extra reference to request for safe inspection
1632 rq->waiting = &wait;
1635 spin_lock_irqsave(&ide_lock, flags);
1636 if (action == ide_preempt)
1638 if (action == ide_preempt || action == ide_head_wait) {
1639 where = ELEVATOR_INSERT_FRONT;
1640 rq->flags |= REQ_PREEMPT;
1642 __elv_add_request(drive->queue, rq, where, 0);
1643 ide_do_request(hwgroup, IDE_NO_IRQ);
1644 spin_unlock_irqrestore(&ide_lock, flags);
1648 wait_for_completion(&wait);
1653 blk_put_request(rq);
1659 EXPORT_SYMBOL(ide_do_drive_cmd);
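/*
 * Illustrative sketch (not part of the original file): a synchronous
 * REQ_DRIVE_TASK issue using the 7-byte taskfile layout consumed by
 * execute_drive_cmd() above -- [0]=command, [1]=feature, [2]=nsect,
 * [3]=sector, [4]=lcyl, [5]=hcyl, [6]=select.  The function name is
 * hypothetical; it spins the drive down and waits for completion.
 */
static int example_standby_now(ide_drive_t *drive)
{
	struct request rq;
	u8 args[7] = { WIN_STANDBYNOW1, 0, 0, 0, 0, 0, 0 };

	ide_init_drive_cmd(&rq);
	rq.flags = REQ_DRIVE_TASK;
	rq.buffer = args;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}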