linux-2.6.6: drivers/ide/ide-tcq.c
/*
 * Copyright (C) 2001, 2002 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Support for the DMA queued protocol, which enables ATA disk drives to
 * use tagged command queueing.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ide.h>

#include <asm/io.h>
#include <asm/delay.h>

/*
 * warning: it will be _very_ verbose if defined
 */
#undef IDE_TCQ_DEBUG

#ifdef IDE_TCQ_DEBUG
#define TCQ_PRINTK printk
#else
#define TCQ_PRINTK(x...)
#endif
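
/*
 * note: the "x..." form above is the gcc named-variadic-macro extension
 * used throughout the 2.6 tree; with IDE_TCQ_DEBUG undefined, every
 * TCQ_PRINTK() call compiles away to nothing.
 */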

/*
 * use nIEN or not
 */
#undef IDE_TCQ_NIEN

/*
 * we leave the SERVICE interrupt alone; IBM drives have it on by
 * default and it can't be turned off. that doesn't matter, since this
 * is the sane config.
 */
#undef IDE_TCQ_FIDDLE_SI

/*
 * bad drive blacklist, for drives that report tcq capability but don't
 * work reliably with the default config. initially from the freebsd table.
 */
struct ide_tcq_blacklist {
        char *model;
        char works;
        unsigned int max_sectors;
};

static struct ide_tcq_blacklist ide_tcq_blacklist[] = {
        {
                .model =        "IBM-DTTA",
                .works =        1,
                .max_sectors =  128,
        },
        {
                .model =        "IBM-DJNA",
                .works =        0,
        },
        {
                .model =        "WDC AC",
                .works =        0,
        },
        {
                .model =        NULL,
        },
};
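
/*
 * entries match by model-name prefix (see the strncmp() over
 * strlen(itb->model) in ide_find_drive_blacklist() below), so e.g.
 * "WDC AC" covers the whole WDC AC-series of drives.
 */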

ide_startstop_t ide_dmaq_intr(ide_drive_t *drive);
ide_startstop_t ide_service(ide_drive_t *drive);

static struct ide_tcq_blacklist *ide_find_drive_blacklist(ide_drive_t *drive)
{
        struct ide_tcq_blacklist *itb;
        int i = 0;

        do {
                itb = &ide_tcq_blacklist[i];

                if (!itb->model)
                        break;

                if (!strncmp(drive->id->model, itb->model, strlen(itb->model)))
                        return itb;

                i++;
        } while (1);

        return NULL;
}

static inline void drive_ctl_nien(ide_drive_t *drive, int set)
{
#ifdef IDE_TCQ_NIEN
        if (IDE_CONTROL_REG) {
                ide_hwif_t *hwif = HWIF(drive);
                int mask = set ? 0x02 : 0x00;

                hwif->OUTB(drive->ctl | mask, IDE_CONTROL_REG);
        }
#endif
}
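
/*
 * nIEN is bit 1 of the device control register; while it is set the
 * drive will not assert INTRQ, so the SERVICE handshake can be polled
 * without racing ide_dmaq_intr(). the helper above compiles away
 * entirely unless IDE_TCQ_NIEN is defined.
 */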

static ide_startstop_t ide_tcq_nop_handler(ide_drive_t *drive)
{
        ide_task_t *args = HWGROUP(drive)->rq->special;
        ide_hwif_t *hwif = HWIF(drive);
        int auto_poll_check = 0;
        u8 stat, err;

        if (args->tfRegister[IDE_FEATURE_OFFSET] & 0x01)
                auto_poll_check = 1;

        local_irq_enable();

        stat = hwif->INB(IDE_STATUS_REG);
        err = hwif->INB(IDE_ERROR_REG);
        ide_end_drive_cmd(drive, stat, err);

        /*
         * do taskfile and check ABRT bit -- intelligent adapters will not
         * pass NOP with sub-code 0x01 to the device, so the command will
         * not fail there
         */
        if (auto_poll_check) {
                if (!(err & ABRT_ERR)) {
                        HWIF(drive)->auto_poll = 1;
                        printk("%s: NOP Auto-poll enabled\n", HWIF(drive)->name);
                }
        }

        kfree(args);
        return ide_stopped;
}
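
/*
 * the NOP with Feature = 0x01 issued from ide_tcq_check_autopoll() acts
 * as a probe: if neither the adapter nor the drive aborted it (no ABRT
 * in the error register just read), the hwif is marked auto_poll capable.
 */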

/*
 * if we encounter _any_ error doing I/O to one of the tags, we must
 * invalidate the pending queue. clear the software busy queue and requeue
 * on the request queue for restart. issue a WIN_NOP to clear hardware queue
 */
static void ide_tcq_invalidate_queue(ide_drive_t *drive)
{
        ide_hwgroup_t *hwgroup = HWGROUP(drive);
        request_queue_t *q = drive->queue;
        struct request *rq;
        unsigned long flags;

        printk("%s: invalidating tag queue (%d commands)\n", drive->name, ata_pending_commands(drive));

        /*
         * first kill timer and block queue
         */
        spin_lock_irqsave(&ide_lock, flags);

        del_timer(&hwgroup->timer);

        if (HWIF(drive)->dma)
                HWIF(drive)->ide_dma_end(drive);

        blk_queue_invalidate_tags(q);

        drive->using_tcq = 0;
        drive->queue_depth = 1;
        hwgroup->busy = 0;
        hwgroup->handler = NULL;

        spin_unlock_irqrestore(&ide_lock, flags);

        /*
         * now kill hardware queue with a NOP
         */
        rq = &hwgroup->wrq;
        ide_init_drive_cmd(rq);
        rq->buffer = hwgroup->cmd_buf;
        memset(rq->buffer, 0, sizeof(hwgroup->cmd_buf));
        rq->buffer[0] = WIN_NOP;
        ide_do_drive_cmd(drive, rq, ide_preempt);
}

void ide_tcq_intr_timeout(unsigned long data)
{
        ide_drive_t *drive = (ide_drive_t *) data;
        ide_hwgroup_t *hwgroup = HWGROUP(drive);
        ide_hwif_t *hwif = HWIF(drive);
        unsigned long flags;

        printk(KERN_ERR "ide_tcq_intr_timeout: timeout waiting for %s interrupt\n", hwgroup->rq ? "completion" : "service");

        spin_lock_irqsave(&ide_lock, flags);

        if (!hwgroup->busy)
                printk(KERN_ERR "ide_tcq_intr_timeout: hwgroup not busy\n");
        if (hwgroup->handler == NULL)
                printk(KERN_ERR "ide_tcq_intr_timeout: missing isr!\n");

        hwgroup->busy = 1;
        spin_unlock_irqrestore(&ide_lock, flags);

        /*
         * if pending commands, try service before giving up
         */
        if (ata_pending_commands(drive)) {
                u8 stat = hwif->INB(IDE_STATUS_REG);

                if ((stat & SRV_STAT) && (ide_service(drive) == ide_started))
                        return;
        }

        if (drive)
                ide_tcq_invalidate_queue(drive);
}

void __ide_tcq_set_intr(ide_hwgroup_t *hwgroup, ide_handler_t *handler)
{
        /*
         * always just bump the timer for now, the timeout handling will
         * have to be changed to be per-command
         */
        hwgroup->timer.function = ide_tcq_intr_timeout;
        hwgroup->timer.data = (unsigned long) hwgroup->drive;
        mod_timer(&hwgroup->timer, jiffies + 5 * HZ);

        hwgroup->handler = handler;
}

void ide_tcq_set_intr(ide_hwgroup_t *hwgroup, ide_handler_t *handler)
{
        unsigned long flags;

        spin_lock_irqsave(&ide_lock, flags);
        __ide_tcq_set_intr(hwgroup, handler);
        spin_unlock_irqrestore(&ide_lock, flags);
}

/*
 * wait 400ns, then poll for busy_mask to clear from alt status
 */
#define IDE_TCQ_WAIT    (10000)
int ide_tcq_wait_altstat(ide_drive_t *drive, byte *stat, byte busy_mask)
{
        ide_hwif_t *hwif = HWIF(drive);
        int i = 0;

        udelay(1);

        do {
                *stat = hwif->INB(IDE_ALTSTATUS_REG);

                if (!(*stat & busy_mask))
                        break;

                if (unlikely(i++ > IDE_TCQ_WAIT))
                        return 1;

                udelay(10);
        } while (1);

        return 0;
}
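
/*
 * note: the udelay(1) above covers the ATA-mandated 400ns wait before
 * sampling status; worst case the loop then polls IDE_TCQ_WAIT times
 * with udelay(10) between reads, i.e. roughly 10000 * 10us = 100ms
 * before giving up.
 */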

/*
 * issue SERVICE command to drive -- drive must have been selected first,
 * and it must have reported a need for service (status has SRV_STAT set)
 *
 * Also, nIEN must be set so that we need no protection against
 * ide_dmaq_intr
 */
ide_startstop_t ide_service(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        unsigned long flags;
        struct request *rq;
        byte feat, stat;
        int tag;

        TCQ_PRINTK("%s: started service\n", drive->name);

        /*
         * could be called with IDE_DMA in progress from the invalidate
         * handler, refuse to do anything in that case
         */
        if (hwif->dma)
                return ide_stopped;

        /*
         * need to select the right drive first...
         */
        if (drive != HWGROUP(drive)->drive) {
                SELECT_DRIVE(drive);
                udelay(10);
        }

        drive_ctl_nien(drive, 1);

        /*
         * send SERVICE, wait 400ns, wait for BUSY_STAT to clear
         */
        hwif->OUTB(WIN_QUEUED_SERVICE, IDE_COMMAND_REG);

        if (ide_tcq_wait_altstat(drive, &stat, BUSY_STAT)) {
                printk(KERN_ERR "ide_service: BUSY clear took too long\n");
                ide_dump_status(drive, "ide_service", stat);
                ide_tcq_invalidate_queue(drive);
                return ide_stopped;
        }

        drive_ctl_nien(drive, 0);

        /*
         * FIXME, invalidate queue
         */
        if (stat & ERR_STAT) {
                ide_dump_status(drive, "ide_service", stat);
                ide_tcq_invalidate_queue(drive);
                return ide_stopped;
        }

        /*
         * this should not happen, but a buggy device could introduce a
         * loop
         */
        feat = hwif->INB(IDE_NSECTOR_REG);
        if (feat & REL) {
                HWGROUP(drive)->rq = NULL;
                printk(KERN_ERR "%s: release in service\n", drive->name);
                return ide_stopped;
        }

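        /*
         * per the ATA tagged-queueing protocol, the sector count register
         * now holds the tag of the command to be serviced in bits 7:3
         * (REL is bit 2), hence the shift below.
         */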
        tag = feat >> 3;

        TCQ_PRINTK("ide_service: stat %x, feat %x\n", stat, feat);

        spin_lock_irqsave(&ide_lock, flags);

        if ((rq = blk_queue_find_tag(drive->queue, tag))) {
                HWGROUP(drive)->rq = rq;

                /*
                 * we'll start a dma read or write, device will trigger
                 * interrupt to indicate end of transfer, release is not
                 * allowed
                 */
                TCQ_PRINTK("ide_service: starting command, stat=%x\n", stat);
                spin_unlock_irqrestore(&ide_lock, flags);
                return __ide_dma_queued_start(drive);
        }

        printk(KERN_ERR "ide_service: missing request for tag %d\n", tag);
        spin_unlock_irqrestore(&ide_lock, flags);
        return ide_stopped;
}

ide_startstop_t ide_check_service(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        byte stat;

        TCQ_PRINTK("%s: ide_check_service\n", drive->name);

        if (!ata_pending_commands(drive))
                return ide_stopped;

        stat = hwif->INB(IDE_STATUS_REG);
        if (stat & SRV_STAT)
                return ide_service(drive);

        /*
         * we have pending commands, wait for interrupt
         */
        TCQ_PRINTK("%s: wait for service interrupt\n", drive->name);
        ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
        return ide_started;
}

ide_startstop_t ide_dmaq_complete(ide_drive_t *drive, struct request *rq, byte stat)
{
        byte dma_stat;

        /*
         * transfer was in progress, stop DMA engine
         */
        dma_stat = HWIF(drive)->ide_dma_end(drive);

        /*
         * must be end of I/O, check status and complete as necessary
         */
        if (unlikely(!OK_STAT(stat, READY_STAT, drive->bad_wstat | DRQ_STAT))) {
                printk(KERN_ERR "ide_dmaq_intr: %s: error status %x\n", drive->name, stat);
                ide_dump_status(drive, "ide_dmaq_complete", stat);
                ide_tcq_invalidate_queue(drive);
                return ide_stopped;
        }

        if (dma_stat)
                printk(KERN_WARNING "%s: bad DMA status (dma_stat=%x)\n", drive->name, dma_stat);

        TCQ_PRINTK("ide_dmaq_complete: ending %p, tag %d\n", rq, rq->tag);
        ide_end_request(drive, 1, rq->nr_sectors);

        /*
         * we completed this command, check if we can service a new command
         */
        return ide_check_service(drive);
}

/*
 * intr handler for queued dma operations. this can be entered for two
 * reasons:
 *
 * 1) device has completed dma transfer
 * 2) service request to start a command
 *
 * if the drive has an active tag, we first complete that request before
 * processing any pending SERVICE.
 */
ide_startstop_t ide_dmaq_intr(ide_drive_t *drive)
{
        struct request *rq = HWGROUP(drive)->rq;
        ide_hwif_t *hwif = HWIF(drive);
        byte stat = hwif->INB(IDE_STATUS_REG);

        TCQ_PRINTK("ide_dmaq_intr: stat=%x\n", stat);

        /*
         * if a command completion interrupt is pending, do that first and
         * check service afterwards
         */
        if (rq) {
                TCQ_PRINTK("ide_dmaq_intr: completion\n");
                return ide_dmaq_complete(drive, rq, stat);
        }

        /*
         * service interrupt
         */
        if (stat & SRV_STAT) {
                TCQ_PRINTK("ide_dmaq_intr: SERV (stat=%x)\n", stat);
                return ide_service(drive);
        }

        printk("ide_dmaq_intr: stat=%x, not expected\n", stat);
        return ide_check_service(drive);
}

/*
 * check if the ata adapter this drive is attached to supports the
 * NOP auto-poll for multiple tcq enabled drives on one channel
 */
static int ide_tcq_check_autopoll(ide_drive_t *drive)
{
        ide_task_t *args;
        int i, drives;

        /*
         * only need to probe if both drives on a channel support tcq
         */
        for (i = 0, drives = 0; i < MAX_DRIVES; i++)
                if (HWIF(drive)->drives[i].present && HWIF(drive)->drives[i].media == ide_disk)
                        drives++;

        if (drives <= 1)
                return 0;

        /*
         * what a mess...
         */
        args = kmalloc(sizeof(*args), GFP_ATOMIC);
        if (!args)
                return 1;

        memset(args, 0, sizeof(*args));

        args->tfRegister[IDE_FEATURE_OFFSET] = 0x01;
        args->tfRegister[IDE_COMMAND_OFFSET] = WIN_NOP;
        args->command_type = IDE_DRIVE_TASK_NO_DATA;
        args->handler = ide_tcq_nop_handler;
        return ide_raw_taskfile(drive, args, NULL);
}

/*
 * configure the drive for tcq
 */
static int ide_tcq_configure(ide_drive_t *drive)
{
        int tcq_mask = 1 << 1 | 1 << 14;
        int tcq_bits = tcq_mask | 1 << 15;
        ide_task_t *args;

        /*
         * bit 14 and bit 1 must be set in word 83 of the device id to
         * indicate support for the dma queued protocol, and bit 15 must
         * be cleared
         */
        if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask) {
                printk(KERN_INFO "%s: TCQ not supported\n", drive->name);
                return -EIO;
        }
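
        /*
         * note on the test above: (x & tcq_bits) ^ tcq_mask is zero only
         * when the masked bits equal tcq_mask exactly, i.e. bits 1 and 14
         * set and bit 15 clear.
         */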

        args = kmalloc(sizeof(*args), GFP_ATOMIC);
        if (!args)
                return -ENOMEM;

        memset(args, 0, sizeof(ide_task_t));
        args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
        args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_EN_WCACHE;
        args->command_type = IDE_DRIVE_TASK_NO_DATA;
        args->handler      = &task_no_data_intr;

        if (ide_raw_taskfile(drive, args, NULL)) {
                printk(KERN_WARNING "%s: failed to enable write cache\n", drive->name);
                goto err;
        }

        /*
         * disable RELease interrupt, it's quicker to poll for this after
         * having sent the command opcode
         */
        memset(args, 0, sizeof(ide_task_t));
        args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
        args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_DIS_RI;
        args->command_type = IDE_DRIVE_TASK_NO_DATA;
        args->handler      = &task_no_data_intr;

        if (ide_raw_taskfile(drive, args, NULL)) {
                printk(KERN_ERR "%s: failed to disable release interrupt\n", drive->name);
                goto err;
        }

#ifdef IDE_TCQ_FIDDLE_SI
        /*
         * enable SERVICE interrupt
         */
        memset(args, 0, sizeof(ide_task_t));
        args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
        args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_EN_SI;
        args->command_type = IDE_DRIVE_TASK_NO_DATA;
        args->handler      = &task_no_data_intr;

        if (ide_raw_taskfile(drive, args, NULL)) {
                printk(KERN_ERR "%s: failed to enable service interrupt\n", drive->name);
                goto err;
        }
#endif

        kfree(args);
        return 0;
err:
        kfree(args);
        return -EIO;
}

/*
 * for now assume that the command list is always as big as we need and
 * don't attempt to shrink it on tcq disable
 */
static int ide_enable_queued(ide_drive_t *drive, int on)
{
        struct ide_tcq_blacklist *itb;
        int depth = drive->using_tcq ? drive->queue_depth : 0;

        /*
         * disable or adjust queue depth
         */
        if (!on) {
                if (drive->using_tcq)
                        printk(KERN_INFO "%s: TCQ disabled\n", drive->name);

                drive->using_tcq = 0;
                return 0;
        }

        if (ide_tcq_configure(drive)) {
                drive->using_tcq = 0;
                return 1;
        }

        /*
         * some drives need a limited transfer size in tcq mode
         */
        itb = ide_find_drive_blacklist(drive);
        if (itb && itb->max_sectors) {
                if (itb->max_sectors > HWIF(drive)->rqsize)
                        itb->max_sectors = HWIF(drive)->rqsize;

                blk_queue_max_sectors(drive->queue, itb->max_sectors);
        }

        /*
         * enable block tagging
         */
        if (!blk_queue_tagged(drive->queue))
                blk_queue_init_tags(drive->queue, IDE_MAX_TAG, NULL);

        /*
         * check auto-poll support
         */
        ide_tcq_check_autopoll(drive);

        if (depth != drive->queue_depth)
                printk(KERN_INFO "%s: tagged command queueing enabled, command queue depth %d\n", drive->name, drive->queue_depth);

        drive->using_tcq = 1;
        return 0;
}

int ide_tcq_wait_dataphase(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);
        byte stat;
        int i;

        do {
                stat = hwif->INB(IDE_STATUS_REG);
                if (!(stat & BUSY_STAT))
                        break;

                udelay(10);
        } while (1);

        if (OK_STAT(stat, READY_STAT | DRQ_STAT, drive->bad_wstat))
                return 0;

        i = 0;
        udelay(1);
        do {
                stat = hwif->INB(IDE_STATUS_REG);

                if (OK_STAT(stat, READY_STAT | DRQ_STAT, drive->bad_wstat))
                        break;

                ++i;
                if (unlikely(i >= IDE_TCQ_WAIT))
                        return 1;

                udelay(10);
        } while (1);

        return 0;
}

static int ide_tcq_check_blacklist(ide_drive_t *drive)
{
        struct ide_tcq_blacklist *itb = ide_find_drive_blacklist(drive);

        if (!itb)
                return 0;

        return !itb->works;
}

int __ide_dma_queued_on(ide_drive_t *drive)
{
        ide_hwif_t *hwif = HWIF(drive);

        if (drive->media != ide_disk)
                return 1;
        if (!drive->using_dma)
                return 1;
        if (hwif->chipset == ide_pdc4030)
                return 1;
        if (ide_tcq_check_blacklist(drive)) {
                printk(KERN_WARNING "%s: tcq forbidden by blacklist\n",
                                        drive->name);
                return 1;
        }
        if (hwif->drives[0].present && hwif->drives[1].present) {
                printk(KERN_WARNING "%s: only one drive on a channel supported"
                                        " for tcq\n", drive->name);
                return 1;
        }
        if (ata_pending_commands(drive)) {
                printk(KERN_WARNING "ide-tcq: can't toggle tcq feature on "
                                        "busy drive\n");
                return 1;
        }

        return ide_enable_queued(drive, 1);
}
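
/*
 * like the other ide_dma_* hooks, these on/off entry points return 0 on
 * success; any nonzero return means tagged queueing was refused or could
 * not be toggled.
 */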

int __ide_dma_queued_off(ide_drive_t *drive)
{
        if (drive->media != ide_disk)
                return 1;
        if (ata_pending_commands(drive)) {
                printk("ide-tcq: can't toggle tcq feature on busy drive\n");
                return 1;
        }

        return ide_enable_queued(drive, 0);
}

static ide_startstop_t ide_dma_queued_rw(ide_drive_t *drive, u8 command)
{
        ide_hwif_t *hwif = HWIF(drive);
        unsigned long flags;
        byte stat, feat;

        TCQ_PRINTK("%s: starting tag\n", drive->name);

        /*
         * set nIEN; the tag start operation will re-enable interrupts
         * when it is safe to do so
         */
        drive_ctl_nien(drive, 1);

        TCQ_PRINTK("%s: sending cmd=%x\n", drive->name, command);
        hwif->OUTB(command, IDE_COMMAND_REG);

        if (ide_tcq_wait_altstat(drive, &stat, BUSY_STAT)) {
                printk("%s: alt stat timeout\n", drive->name);
                goto err;
        }

        drive_ctl_nien(drive, 0);

        if (stat & ERR_STAT)
                goto err;

        /*
         * bus not released, start dma
         */
        feat = hwif->INB(IDE_NSECTOR_REG);
        if (!(feat & REL)) {
                TCQ_PRINTK("IMMED in queued_start, feat=%x\n", feat);
                return __ide_dma_queued_start(drive);
        }

        /*
         * drive released the bus, clear active request and check for service
         */
        spin_lock_irqsave(&ide_lock, flags);
        HWGROUP(drive)->rq = NULL;
        __ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
        spin_unlock_irqrestore(&ide_lock, flags);

        TCQ_PRINTK("REL in queued_start\n");

        stat = hwif->INB(IDE_STATUS_REG);
        if (stat & SRV_STAT)
                return ide_service(drive);

        return ide_released;
err:
        ide_dump_status(drive, "rw_queued", stat);
        ide_tcq_invalidate_queue(drive);
        return ide_stopped;
}

ide_startstop_t __ide_dma_queued_read(ide_drive_t *drive)
{
        u8 command = WIN_READDMA_QUEUED;

        if (drive->addressing == 1)
                command = WIN_READDMA_QUEUED_EXT;

        return ide_dma_queued_rw(drive, command);
}

ide_startstop_t __ide_dma_queued_write(ide_drive_t *drive)
{
        u8 command = WIN_WRITEDMA_QUEUED;

        if (drive->addressing == 1)
                command = WIN_WRITEDMA_QUEUED_EXT;

        return ide_dma_queued_rw(drive, command);
}
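
/*
 * drive->addressing == 1 means 48-bit LBA is in use, for which the _EXT
 * opcode variants of READ/WRITE DMA QUEUED above must be issued.
 */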

ide_startstop_t __ide_dma_queued_start(ide_drive_t *drive)
{
        ide_hwgroup_t *hwgroup = HWGROUP(drive);
        struct request *rq = hwgroup->rq;
        ide_hwif_t *hwif = HWIF(drive);
        unsigned int reading = 0;

        TCQ_PRINTK("ide_dma: setting up queued tag=%d\n", rq->tag);

        if (!hwgroup->busy)
                printk(KERN_ERR "queued_rw: hwgroup not busy\n");

        if (ide_tcq_wait_dataphase(drive)) {
                printk(KERN_WARNING "timeout waiting for data phase\n");
                return ide_stopped;
        }

        if (rq_data_dir(rq) == READ)
                reading = 1 << 3;

        if (ide_start_dma(hwif, drive, reading))
                return ide_stopped;

        ide_tcq_set_intr(hwgroup, ide_dmaq_intr);

        if (!hwif->ide_dma_begin(drive))
                return ide_started;

        return ide_stopped;
}
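
/*
 * for reference: a minimal sketch of how the __ide_dma_queued_* entry
 * points above would be wired into a hwif by the core dma setup code
 * (assuming the corresponding hook names in this tree's ide_hwif_t):
 *
 *	hwif->ide_dma_queued_on    = __ide_dma_queued_on;
 *	hwif->ide_dma_queued_off   = __ide_dma_queued_off;
 *	hwif->ide_dma_queued_read  = __ide_dma_queued_read;
 *	hwif->ide_dma_queued_write = __ide_dma_queued_write;
 *	hwif->ide_dma_queued_start = __ide_dma_queued_start;
 */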