/*
 *  linux/drivers/ide/ide-dma.c		Version 4.10	June 9, 2000
 *
 *  Copyright (c) 1999-2000	Andre Hedrick <andre@linux-ide.org>
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  Special Thanks to Mark for his six years of work.
 *
 *  Copyright (c) 1995-1998  Mark Lord
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  This module provides support for the bus-master IDE DMA functions
 *  of various PCI chipsets, including the Intel PIIX (i82371FB for
 *  the 430 FX chipset), the PIIX3 (i82371SB for the 430 HX/VX and
 *  440 chipsets), and the PIIX4 (i82371AB for the 430 TX chipset)
 *  ("PIIX" stands for "PCI ISA IDE Xcellerator").
 *
 *  Pretty much the same code works for other IDE PCI bus-mastering chipsets.
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 *
 *  By default, DMA support is prepared for use, but is currently enabled only
 *  for drives which already have DMA enabled (UltraDMA or mode 2 multi/single),
 *  or which are recognized as "good" (see table below).  Drives with only mode0
 *  or mode1 (multi/single) DMA should also work with this chipset/driver
 *  (e.g. MC2112A), but they are not enabled by default.
 *
 *  Use "hdparm -i" to view the modes supported by a given drive.
 *
 *  The hdparm-3.5 (or later) utility can be used for manually enabling/disabling
 *  DMA support, but must be (re-)compiled against this kernel version or later.
 *
 *  To enable DMA, use "hdparm -d1 /dev/hd?" on a per-drive basis after booting.
 *  If problems arise, ide.c will disable DMA operation after a few retries.
 *  This error recovery mechanism works and has been extremely well exercised.
 *
 *  IDE drives, depending on their vintage, may support several different modes
 *  of DMA operation.  The boot-time modes are indicated with a "*" in
 *  the "hdparm -i" listing, and can be changed with *knowledgeable* use of
 *  the "hdparm -X" feature.  There is seldom a need to do this, as drives
 *  normally power-up with their "best" PIO/DMA modes enabled.
 *
 *  Testing has been done with a rather extensive number of drives,
 *  with Quantum & Western Digital models generally outperforming the pack,
 *  and Fujitsu & Conner (and some Seagate which are really Conner) drives
 *  showing more lackluster throughput.
 *
 *  Keep an eye on /var/adm/messages for "DMA disabled" messages.
 *
 *  Some people have reported trouble with Intel Zappa motherboards.
 *  This can be fixed by upgrading the AMI BIOS to version 1.00.04.BS0,
 *  available from ftp://ftp.intel.com/pub/bios/10004bs0.exe
 *  (thanks to Glen Morrell <glen@spin.Stanford.edu> for researching this).
 *
 *  Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 *  fixing the problem with the BIOS on some Acer motherboards.
 *
 *  Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 *  "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 *  Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 *  at generic DMA -- his patches were referred to when preparing this code.
 *
 *  Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 *  for supplying a Promise UDMA board & WD UDMA drive for this work!
 *
 *  And, yes, Intel Zappa boards really *do* use both PIIX IDE ports.
 *
 *  ATA-66/100 and recovery functions, I forgot the rest......
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>		/* for virt_to_page()/offset_in_page() */
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/delay.h>
struct drive_list_entry {
	const char *id_model;
	const char *id_firmware;
};
static const struct drive_list_entry drive_whitelist [] = {
	{ "Micropolis 2112A"		, "ALL" },
	{ "CONNER CTMA 4000"		, "ALL" },
	{ "CONNER CTT8000-A"		, "ALL" },
	{ "ST34342A"			, "ALL" },
	{ NULL				, NULL }	/* terminator for in_drive_list() */
};
static const struct drive_list_entry drive_blacklist [] = {
	{ "WDC AC11000H"		, "ALL" },
	{ "WDC AC22100H"		, "ALL" },
	{ "WDC AC32500H"		, "ALL" },
	{ "WDC AC33100H"		, "ALL" },
	{ "WDC AC31600H"		, "ALL" },
	{ "WDC AC32100H"		, "24.09P07" },
	{ "WDC AC23200L"		, "21.10N21" },
	{ "Compaq CRD-8241B"		, "ALL" },
	{ "CRD-8400B"			, "ALL" },
	{ "CRD-8480B"			, "ALL" },
	{ "CRD-8480C"			, "ALL" },
	{ "CRD-8482B"			, "ALL" },
	{ "CRD-84"			, "ALL" },
	{ "SanDisk SDP3B"		, "ALL" },
	{ "SanDisk SDP3B-64"		, "ALL" },
	{ "SANYO CD-ROM CRD"		, "ALL" },
	{ "HITACHI CDR-8"		, "ALL" },
	{ "HITACHI CDR-8335"		, "ALL" },
	{ "HITACHI CDR-8435"		, "ALL" },
	{ "Toshiba CD-ROM XM-6202B"	, "ALL" },
	{ "CD-532E-A"			, "ALL" },
	{ "E-IDE CD-ROM CR-840"		, "ALL" },
	{ "CD-ROM Drive/F5A"		, "ALL" },
	{ "RICOH CD-R/RW MP7083A"	, "ALL" },
	{ "WPI CDD-820"			, "ALL" },
	{ "SAMSUNG CD-ROM SC-148C"	, "ALL" },
	{ "SAMSUNG CD-ROM SC-148F"	, "ALL" },
	{ "SAMSUNG CD-ROM SC"		, "ALL" },
	{ "SAMSUNG CD-ROM SN-124"	, "ALL" },
	{ "PLEXTOR CD-R PX-W8432T"	, "ALL" },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", "ALL" },
	{ "_NEC DV5800A"		, "ALL" },
	{ NULL				, NULL }	/* terminator for in_drive_list() */
};
/**
 *	in_drive_list		-	look for drive in black/white list
 *	@id: drive identifier
 *	@drive_table: list to inspect
 *
 *	Look for a drive in the blacklist and the whitelist tables.
 *	Returns 1 if the drive is found in the table.
 */
static int in_drive_list(struct hd_driveid *id, const struct drive_list_entry *drive_table)
{
	for ( ; drive_table->id_model ; drive_table++)
		if ((!strcmp(drive_table->id_model, id->model)) &&
		    /* match if the table names this firmware revision, or "ALL" */
		    ((strstr(id->fw_rev, drive_table->id_firmware)) ||
		     (!strcmp(drive_table->id_firmware, "ALL"))))
			return 1;
	return 0;
}
#ifdef CONFIG_BLK_DEV_IDEDMA_PCI

/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an
 *	IDE device.
 */
ide_startstop_t ide_dma_intr (ide_drive_t *drive)
{
	u8 stat = 0, dma_stat = 0;

	dma_stat = HWIF(drive)->ide_dma_end(drive);
	stat = HWIF(drive)->INB(IDE_STATUS_REG);	/* get drive status */
	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			DRIVER(drive)->end_request(drive, 1, rq->nr_sectors);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
			drive->name, dma_stat);
	}
	return DRIVER(drive)->error(drive, "dma_intr", stat);
}

EXPORT_SYMBOL_GPL(ide_dma_intr);
/**
 *	ide_build_sglist	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to build the DMA table for
 *	@rq: the request holding the sg list
 *
 *	Perform the PCI mapping magic necessary to access the source or
 *	target buffers of a request via PCI DMA. The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */
int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scatterlist *sg = hwif->sg_table;
	int nents;

	if (hwif->sg_dma_active)
		BUG();

	nents = blk_rq_map_sg(drive->queue, rq, hwif->sg_table);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
	else
		hwif->sg_dma_direction = PCI_DMA_TODEVICE;

	return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_build_sglist);
/**
 *	ide_raw_build_sglist	-	map IDE scatter gather for DMA
 *	@drive: the drive to build the DMA table for
 *	@rq: the request holding the sg list
 *
 *	Perform the PCI mapping magic necessary to access the source or
 *	target buffers of a taskfile request via PCI DMA. The lower layers
 *	of the kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */
int ide_raw_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scatterlist *sg = hwif->sg_table;
	int nents = 0;
	ide_task_t *args = rq->special;
	u8 *virt_addr = rq->buffer;
	int sector_count = rq->nr_sectors;

	if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
		hwif->sg_dma_direction = PCI_DMA_TODEVICE;
	else
		hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;

	if (sector_count > 256)
		BUG();

	/* split transfers larger than 128 sectors into two sg segments */
	if (sector_count > 128) {
		memset(&sg[nents], 0, sizeof(*sg));
		sg[nents].page = virt_to_page(virt_addr);
		sg[nents].offset = offset_in_page(virt_addr);
		sg[nents].length = 128 * SECTOR_SIZE;
		nents++;
		virt_addr = virt_addr + (128 * SECTOR_SIZE);
		sector_count -= 128;
	}
	memset(&sg[nents], 0, sizeof(*sg));
	sg[nents].page = virt_to_page(virt_addr);
	sg[nents].offset = offset_in_page(virt_addr);
	sg[nents].length = sector_count * SECTOR_SIZE;
	nents++;

	return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_raw_build_sglist);
/**
 *	ide_build_dmatable	-	build IDE DMA table
 *
 *	ide_build_dmatable() prepares a dma request. We map the command
 *	to get the pci bus addresses of the buffers and then build up
 *	the PRD table that the IDE layer wants to be fed. The code
 *	knows about the 64K wrap bug in the CS5530.
 *
 *	Returns the number of built PRD entries if all went okay,
 *	returns 0 otherwise.
 *	May also be invoked from trm290.c
 */
int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif	= HWIF(drive);
	unsigned int *table	= hwif->dmatable_cpu;
	unsigned int is_trm290	= (hwif->chipset == ide_trm290) ? 1 : 0;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;

	if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE)
		hwif->sg_nents = i = ide_raw_build_sglist(drive, rq);
	else
		hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;

	sg = hwif->sg_table;
	while (i) {
		u32 cur_addr = sg_dma_address(sg);
		u32 cur_len = sg_dma_len(sg);

		/*
		 * Fill in the dma table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES) {
				printk(KERN_ERR "%s: DMA table too small\n", drive->name);
				goto use_pio_instead;
			} else {
				u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;
				*table++ = cpu_to_le32(cur_addr);
				xcount = bcount & 0xffff;
				if (is_trm290)
					xcount = ((xcount >> 2) - 1) << 16;
				if (xcount == 0x0000) {
					/*
					 * Most chipsets correctly interpret a length of
					 * 0x0000 as 64KB, but at least one (e.g. CS5530)
					 * misinterprets it as zero (!).  So here we break
					 * the 64KB entry into two 32KB entries instead.
					 */
					if (count++ >= PRD_ENTRIES) {
						printk(KERN_ERR "%s: DMA table too small\n", drive->name);
						goto use_pio_instead;
					}
					*table++ = cpu_to_le32(0x8000);
					*table++ = cpu_to_le32(cur_addr + 0x8000);
					xcount = 0x8000;
				}
				*table++ = cpu_to_le32(xcount);
				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg++;
		i--;
	}

	if (count) {
		if (!is_trm290)
			/* mark the last PRD entry as end-of-table */
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}
	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);
use_pio_instead:
	pci_unmap_sg(hwif->pci_dev,
		     hwif->sg_table,
		     hwif->sg_nents,
		     hwif->sg_dma_direction);
	hwif->sg_dma_active = 0;
	return 0; /* revert to PIO for this request */
}

EXPORT_SYMBOL_GPL(ide_build_dmatable);
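
/*
 * For illustration only -- not used by the code above.  Each PRD entry
 * built by ide_build_dmatable() is a pair of little-endian 32-bit words:
 * the physical address of the block, then a count word whose low 16 bits
 * hold the byte count (0x0000 meaning 64KB on most chipsets) and whose
 * top bit marks the end of the table.  The struct name is hypothetical.
 */
#if 0
struct prd_entry_sketch {
	u32 addr;	/* physical base address, 16-bit aligned */
	u32 flags_len;	/* bits 0-15: byte count; bit 31: end-of-table */
};
#endif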
/**
 *	ide_destroy_dmatable	-	clean up DMA mapping
 *	@drive: The drive to unmap
 *
 *	Teardown mappings after DMA has completed. This must be called
 *	after the completion of each use of ide_build_dmatable and before
 *	the next use of ide_build_dmatable. Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */
void ide_destroy_dmatable (ide_drive_t *drive)
{
	struct pci_dev *dev = HWIF(drive)->pci_dev;
	struct scatterlist *sg = HWIF(drive)->sg_table;
	int nents = HWIF(drive)->sg_nents;

	pci_unmap_sg(dev, sg, nents, HWIF(drive)->sg_dma_direction);
	HWIF(drive)->sg_dma_active = 0;
}

EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
/**
 *	config_drive_for_dma	-	attempt to activate IDE DMA
 *	@drive: the drive to place in DMA mode
 *
 *	If the drive supports at least mode 2 DMA or UDMA of any kind
 *	then attempt to place it into DMA mode. Drives that are known to
 *	support DMA but predate the DMA properties or that are known
 *	to have DMA handling bugs are also set up appropriately based
 *	on the good/bad drive lists.
 */
static int config_drive_for_dma (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = HWIF(drive);

	if ((id->capability & 1) && hwif->autodma) {
		/* Consult the list of known "bad" drives */
		if (__ide_dma_bad_drive(drive))
			return __ide_dma_off(drive);

		/*
		 * Enable DMA on any drive that has
		 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
		 */
		if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
			return hwif->ide_dma_on(drive);
		/*
		 * Enable DMA on any drive that has mode2 DMA
		 * (multi or single) enabled
		 */
		if (id->field_valid & 2)	/* regular DMA */
			if ((id->dma_mword & 0x404) == 0x404 ||
			    (id->dma_1word & 0x404) == 0x404)
				return hwif->ide_dma_on(drive);

		/* Consult the list of known "good" drives */
		if (__ide_dma_good_drive(drive))
			return hwif->ide_dma_on(drive);
	}
	return hwif->ide_dma_off_quietly(drive);
}
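
/*
 * A sketch of the identify-data check above, for clarity (illustrative
 * only): word 63 (id->dma_mword) and word 62 (id->dma_1word) keep the
 * supported multiword/singleword DMA modes in the low byte and the
 * currently selected mode in the next byte, so 0x404 tests "mode 2
 * supported" (bit 2) and "mode 2 selected" (bit 10) in one mask.
 */
#if 0
static inline int mode2_dma_enabled_sketch(u16 dma_word)
{
	return (dma_word & 0x404) == 0x404;	/* supported + selected */
}
#endif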
/**
 *	dma_timer_expiry	-	handle a DMA timeout
 *	@drive: Drive that timed out
 *
 *	An IDE DMA transfer timed out. In the event of an error we ask
 *	the driver to resolve the problem; if a DMA transfer is still
 *	in progress we continue to wait (arguably we need to add a
 *	secondary 'I don't care what the drive thinks' timeout here).
 *	Finally if we have an interrupt we let it complete the I/O.
 *	But only one time - we clear expiry and if it's still not
 *	completed after WAIT_CMD, we error and retry in PIO.
 *	This can occur if an interrupt is lost or due to hang or bugs.
 */
static int dma_timer_expiry (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat		= hwif->INB(hwif->dma_status);

	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
		drive->name, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	HWGROUP(drive)->expiry = NULL;	/* one free ride for now */

	/* 1 dmaing, 2 error, 4 intr */
	if (dma_stat & 2)	/* ERROR */
		return -1;

	if (dma_stat & 1)	/* DMAing */
		return WAIT_CMD;

	if (dma_stat & 4)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}
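
/*
 * Note on the expiry contract (a summary, not new behaviour): the return
 * value feeds the ide.c timer code -- a positive value re-arms the timeout
 * for that many further jiffies, while returning zero falls through to the
 * normal timeout/reset handling.
 */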
/**
 *	__ide_dma_host_off	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Perform the generic IDE controller DMA off operation. This
 *	works for most IDE bus mastering controllers.
 */
int __ide_dma_host_off (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 unit			= (drive->select.b.unit & 0x01);
	u8 dma_stat		= hwif->INB(hwif->dma_status);

	hwif->OUTB((dma_stat & ~(1<<(5+unit))), hwif->dma_status);
	return 0;
}

EXPORT_SYMBOL(__ide_dma_host_off);
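
/*
 * For reference, a sketch of the SFF-8038i bus-master status register
 * layout that the dma_stat manipulations in this file rely on.  These
 * macro names are illustrative only and are not used elsewhere.
 */
#if 0
#define BM_STAT_ACTIVE		0x01	/* bit 0: DMA transfer in progress */
#define BM_STAT_ERROR		0x02	/* bit 1: DMA error (write 1 to clear) */
#define BM_STAT_INTR		0x04	/* bit 2: interrupt (write 1 to clear) */
#define BM_STAT_DRV0_DMA	0x20	/* bit 5: drive 0 DMA capable */
#define BM_STAT_DRV1_DMA	0x40	/* bit 6: drive 1 DMA capable */
/* so "dma_stat | 6" clears ERROR and INTR, and "1 << (5 + unit)" selects
 * the per-drive DMA-capable bit toggled by the host on/off helpers. */
#endif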
/**
 *	__ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off the current DMA on this IDE controller.
 */
int __ide_dma_off_quietly (ide_drive_t *drive)
{
	drive->using_dma = 0;
	ide_toggle_bounce(drive, 0);

	if (HWIF(drive)->ide_dma_host_off(drive))
		return 1;

#ifdef CONFIG_BLK_DEV_IDE_TCQ
	__ide_dma_queued_off(drive);
#endif

	return 0;
}

EXPORT_SYMBOL(__ide_dma_off_quietly);

#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
/**
 *	__ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */

int __ide_dma_off (ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	return HWIF(drive)->ide_dma_off_quietly(drive);
}

EXPORT_SYMBOL(__ide_dma_off);
#ifdef CONFIG_BLK_DEV_IDEDMA_PCI

/**
 *	__ide_dma_host_on	-	Enable DMA on a host
 *	@drive: drive to enable for DMA
 *
 *	Enable DMA on an IDE controller following generic bus mastering
 *	IDE controller behaviour.
 */

int __ide_dma_host_on (ide_drive_t *drive)
{
	if (drive->using_dma) {
		ide_hwif_t *hwif = HWIF(drive);
		u8 unit = (drive->select.b.unit & 0x01);
		u8 dma_stat = hwif->INB(hwif->dma_status);

		hwif->OUTB((dma_stat|(1<<(5+unit))), hwif->dma_status);
		return 0;
	}
	return 1;
}

EXPORT_SYMBOL(__ide_dma_host_on);
/**
 *	__ide_dma_on		-	Enable DMA on a device
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */

int __ide_dma_on (ide_drive_t *drive)
{
	drive->using_dma = 1;
	ide_toggle_bounce(drive, 1);

	if (HWIF(drive)->ide_dma_host_on(drive))
		return 1;

	return 0;
}

EXPORT_SYMBOL(__ide_dma_on);
/**
 *	__ide_dma_check		-	check DMA setup
 *	@drive: drive to check
 *
 *	Don't use - due for extermination.
 */

int __ide_dma_check (ide_drive_t *drive)
{
	return config_drive_for_dma(drive);
}

EXPORT_SYMBOL(__ide_dma_check);
/**
 *	ide_start_dma	-	begin a DMA phase
 *	@hwif: interface to use
 *	@drive: target device
 *	@reading: set if reading, clear if writing
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers for a device
 *	that follows generic IDE PCI DMA behaviour. Controllers can
 *	override this function if they need to.
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned.
 */
int ide_start_dma(ide_hwif_t *hwif, ide_drive_t *drive, int reading)
{
	struct request *rq = HWGROUP(drive)->rq;
	u8 dma_stat;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq))
		return 1;

	/* PRD table */
	hwif->OUTL(hwif->dmatable_dma, hwif->dma_prdtable);

	/* specify r/w */
	hwif->OUTB(reading, hwif->dma_command);

	/* read dma_status for INTR & ERROR flags */
	dma_stat = hwif->INB(hwif->dma_status);

	/* clear INTR & ERROR flags */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	drive->waiting_for_dma = 1;
	return 0;
}

EXPORT_SYMBOL(ide_start_dma);
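
/*
 * For reference, a sketch of the bus-master command register bits used by
 * ide_start_dma() and __ide_dma_begin()/__ide_dma_end().  Names are
 * illustrative only.
 */
#if 0
#define BM_CMD_START	0x01	/* bit 0: start/stop the DMA engine */
#define BM_CMD_READ	0x08	/* bit 3: set for device-to-memory (read) */
/* "reading" in the callers below is therefore 1 << 3 for reads and 0 for
 * writes, and __ide_dma_begin() ORs in bit 0 only after the drive command
 * has been issued. */
#endif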
int __ide_dma_read (ide_drive_t *drive /*, struct request *rq */)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct request *rq	= HWGROUP(drive)->rq;
	unsigned int reading	= 1 << 3;
	u8 lba48		= (drive->addressing == 1) ? 1 : 0;
	task_ioreg_t command	= WIN_NOP;

	/* try PIO instead of DMA */
	if (ide_start_dma(hwif, drive, reading))
		return 1;

	if (drive->media != ide_disk)
		return 0;

	command = (lba48) ? WIN_READDMA_EXT : WIN_READDMA;

	if (drive->vdma)
		command = (lba48) ? WIN_READ_EXT : WIN_READ;

	if (rq->flags & REQ_DRIVE_TASKFILE) {
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}

	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);

	return hwif->ide_dma_begin(drive);
}

EXPORT_SYMBOL(__ide_dma_read);
int __ide_dma_write (ide_drive_t *drive /*, struct request *rq */)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct request *rq	= HWGROUP(drive)->rq;
	unsigned int reading	= 0;
	u8 lba48		= (drive->addressing == 1) ? 1 : 0;
	task_ioreg_t command	= WIN_NOP;

	/* try PIO instead of DMA */
	if (ide_start_dma(hwif, drive, reading))
		return 1;

	if (drive->media != ide_disk)
		return 0;

	command = (lba48) ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;

	if (drive->vdma)
		command = (lba48) ? WIN_WRITE_EXT : WIN_WRITE;

	if (rq->flags & REQ_DRIVE_TASKFILE) {
		ide_task_t *args = rq->special;
		command = args->tfRegister[IDE_COMMAND_OFFSET];
	}

	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);

	return hwif->ide_dma_begin(drive);
}

EXPORT_SYMBOL(__ide_dma_write);
int __ide_dma_begin (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_cmd		= hwif->INB(hwif->dma_command);

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	/* start DMA */
	hwif->OUTB(dma_cmd|1, hwif->dma_command);
	return 0;
}

EXPORT_SYMBOL(__ide_dma_begin);
/* returns 1 on error, 0 otherwise */
int __ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat = 0, dma_cmd = 0;

	drive->waiting_for_dma = 0;
	/* get dma_command mode */
	dma_cmd = hwif->INB(hwif->dma_command);
	/* stop DMA */
	hwif->OUTB(dma_cmd&~1, hwif->dma_command);
	/* get DMA status */
	dma_stat = hwif->INB(hwif->dma_status);
	/* clear the INTR & ERROR bits */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status: INTR set, ACTIVE and ERROR clear */
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

EXPORT_SYMBOL(__ide_dma_end);
/* returns 1 if dma irq issued, 0 otherwise */
int __ide_dma_test_irq (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat		= hwif->INB(hwif->dma_status);

#if 0	/* do not set unless you know what you are doing */
	if (dma_stat & 4) {
		u8 stat = hwif->INB(IDE_STATUS_REG);
		hwif->OUTB(dma_stat & 0xE4, hwif->dma_status);
	}
#endif
	/* return 1 if INTR asserted */
	if ((dma_stat & 4) == 4)
		return 1;
	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
			drive->name, __FUNCTION__);
	return 0;
}

EXPORT_SYMBOL(__ide_dma_test_irq);

#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
int __ide_dma_bad_drive (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

	int blacklist = in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
			drive->name, id->model);
		return blacklist;
	}
	return 0;
}

EXPORT_SYMBOL(__ide_dma_bad_drive);

int __ide_dma_good_drive (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;
	return in_drive_list(id, drive_whitelist);
}

EXPORT_SYMBOL(__ide_dma_good_drive);
#ifdef CONFIG_BLK_DEV_IDEDMA_PCI

int __ide_dma_verbose (ide_drive_t *drive)
{
	struct hd_driveid *id	= drive->id;
	ide_hwif_t *hwif	= HWIF(drive);

	if (id->field_valid & 4) {
		if ((id->dma_ultra >> 8) && (id->dma_mword >> 8)) {
			printk(", BUG DMA OFF");
			return hwif->ide_dma_off_quietly(drive);
		}
		if (id->dma_ultra & ((id->dma_ultra >> 8) & hwif->ultra_mask)) {
			if (((id->dma_ultra >> 11) & 0x1F) &&
			    eighty_ninty_three(drive)) {
				if ((id->dma_ultra >> 15) & 1) {
					printk(", UDMA(mode 7)");
				} else if ((id->dma_ultra >> 14) & 1) {
					printk(", UDMA(133)");
				} else if ((id->dma_ultra >> 13) & 1) {
					printk(", UDMA(100)");
				} else if ((id->dma_ultra >> 12) & 1) {
					printk(", UDMA(66)");
				} else if ((id->dma_ultra >> 11) & 1) {
					printk(", UDMA(44)");
				}
			} else {
				if ((id->dma_ultra >> 10) & 1) {
					printk(", UDMA(33)");
				} else if ((id->dma_ultra >> 9) & 1) {
					printk(", UDMA(25)");
				} else if ((id->dma_ultra >> 8) & 1) {
					printk(", UDMA(16)");
				}
			}
		} else {
			printk(", (U)DMA");	/* Can be BIOS-enabled! */
		}
	} else if (id->field_valid & 2) {
		if ((id->dma_mword >> 8) && (id->dma_1word >> 8)) {
			printk(", BUG DMA OFF");
			return hwif->ide_dma_off_quietly(drive);
		}
		printk(", DMA");
	} else if (id->field_valid & 1) {
		printk(", BUG");
	}
	return 1;
}

EXPORT_SYMBOL(__ide_dma_verbose);
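
/*
 * A sketch of identify word 88 (id->dma_ultra) as decoded above, for
 * clarity (illustrative only): the low byte reports the UDMA modes the
 * drive supports, the high byte the single mode currently selected, so
 * "(id->dma_ultra >> 8) & hwif->ultra_mask" isolates the selected mode
 * among those the host allows.
 */
#if 0
static inline int udma_mode_selected_sketch(u16 dma_ultra, int mode)
{
	return (dma_ultra >> (8 + mode)) & 1;	/* e.g. mode 2 => UDMA(33) */
}
#endif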
int __ide_dma_lostirq (ide_drive_t *drive)
{
	printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
	return 1;
}

EXPORT_SYMBOL(__ide_dma_lostirq);

int __ide_dma_timeout (ide_drive_t *drive)
{
	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
	if (HWIF(drive)->ide_dma_test_irq(drive))
		return 0;

	return HWIF(drive)->ide_dma_end(drive);
}

EXPORT_SYMBOL(__ide_dma_timeout);
/*
 *	Needed for allowing full modular support of ide-driver
 */
int ide_release_dma_engine (ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		pci_free_consistent(hwif->pci_dev,
				    PRD_ENTRIES * PRD_BYTES,
				    hwif->dmatable_cpu,
				    hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
	if (hwif->sg_table) {
		kfree(hwif->sg_table);
		hwif->sg_table = NULL;
	}
	return 1;
}
int ide_release_iomio_dma (ide_hwif_t *hwif)
{
	if ((hwif->dma_extra) && (hwif->channel == 0))
		release_region((hwif->dma_base + 16), hwif->dma_extra);
	release_region(hwif->dma_base, 8);
	if (hwif->dma_base2)
		release_region(hwif->dma_base2, 8);
	return 1;
}
/*
 *	Needed for allowing full modular support of ide-driver
 */
int ide_release_dma (ide_hwif_t *hwif)
{
	if (hwif->mmio == 2)
		return 1;
	if (hwif->chipset == ide_etrax100)
		return 1;

	ide_release_dma_engine(hwif);
	return ide_release_iomio_dma(hwif);
}
int ide_allocate_dma_engine (ide_hwif_t *hwif)
{
	hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
						  PRD_ENTRIES * PRD_BYTES,
						  &hwif->dmatable_dma);
	hwif->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,
				 GFP_KERNEL);

	if ((hwif->dmatable_cpu) && (hwif->sg_table))
		return 0;

	printk(KERN_ERR "%s: -- Error, unable to allocate%s%s table(s).\n",
	       hwif->cds->name,
	       (hwif->dmatable_cpu == NULL) ? " CPU" : "",
	       (hwif->sg_table == NULL) ? " SG DMA" : " DMA");

	ide_release_dma_engine(hwif);
	return 1;
}
int ide_mapped_mmio_dma (ide_hwif_t *hwif, unsigned long base, unsigned int ports)
{
	printk(KERN_INFO " %s: MMIO-DMA ", hwif->name);

	hwif->dma_base = base;
	if (hwif->cds->extra && hwif->channel == 0)
		hwif->dma_extra = hwif->cds->extra;

	if (hwif->mate)
		hwif->dma_master = (hwif->channel) ? hwif->mate->dma_base : base;
	else
		hwif->dma_master = base;
	return 0;
}
int ide_iomio_dma (ide_hwif_t *hwif, unsigned long base, unsigned int ports)
{
	printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx",
	       hwif->name, base, base + ports - 1);
	if (!request_region(base, ports, hwif->name)) {
		printk(" -- Error, ports in use.\n");
		return 1;
	}

	hwif->dma_base = base;
	if ((hwif->cds->extra) && (hwif->channel == 0)) {
		request_region(base + 16, hwif->cds->extra, hwif->cds->name);
		hwif->dma_extra = hwif->cds->extra;
	}

	if (hwif->mate)
		hwif->dma_master = (hwif->channel) ? hwif->mate->dma_base : base;
	else
		hwif->dma_master = base;
	if (hwif->dma_base2) {
		if (!request_region(hwif->dma_base2, ports, hwif->name)) {
			printk(" -- Error, secondary ports in use.\n");
			release_region(base, ports);
			return 1;
		}
	}
	return 0;
}
int ide_dma_iobase (ide_hwif_t *hwif, unsigned long base, unsigned int ports)
{
	if (hwif->mmio == 2)
		return ide_mapped_mmio_dma(hwif, base, ports);
	BUG_ON(hwif->mmio == 1);
	return ide_iomio_dma(hwif, base, ports);
}
/*
 * This can be called for a dynamically installed interface. Don't __init it
 */
void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_ports)
{
	if (ide_dma_iobase(hwif, dma_base, num_ports))
		return;

	if (ide_allocate_dma_engine(hwif)) {
		ide_release_dma(hwif);
		return;
	}

	if (!(hwif->dma_command))
		hwif->dma_command	= hwif->dma_base;
	if (!(hwif->dma_vendor1))
		hwif->dma_vendor1	= (hwif->dma_base + 1);
	if (!(hwif->dma_status))
		hwif->dma_status	= (hwif->dma_base + 2);
	if (!(hwif->dma_vendor3))
		hwif->dma_vendor3	= (hwif->dma_base + 3);
	if (!(hwif->dma_prdtable))
		hwif->dma_prdtable	= (hwif->dma_base + 4);

	if (!hwif->ide_dma_off_quietly)
		hwif->ide_dma_off_quietly = &__ide_dma_off_quietly;
	if (!hwif->ide_dma_host_off)
		hwif->ide_dma_host_off = &__ide_dma_host_off;
	if (!hwif->ide_dma_on)
		hwif->ide_dma_on = &__ide_dma_on;
	if (!hwif->ide_dma_host_on)
		hwif->ide_dma_host_on = &__ide_dma_host_on;
	if (!hwif->ide_dma_check)
		hwif->ide_dma_check = &__ide_dma_check;
	if (!hwif->ide_dma_read)
		hwif->ide_dma_read = &__ide_dma_read;
	if (!hwif->ide_dma_write)
		hwif->ide_dma_write = &__ide_dma_write;
	if (!hwif->ide_dma_begin)
		hwif->ide_dma_begin = &__ide_dma_begin;
	if (!hwif->ide_dma_end)
		hwif->ide_dma_end = &__ide_dma_end;
	if (!hwif->ide_dma_test_irq)
		hwif->ide_dma_test_irq = &__ide_dma_test_irq;
	if (!hwif->ide_dma_verbose)
		hwif->ide_dma_verbose = &__ide_dma_verbose;
	if (!hwif->ide_dma_timeout)
		hwif->ide_dma_timeout = &__ide_dma_timeout;
	if (!hwif->ide_dma_lostirq)
		hwif->ide_dma_lostirq = &__ide_dma_lostirq;

	if (hwif->chipset != ide_trm290) {
		u8 dma_stat = hwif->INB(hwif->dma_status);
		printk(", BIOS settings: %s:%s, %s:%s",
			hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "pio",
			hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
	}
	printk("\n");

	if (!(hwif->dma_master))
		BUG();
}

EXPORT_SYMBOL_GPL(ide_setup_dma);
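
/*
 * A minimal usage sketch (hypothetical, for illustration): a PCI IDE host
 * driver typically reads the bus-master base from BAR 4 and hands each
 * channel its 8-port slice.
 */
#if 0
static void example_hwif_setup_dma(struct pci_dev *dev, ide_hwif_t *hwif)
{
	unsigned long dmabase = pci_resource_start(dev, 4);

	/* the secondary channel's registers start 8 ports further in */
	ide_setup_dma(hwif, dmabase + 8 * hwif->channel, 8);
}
#endif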
#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */