2 libata-core.c - helper library for ATA
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik
7 The contents of this file are subject to the Open
8 Software License version 1.1 that can be found at
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 Alternatively, the contents of this file may be used under the terms
13 of the GNU General Public License version 2 (the "GPL") as distributed
14 in the kernel source COPYING file, in which case the provisions of
15 the GPL are applicable instead of the above. If you wish to allow
16 the use of your version of this file only under the terms of the
17 GPL and not to allow others to use your version of this file under
18 the OSL, indicate your decision by deleting the provisions above and
19 replace them with the notice and other provisions required by the GPL.
20 If you do not delete the provisions above, a recipient may use your
21 version of this file under either the OSL or the GPL.
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
31 #include <linux/highmem.h>
32 #include <linux/spinlock.h>
33 #include <linux/blkdev.h>
34 #include <linux/delay.h>
35 #include <linux/timer.h>
36 #include <linux/interrupt.h>
37 #include <linux/completion.h>
38 #include <linux/suspend.h>
39 #include <linux/workqueue.h>
40 #include <scsi/scsi.h>
42 #include "scsi_priv.h"
43 #include <scsi/scsi_host.h>
44 #include <linux/libata.h>
46 #include <asm/semaphore.h>
47 #include <asm/byteorder.h>
51 static unsigned int ata_busy_sleep (struct ata_port *ap,
52 unsigned long tmout_pat,
54 static void ata_set_mode(struct ata_port *ap);
55 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
56 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
57 static int fgb(u32 bitmap);
58 static int ata_choose_xfer_mode(struct ata_port *ap,
60 unsigned int *xfer_shift_out);
61 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
62 static void __ata_qc_complete(struct ata_queued_cmd *qc);
64 static unsigned int ata_unique_id = 1;
65 static struct workqueue_struct *ata_wq;
67 MODULE_AUTHOR("Jeff Garzik");
68 MODULE_DESCRIPTION("Library module for ATA devices");
69 MODULE_LICENSE("GPL");
70 MODULE_VERSION(DRV_VERSION);
73 * ata_tf_load - send taskfile registers to host controller
74 * @ap: Port to which output is sent
75 * @tf: ATA taskfile register set
77 * Outputs ATA taskfile to standard ATA host controller.
80 * Inherited from caller.
83 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
85 struct ata_ioports *ioaddr = &ap->ioaddr;
86 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
88 if (tf->ctl != ap->last_ctl) {
89 outb(tf->ctl, ioaddr->ctl_addr);
90 ap->last_ctl = tf->ctl;
94 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
95 outb(tf->hob_feature, ioaddr->feature_addr);
96 outb(tf->hob_nsect, ioaddr->nsect_addr);
97 outb(tf->hob_lbal, ioaddr->lbal_addr);
98 outb(tf->hob_lbam, ioaddr->lbam_addr);
99 outb(tf->hob_lbah, ioaddr->lbah_addr);
100 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
109 outb(tf->feature, ioaddr->feature_addr);
110 outb(tf->nsect, ioaddr->nsect_addr);
111 outb(tf->lbal, ioaddr->lbal_addr);
112 outb(tf->lbam, ioaddr->lbam_addr);
113 outb(tf->lbah, ioaddr->lbah_addr);
114 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
122 if (tf->flags & ATA_TFLAG_DEVICE) {
123 outb(tf->device, ioaddr->device_addr);
124 VPRINTK("device 0x%X\n", tf->device);
131 * ata_tf_load_mmio - send taskfile registers to host controller
132 * @ap: Port to which output is sent
133 * @tf: ATA taskfile register set
135 * Outputs ATA taskfile to standard ATA host controller using MMIO.
138 * Inherited from caller.
141 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
143 struct ata_ioports *ioaddr = &ap->ioaddr;
144 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
146 if (tf->ctl != ap->last_ctl) {
147 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
148 ap->last_ctl = tf->ctl;
152 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
153 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
154 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
155 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
156 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
157 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
158 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
167 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
168 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
169 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
170 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
171 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
172 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
180 if (tf->flags & ATA_TFLAG_DEVICE) {
181 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
182 VPRINTK("device 0x%X\n", tf->device);
188 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
190 if (ap->flags & ATA_FLAG_MMIO)
191 ata_tf_load_mmio(ap, tf);
193 ata_tf_load_pio(ap, tf);
197 * ata_exec_command - issue ATA command to host controller
198 * @ap: port to which command is being issued
199 * @tf: ATA taskfile register set
201 * Issues PIO/MMIO write to ATA command register, with proper
202 * synchronization with interrupt handler / other threads.
205 * spin_lock_irqsave(host_set lock)
208 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
210 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
212 outb(tf->command, ap->ioaddr.command_addr);
218 * ata_exec_command_mmio - issue ATA command to host controller
219 * @ap: port to which command is being issued
220 * @tf: ATA taskfile register set
222 * Issues MMIO write to ATA command register, with proper
223 * synchronization with interrupt handler / other threads.
226 * spin_lock_irqsave(host_set lock)
229 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
231 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
233 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
237 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
239 if (ap->flags & ATA_FLAG_MMIO)
240 ata_exec_command_mmio(ap, tf);
242 ata_exec_command_pio(ap, tf);
246 * ata_exec - issue ATA command to host controller
247 * @ap: port to which command is being issued
248 * @tf: ATA taskfile register set
250 * Issues PIO/MMIO write to ATA command register, with proper
251 * synchronization with interrupt handler / other threads.
254 * Obtains host_set lock.
257 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
261 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
262 spin_lock_irqsave(&ap->host_set->lock, flags);
263 ap->ops->exec_command(ap, tf);
264 spin_unlock_irqrestore(&ap->host_set->lock, flags);
268 * ata_tf_to_host - issue ATA taskfile to host controller
269 * @ap: port to which command is being issued
270 * @tf: ATA taskfile register set
272 * Issues ATA taskfile register set to ATA host controller,
273 * with proper synchronization with interrupt handler and
277 * Obtains host_set lock.
280 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
282 ap->ops->tf_load(ap, tf);
288 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
289 * @ap: port to which command is being issued
290 * @tf: ATA taskfile register set
292 * Issues ATA taskfile register set to ATA host controller,
293 * with proper synchronization with interrupt handler and
297 * spin_lock_irqsave(host_set lock)
300 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
302 ap->ops->tf_load(ap, tf);
303 ap->ops->exec_command(ap, tf);
307 * ata_tf_read - input device's ATA taskfile shadow registers
308 * @ap: Port from which input is read
309 * @tf: ATA taskfile register set for storing input
311 * Reads ATA taskfile registers for currently-selected device
315 * Inherited from caller.
318 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
320 struct ata_ioports *ioaddr = &ap->ioaddr;
322 tf->nsect = inb(ioaddr->nsect_addr);
323 tf->lbal = inb(ioaddr->lbal_addr);
324 tf->lbam = inb(ioaddr->lbam_addr);
325 tf->lbah = inb(ioaddr->lbah_addr);
326 tf->device = inb(ioaddr->device_addr);
328 if (tf->flags & ATA_TFLAG_LBA48) {
329 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
330 tf->hob_feature = inb(ioaddr->error_addr);
331 tf->hob_nsect = inb(ioaddr->nsect_addr);
332 tf->hob_lbal = inb(ioaddr->lbal_addr);
333 tf->hob_lbam = inb(ioaddr->lbam_addr);
334 tf->hob_lbah = inb(ioaddr->lbah_addr);
339 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
340 * @ap: Port from which input is read
341 * @tf: ATA taskfile register set for storing input
343 * Reads ATA taskfile registers for currently-selected device
347 * Inherited from caller.
350 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
352 struct ata_ioports *ioaddr = &ap->ioaddr;
354 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
355 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
356 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
357 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
358 tf->device = readb((void __iomem *)ioaddr->device_addr);
360 if (tf->flags & ATA_TFLAG_LBA48) {
361 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
362 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
363 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
364 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
365 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
366 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
370 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
372 if (ap->flags & ATA_FLAG_MMIO)
373 ata_tf_read_mmio(ap, tf);
375 ata_tf_read_pio(ap, tf);
379 * ata_check_status - Read device status reg & clear interrupt
380 * @ap: port where the device is
382 * Reads ATA taskfile status register for currently-selected device
383 * and return it's value. This also clears pending interrupts
387 * Inherited from caller.
389 static u8 ata_check_status_pio(struct ata_port *ap)
391 return inb(ap->ioaddr.status_addr);
395 * ata_check_status_mmio - Read device status reg & clear interrupt
396 * @ap: port where the device is
398 * Reads ATA taskfile status register for currently-selected device
399 * via MMIO and return it's value. This also clears pending interrupts
403 * Inherited from caller.
405 static u8 ata_check_status_mmio(struct ata_port *ap)
407 return readb((void __iomem *) ap->ioaddr.status_addr);
410 u8 ata_check_status(struct ata_port *ap)
412 if (ap->flags & ATA_FLAG_MMIO)
413 return ata_check_status_mmio(ap);
414 return ata_check_status_pio(ap);
418 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
419 * @tf: Taskfile to convert
420 * @fis: Buffer into which data will output
421 * @pmp: Port multiplier port
423 * Converts a standard ATA taskfile to a Serial ATA
424 * FIS structure (Register - Host to Device).
427 * Inherited from caller.
430 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
432 fis[0] = 0x27; /* Register - Host to Device FIS */
433 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
434 bit 7 indicates Command FIS */
435 fis[2] = tf->command;
436 fis[3] = tf->feature;
443 fis[8] = tf->hob_lbal;
444 fis[9] = tf->hob_lbam;
445 fis[10] = tf->hob_lbah;
446 fis[11] = tf->hob_feature;
449 fis[13] = tf->hob_nsect;
460 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
461 * @fis: Buffer from which data will be input
462 * @tf: Taskfile to output
464 * Converts a standard ATA taskfile to a Serial ATA
465 * FIS structure (Register - Host to Device).
468 * Inherited from caller.
471 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
473 tf->command = fis[2]; /* status */
474 tf->feature = fis[3]; /* error */
481 tf->hob_lbal = fis[8];
482 tf->hob_lbam = fis[9];
483 tf->hob_lbah = fis[10];
486 tf->hob_nsect = fis[13];
490 * ata_prot_to_cmd - determine which read/write opcodes to use
491 * @protocol: ATA_PROT_xxx taskfile protocol
492 * @lba48: true is lba48 is present
494 * Given necessary input, determine which read/write commands
495 * to use to transfer data.
500 static int ata_prot_to_cmd(int protocol, int lba48)
502 int rcmd = 0, wcmd = 0;
507 rcmd = ATA_CMD_PIO_READ_EXT;
508 wcmd = ATA_CMD_PIO_WRITE_EXT;
510 rcmd = ATA_CMD_PIO_READ;
511 wcmd = ATA_CMD_PIO_WRITE;
517 rcmd = ATA_CMD_READ_EXT;
518 wcmd = ATA_CMD_WRITE_EXT;
521 wcmd = ATA_CMD_WRITE;
529 return rcmd | (wcmd << 8);
533 * ata_dev_set_protocol - set taskfile protocol and r/w commands
534 * @dev: device to examine and configure
536 * Examine the device configuration, after we have
537 * read the identify-device page and configured the
538 * data transfer mode. Set internal state related to
539 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
540 * and calculate the proper read/write commands to use.
545 static void ata_dev_set_protocol(struct ata_device *dev)
547 int pio = (dev->flags & ATA_DFLAG_PIO);
548 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
552 proto = dev->xfer_protocol = ATA_PROT_PIO;
554 proto = dev->xfer_protocol = ATA_PROT_DMA;
556 cmd = ata_prot_to_cmd(proto, lba48);
560 dev->read_cmd = cmd & 0xff;
561 dev->write_cmd = (cmd >> 8) & 0xff;
/* Human-readable transfer mode names, indexed by xfer-class bit number
 * (see ata_mode_string() / ata_dev_set_mode()).
 * NOTE(review): the table entries themselves are elided from this chunk;
 * confirm ordering (UDMA, then MWDMA, then PIO) against the upstream file
 * before relying on any index arithmetic. */
564 static const char * xfer_mode_str[] = {
584 * ata_udma_string - convert UDMA bit offset to string
585 * @mask: mask of bits supported; only highest bit counts.
587 * Determine string which represents the highest speed
588 * (highest bit in @udma_mask).
594 * Constant C string representing highest speed listed in
595 * @udma_mask, or the constant C string "<n/a>".
598 static const char *ata_mode_string(unsigned int mask)
602 for (i = 7; i >= 0; i--)
605 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
608 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
615 return xfer_mode_str[i];
619 * ata_pio_devchk - PATA device presence detection
620 * @ap: ATA channel to examine
621 * @device: Device to examine (starting at zero)
623 * This technique was originally described in
624 * Hale Landis's ATADRVR (www.ata-atapi.com), and
625 * later found its way into the ATA/ATAPI spec.
627 * Write a pattern to the ATA shadow registers,
628 * and if a device is present, it will respond by
629 * correctly storing and echoing back the
630 * ATA shadow register contents.
636 static unsigned int ata_pio_devchk(struct ata_port *ap,
639 struct ata_ioports *ioaddr = &ap->ioaddr;
642 ap->ops->dev_select(ap, device);
644 outb(0x55, ioaddr->nsect_addr);
645 outb(0xaa, ioaddr->lbal_addr);
647 outb(0xaa, ioaddr->nsect_addr);
648 outb(0x55, ioaddr->lbal_addr);
650 outb(0x55, ioaddr->nsect_addr);
651 outb(0xaa, ioaddr->lbal_addr);
653 nsect = inb(ioaddr->nsect_addr);
654 lbal = inb(ioaddr->lbal_addr);
656 if ((nsect == 0x55) && (lbal == 0xaa))
657 return 1; /* we found a device */
659 return 0; /* nothing found */
663 * ata_mmio_devchk - PATA device presence detection
664 * @ap: ATA channel to examine
665 * @device: Device to examine (starting at zero)
667 * This technique was originally described in
668 * Hale Landis's ATADRVR (www.ata-atapi.com), and
669 * later found its way into the ATA/ATAPI spec.
671 * Write a pattern to the ATA shadow registers,
672 * and if a device is present, it will respond by
673 * correctly storing and echoing back the
674 * ATA shadow register contents.
680 static unsigned int ata_mmio_devchk(struct ata_port *ap,
683 struct ata_ioports *ioaddr = &ap->ioaddr;
686 ap->ops->dev_select(ap, device);
688 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
689 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
691 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
692 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
694 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
695 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
697 nsect = readb((void __iomem *) ioaddr->nsect_addr);
698 lbal = readb((void __iomem *) ioaddr->lbal_addr);
700 if ((nsect == 0x55) && (lbal == 0xaa))
701 return 1; /* we found a device */
703 return 0; /* nothing found */
707 * ata_devchk - PATA device presence detection
708 * @ap: ATA channel to examine
709 * @device: Device to examine (starting at zero)
711 * Dispatch ATA device presence detection, depending
712 * on whether we are using PIO or MMIO to talk to the
713 * ATA shadow registers.
719 static unsigned int ata_devchk(struct ata_port *ap,
722 if (ap->flags & ATA_FLAG_MMIO)
723 return ata_mmio_devchk(ap, device);
724 return ata_pio_devchk(ap, device);
728 * ata_dev_classify - determine device type based on ATA-spec signature
729 * @tf: ATA taskfile register set for device to be identified
731 * Determine from taskfile register contents whether a device is
732 * ATA or ATAPI, as per "Signature and persistence" section
733 * of ATA/PI spec (volume 1, sect 5.14).
739 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
740 * the event of failure.
743 unsigned int ata_dev_classify(struct ata_taskfile *tf)
745 /* Apple's open source Darwin code hints that some devices only
746 * put a proper signature into the LBA mid/high registers,
747 * So, we only check those. It's sufficient for uniqueness.
750 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
751 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
752 DPRINTK("found ATA device by sig\n");
756 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
757 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
758 DPRINTK("found ATAPI device by sig\n");
759 return ATA_DEV_ATAPI;
762 DPRINTK("unknown device\n");
763 return ATA_DEV_UNKNOWN;
767 * ata_dev_try_classify - Parse returned ATA device signature
768 * @ap: ATA channel to examine
769 * @device: Device to examine (starting at zero)
771 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
772 * an ATA/ATAPI-defined set of values is placed in the ATA
773 * shadow registers, indicating the results of device detection
776 * Select the ATA device, and read the values from the ATA shadow
777 * registers. Then parse according to the Error register value,
778 * and the spec-defined values examined by ata_dev_classify().
784 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
786 struct ata_device *dev = &ap->device[device];
787 struct ata_taskfile tf;
791 ap->ops->dev_select(ap, device);
793 memset(&tf, 0, sizeof(tf));
795 err = ata_chk_err(ap);
796 ap->ops->tf_read(ap, &tf);
798 dev->class = ATA_DEV_NONE;
800 /* see if device passed diags */
803 else if ((device == 0) && (err == 0x81))
808 /* determine if device if ATA or ATAPI */
809 class = ata_dev_classify(&tf);
810 if (class == ATA_DEV_UNKNOWN)
812 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
821 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
822 * @id: IDENTIFY DEVICE results we will examine
823 * @s: string into which data is output
824 * @ofs: offset into identify device page
825 * @len: length of string to return. must be an even number.
827 * The strings in the IDENTIFY DEVICE page are broken up into
828 * 16-bit chunks. Run through the string, and output each
829 * 8-bit chunk linearly, regardless of platform.
835 void ata_dev_id_string(u16 *id, unsigned char *s,
836 unsigned int ofs, unsigned int len)
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.  It is a stub for
 *	controllers that do not need device selection.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
859 * ata_std_dev_select - Select device 0/1 on ATA bus
860 * @ap: ATA channel to manipulate
861 * @device: ATA device (numbered from zero) to select
863 * Use the method defined in the ATA specification to
864 * make either device 0, or device 1, active on the
871 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
876 tmp = ATA_DEVICE_OBS;
878 tmp = ATA_DEVICE_OBS | ATA_DEV1;
880 if (ap->flags & ATA_FLAG_MMIO) {
881 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
883 outb(tmp, ap->ioaddr.device_addr);
885 ata_pause(ap); /* needed; also flushes, for mmio */
889 * ata_dev_select - Select device 0/1 on ATA bus
890 * @ap: ATA channel to manipulate
891 * @device: ATA device (numbered from zero) to select
892 * @wait: non-zero to wait for Status register BSY bit to clear
893 * @can_sleep: non-zero if context allows sleeping
895 * Use the method defined in the ATA specification to
896 * make either device 0, or device 1, active on the
899 * This is a high-level version of ata_std_dev_select(),
900 * which additionally provides the services of inserting
901 * the proper pauses and status polling, where needed.
907 void ata_dev_select(struct ata_port *ap, unsigned int device,
908 unsigned int wait, unsigned int can_sleep)
910 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
911 ap->id, device, wait);
916 ap->ops->dev_select(ap, device);
919 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
926 * ata_dump_id - IDENTIFY DEVICE info debugging output
927 * @dev: Device whose IDENTIFY DEVICE page we will dump
929 * Dump selected 16-bit words from a detected device's
930 * IDENTIFY PAGE page.
/* NOTE(review): debug-only helper (compiles away unless DPRINTK is
 * enabled).  The DPRINTK format/argument tails for words 49..93 are
 * elided from this chunk -- the statements below are fragments; restore
 * the full argument lists from the upstream file before building. */
936 static inline void ata_dump_id(struct ata_device *dev)
938 DPRINTK("49==0x%04x "
948 DPRINTK("80==0x%04x "
958 DPRINTK("88==0x%04x "
965 * ata_dev_identify - obtain IDENTIFY x DEVICE page
966 * @ap: port on which device we wish to probe resides
967 * @device: device bus address, starting at zero
969 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
970 * command, and read back the 512-byte device information page.
971 * The device information page is fed to us via the standard
972 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
973 * using standard PIO-IN paths)
975 * After reading the device information page, we use several
976 * bits of information from it to initialize data structures
977 * that will be used during the lifetime of the ata_device.
978 * Other data from the info page is used to disqualify certain
979 * older ATA devices we do not wish to support.
982 * Inherited from caller. Some functions called by this function
983 * obtain the host_set lock.
/* NOTE(review): this function is heavily elided in this chunk (queued
 * command error/retry paths, branch bodies and closing braces are
 * missing).  Do not edit logic from this fragment; restore the complete
 * body from the upstream file. */
986 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
988 struct ata_device *dev = &ap->device[device];
991 unsigned long xfer_modes;
993 unsigned int using_edd;
994 DECLARE_COMPLETION(wait);
995 struct ata_queued_cmd *qc;
/* bail out early if reset/probe found no device at this address */
999 if (!ata_dev_present(dev)) {
1000 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1005 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1010 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1012 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1013 dev->class == ATA_DEV_NONE);
1015 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
/* build a PIO-IN queued command that reads the 512-byte ID page
 * into dev->id */
1017 qc = ata_qc_new_init(ap, dev);
1020 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1021 qc->dma_dir = DMA_FROM_DEVICE;
1022 qc->tf.protocol = ATA_PROT_PIO;
1026 if (dev->class == ATA_DEV_ATA) {
1027 qc->tf.command = ATA_CMD_ID_ATA;
1028 DPRINTK("do ATA identify\n");
1030 qc->tf.command = ATA_CMD_ID_ATAPI;
1031 DPRINTK("do ATAPI identify\n");
1034 qc->waiting = &wait;
1035 qc->complete_fn = ata_qc_complete_noop;
1037 spin_lock_irqsave(&ap->host_set->lock, flags);
1038 rc = ata_qc_issue(qc);
1039 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1044 wait_for_completion(&wait);
1046 status = ata_chk_status(ap);
1047 if (status & ATA_ERR) {
1049 * arg! EDD works for all test cases, but seems to return
1050 * the ATA signature for some ATAPI devices. Until the
1051 * reason for this is found and fixed, we fix up the mess
1052 * here. If IDENTIFY DEVICE returns command aborted
1053 * (as ATAPI devices do), then we issue an
1054 * IDENTIFY PACKET DEVICE.
1056 * ATA software reset (SRST, the default) does not appear
1057 * to have this problem.
1059 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1060 u8 err = ata_chk_err(ap);
1061 if (err & ATA_ABORTED) {
1062 dev->class = ATA_DEV_ATAPI;
/* identify page arrives big-endian word-at-a-time; normalize */
1073 swap_buf_le16(dev->id, ATA_ID_WORDS);
1075 /* print device capabilities */
1076 printk(KERN_DEBUG "ata%u: dev %u cfg "
1077 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1078 ap->id, device, dev->id[49],
1079 dev->id[82], dev->id[83], dev->id[84],
1080 dev->id[85], dev->id[86], dev->id[87],
1084 * common ATA, ATAPI feature tests
1087 /* we require LBA and DMA support (bits 8 & 9 of word 49) */
1088 if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
1089 printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
1093 /* quick-n-dirty find max transfer mode; for printk only */
1094 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1096 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1098 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1099 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1104 /* ATA-specific feature tests */
1105 if (dev->class == ATA_DEV_ATA) {
1106 if (!ata_id_is_ata(dev->id)) /* sanity check */
/* find highest ATA major version bit (word 80) */
1109 tmp = dev->id[ATA_ID_MAJOR_VER];
1110 for (i = 14; i >= 1; i--)
1114 /* we require at least ATA-3 */
1116 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
1120 if (ata_id_has_lba48(dev->id)) {
1121 dev->flags |= ATA_DFLAG_LBA48;
1122 dev->n_sectors = ata_id_u64(dev->id, 100);
1124 dev->n_sectors = ata_id_u32(dev->id, 60);
1127 ap->host->max_cmd_len = 16;
1129 /* print device info to dmesg */
1130 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1132 ata_mode_string(xfer_modes),
1133 (unsigned long long)dev->n_sectors,
1134 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1137 /* ATAPI-specific feature tests */
1139 if (ata_id_is_ata(dev->id)) /* sanity check */
1142 rc = atapi_cdb_len(dev->id);
1143 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1144 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1147 ap->cdb_len = (unsigned int) rc;
1148 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1150 /* print device info to dmesg */
1151 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1153 ata_mode_string(xfer_modes));
1156 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1160 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1163 ata_irq_on(ap); /* re-enable interrupts */
1164 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1165 DPRINTK("EXIT, err\n");
1169 * ata_bus_probe - Reset and probe ATA bus
1175 * Zero on success, non-zero on error.
1178 static int ata_bus_probe(struct ata_port *ap)
1180 unsigned int i, found = 0;
1182 ap->ops->phy_reset(ap);
1183 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1186 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1187 ata_dev_identify(ap, i);
1188 if (ata_dev_present(&ap->device[i])) {
1190 if (ap->ops->dev_config)
1191 ap->ops->dev_config(ap, &ap->device[i]);
1195 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1196 goto err_out_disable;
1199 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1200 goto err_out_disable;
1205 ap->ops->port_disable(ap);
1217 void ata_port_probe(struct ata_port *ap)
1219 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1223 * __sata_phy_reset -
1229 void __sata_phy_reset(struct ata_port *ap)
1232 unsigned long timeout = jiffies + (HZ * 5);
1234 if (ap->flags & ATA_FLAG_SATA_RESET) {
1235 scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */
1236 scr_read(ap, SCR_STATUS); /* dummy read; flush */
1237 udelay(400); /* FIXME: a guess */
1239 scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/clear reset */
1241 /* wait for phy to become ready, if necessary */
1244 sstatus = scr_read(ap, SCR_STATUS);
1245 if ((sstatus & 0xf) != 1)
1247 } while (time_before(jiffies, timeout));
1249 /* TODO: phy layer with polling, timeouts, etc. */
1250 if (sata_dev_present(ap))
1253 sstatus = scr_read(ap, SCR_STATUS);
1254 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1256 ata_port_disable(ap);
1259 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1262 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1263 ata_port_disable(ap);
1267 ap->cbl = ATA_CBL_SATA;
1271 * __sata_phy_reset -
1277 void sata_phy_reset(struct ata_port *ap)
1279 __sata_phy_reset(ap);
1280 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1286 * ata_port_disable -
1292 void ata_port_disable(struct ata_port *ap)
1294 ap->device[0].class = ATA_DEV_NONE;
1295 ap->device[1].class = ATA_DEV_NONE;
1296 ap->flags |= ATA_FLAG_PORT_DISABLED;
1302 } xfer_mode_classes[] = {
1303 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1304 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1305 { ATA_SHIFT_PIO, XFER_PIO_0 },
1308 static inline u8 base_from_shift(unsigned int shift)
1312 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1313 if (xfer_mode_classes[i].shift == shift)
1314 return xfer_mode_classes[i].base;
1319 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1324 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1327 if (dev->xfer_shift == ATA_SHIFT_PIO)
1328 dev->flags |= ATA_DFLAG_PIO;
1330 ata_dev_set_xfermode(ap, dev);
1332 base = base_from_shift(dev->xfer_shift);
1333 ofs = dev->xfer_mode - base;
1334 idx = ofs + dev->xfer_shift;
1335 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1337 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1338 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1340 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1341 ap->id, dev->devno, xfer_mode_str[idx]);
1344 static int ata_host_set_pio(struct ata_port *ap)
1350 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1353 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1357 base = base_from_shift(ATA_SHIFT_PIO);
1358 xfer_mode = base + x;
1360 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1361 (int)base, (int)xfer_mode, mask, x);
1363 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1364 struct ata_device *dev = &ap->device[i];
1365 if (ata_dev_present(dev)) {
1366 dev->pio_mode = xfer_mode;
1367 dev->xfer_mode = xfer_mode;
1368 dev->xfer_shift = ATA_SHIFT_PIO;
1369 if (ap->ops->set_piomode)
1370 ap->ops->set_piomode(ap, dev);
1377 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1378 unsigned int xfer_shift)
1382 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1383 struct ata_device *dev = &ap->device[i];
1384 if (ata_dev_present(dev)) {
1385 dev->dma_mode = xfer_mode;
1386 dev->xfer_mode = xfer_mode;
1387 dev->xfer_shift = xfer_shift;
1388 if (ap->ops->set_dmamode)
1389 ap->ops->set_dmamode(ap, dev);
1395 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1396 * @ap: port on which timings will be programmed
1401 static void ata_set_mode(struct ata_port *ap)
1403 unsigned int i, xfer_shift;
1407 /* step 1: always set host PIO timings */
1408 rc = ata_host_set_pio(ap);
1412 /* step 2: choose the best data xfer mode */
1413 xfer_mode = xfer_shift = 0;
1414 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1418 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1419 if (xfer_shift != ATA_SHIFT_PIO)
1420 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1422 /* step 4: update devices' xfer mode */
1423 ata_dev_set_mode(ap, &ap->device[0]);
1424 ata_dev_set_mode(ap, &ap->device[1]);
1426 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1429 if (ap->ops->post_set_mode)
1430 ap->ops->post_set_mode(ap);
1432 for (i = 0; i < 2; i++) {
1433 struct ata_device *dev = &ap->device[i];
1434 ata_dev_set_protocol(dev);
1440 ata_port_disable(ap);
/*
 * Two-phase BSY poll: a short "impatience" window polled tightly, then
 * (after warning the user) a longer window up to @tmout.  Returns the
 * last status read; caller checks ATA_BUSY for timeout.
 */
1444 * ata_busy_sleep - sleep until BSY clears, or timeout
1445 * @ap: port containing status register to be polled
1446 * @tmout_pat: impatience timeout
1447 * @tmout: overall timeout
1453 static unsigned int ata_busy_sleep (struct ata_port *ap,
1454 unsigned long tmout_pat,
1455 unsigned long tmout)
1457 unsigned long timer_start, timeout;
1460 status = ata_busy_wait(ap, ATA_BUSY, 300);
1461 timer_start = jiffies;
1462 timeout = timer_start + tmout_pat;
/* phase 1: poll until BSY clears or the impatience window expires */
1463 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1465 status = ata_busy_wait(ap, ATA_BUSY, 3);
1468 if (status & ATA_BUSY)
1469 printk(KERN_WARNING "ata%u is slow to respond, "
1470 "please be patient\n", ap->id);
/* phase 2: keep polling up to the overall timeout (from same start) */
1472 timeout = timer_start + tmout;
1473 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1475 status = ata_chk_status(ap);
1478 if (status & ATA_BUSY) {
1479 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1480 ap->id, tmout / HZ);
/*
 * Post-reset settling: wait for device 0 (if detected) to drop BSY,
 * then poll device 1's shadow registers until nsect/lbal read back the
 * reset signature (1/1), then wait for its BSY too.  Finishes with the
 * traditional dev0/dev1/dev0 re-select dance.
 */
1487 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1489 struct ata_ioports *ioaddr = &ap->ioaddr;
1490 unsigned int dev0 = devmask & (1 << 0);
1491 unsigned int dev1 = devmask & (1 << 1);
1492 unsigned long timeout;
1494 /* if device 0 was found in ata_devchk, wait for its
1498 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1500 /* if device 1 was found in ata_devchk, wait for
1501 * register access, then wait for BSY to clear
1503 timeout = jiffies + ATA_TMOUT_BOOT;
1507 ap->ops->dev_select(ap, 1);
1508 if (ap->flags & ATA_FLAG_MMIO) {
1509 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1510 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1512 nsect = inb(ioaddr->nsect_addr);
1513 lbal = inb(ioaddr->lbal_addr);
/* 1/1 in nsect/lbal is the post-reset register signature */
1515 if ((nsect == 1) && (lbal == 1))
1517 if (time_after(jiffies, timeout)) {
1521 msleep(50); /* give drive a breather */
1524 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1526 /* is all this really necessary? */
1527 ap->ops->dev_select(ap, 0);
1529 ap->ops->dev_select(ap, 1);
1531 ap->ops->dev_select(ap, 0);
/*
 * Issue EXECUTE DEVICE DIAGNOSTIC (legacy bus-reset path), then wait
 * for BSY to clear.  Returns the final status from ata_busy_sleep().
 */
1542 static unsigned int ata_bus_edd(struct ata_port *ap)
1544 struct ata_taskfile tf;
1546 /* set up execute-device-diag (bus reset) taskfile */
1547 /* also, take interrupts to a known state (disabled) */
1548 DPRINTK("execute-device-diag\n");
1549 ata_tf_init(ap, &tf, 0);
1551 tf.command = ATA_CMD_EDD;
1552 tf.protocol = ATA_PROT_NODATA;
1555 ata_tf_to_host(ap, &tf);
1557 /* spec says at least 2ms. but who knows with those
1558 * crazy ATAPI devices...
1562 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/*
 * Software reset (SRST) via the device-control register: pulse
 * ctl | ATA_SRST with udelays around it, then hand off to
 * ata_bus_post_reset() for signature/BSY settling.  Both MMIO and
 * port-I/O register flavors are handled.
 */
1565 static unsigned int ata_bus_softreset(struct ata_port *ap,
1566 unsigned int devmask)
1568 struct ata_ioports *ioaddr = &ap->ioaddr;
1570 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1572 /* software reset. causes dev0 to be selected */
1573 if (ap->flags & ATA_FLAG_MMIO) {
1574 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1575 udelay(20); /* FIXME: flush */
1576 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1577 udelay(20); /* FIXME: flush */
1578 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1580 outb(ap->ctl, ioaddr->ctl_addr);
1582 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1584 outb(ap->ctl, ioaddr->ctl_addr);
1587 /* spec mandates ">= 2ms" before checking status.
1588 * We wait 150ms, because that was the magic delay used for
1589 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
1590 * between when the ATA command register is written, and then
1591 * status is checked. Because waiting for "a while" before
1592 * checking status is fine, post SRST, we perform this magic
1593 * delay here as well.
1597 ata_bus_post_reset(ap, devmask);
1603 * ata_bus_reset - reset host port and associated ATA channel
1604 * @ap: port to reset
1606 * This is typically the first time we actually start issuing
1607 * commands to the ATA channel. We wait for BSY to clear, then
1608 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
1609 * result. Determine what devices, if any, are on the channel
1610 * by looking at the device 0/1 error register. Look at the signature
1611 * stored in each device's taskfile registers, to determine if
1612 * the device is ATA or ATAPI.
1615 * Inherited from caller. Some functions called by this function
1616 * obtain the host_set lock.
1619 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
1622 void ata_bus_reset(struct ata_port *ap)
1624 struct ata_ioports *ioaddr = &ap->ioaddr;
1625 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
1627 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
1629 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
1631 /* determine if device 0/1 are present */
/* SATA-reset ports assume device presence instead of probing */
1632 if (ap->flags & ATA_FLAG_SATA_RESET)
1635 dev0 = ata_devchk(ap, 0);
1637 dev1 = ata_devchk(ap, 1);
1641 devmask |= (1 << 0);
1643 devmask |= (1 << 1);
1645 /* select device 0 again */
1646 ap->ops->dev_select(ap, 0);
1648 /* issue bus reset */
1649 if (ap->flags & ATA_FLAG_SRST)
1650 rc = ata_bus_softreset(ap, devmask);
1651 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
1652 /* set up device control */
1653 if (ap->flags & ATA_FLAG_MMIO)
1654 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1656 outb(ap->ctl, ioaddr->ctl_addr);
/* legacy fallback: EXECUTE DEVICE DIAGNOSTIC */
1657 rc = ata_bus_edd(ap);
1664 * determine by signature whether we have ATA or ATAPI devices
1666 err = ata_dev_try_classify(ap, 0);
/* err 0x81 means "device 0 ok, device 1 failed" -- skip dev 1 then */
1667 if ((slave_possible) && (err != 0x81))
1668 ata_dev_try_classify(ap, 1);
1670 /* re-enable interrupts */
1673 /* is double-select really necessary? */
1674 if (ap->device[1].class != ATA_DEV_NONE)
1675 ap->ops->dev_select(ap, 1);
1676 if (ap->device[0].class != ATA_DEV_NONE)
1677 ap->ops->dev_select(ap, 0);
1679 /* if no devices were detected, disable this port */
1680 if ((ap->device[0].class == ATA_DEV_NONE) &&
1681 (ap->device[1].class == ATA_DEV_NONE))
1684 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
1685 /* set up device control for ATA_FLAG_SATA_RESET */
1686 if (ap->flags & ATA_FLAG_MMIO)
1687 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1689 outb(ap->ctl, ioaddr->ctl_addr);
/* error path: no usable devices, take the port offline */
1696 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
1697 ap->ops->port_disable(ap);
/*
 * ata_get_mode_mask - compute the transfer-mode bitmask supported by
 * BOTH the port and every present device, for one mode class (@shift:
 * ATA_SHIFT_UDMA / ATA_SHIFT_MWDMA / ATA_SHIFT_PIO).  The port mask is
 * ANDed with each present device's IDENTIFY capability word.
 */
1702 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
1704 struct ata_device *master, *slave;
1707 master = &ap->device[0];
1708 slave = &ap->device[1];
1710 assert (ata_dev_present(master) || ata_dev_present(slave));
1712 if (shift == ATA_SHIFT_UDMA) {
1713 mask = ap->udma_mask;
1714 if (ata_dev_present(master))
1715 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
1716 if (ata_dev_present(slave))
1717 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
1719 else if (shift == ATA_SHIFT_MWDMA) {
1720 mask = ap->mwdma_mask;
1721 if (ata_dev_present(master))
1722 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
1723 if (ata_dev_present(slave))
1724 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
1726 else if (shift == ATA_SHIFT_PIO) {
1727 mask = ap->pio_mask;
1728 if (ata_dev_present(master)) {
1729 /* spec doesn't return explicit support for
1730 * PIO0-2, so we fake it
1732 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
1737 if (ata_dev_present(slave)) {
1738 /* spec doesn't return explicit support for
1739 * PIO0-2, so we fake it
1741 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
/* unreachable in practice: unknown shift class */
1748 mask = 0xffffffff; /* shut up compiler warning */
1755 /* find greatest bit */
/*
 * Returns the index of the highest set bit in @bitmap.  The visible
 * loop scans ascending and records matches; the return (elided here)
 * yields the last match.  NOTE(review): return value for bitmap==0 is
 * not visible in this listing -- confirm against full source.
 */
1756 static int fgb(u32 bitmap)
1761 for (i = 0; i < 32; i++)
1762 if (bitmap & (1 << i))
1769 * ata_choose_xfer_mode - attempt to find best transfer mode
1770 * @ap: Port for which an xfer mode will be selected
1771 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
1772 * @xfer_shift_out: (output) bit shift that selects this mode
1777 * Zero on success, negative on error.
/*
 * Walks xfer_mode_classes in priority order (UDMA before MWDMA before
 * PIO, per the table -- elided here); the first class with a non-empty
 * common mask wins, and its highest supported mode is reported.
 */
1780 static int ata_choose_xfer_mode(struct ata_port *ap,
1782 unsigned int *xfer_shift_out)
1784 unsigned int mask, shift;
1787 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
1788 shift = xfer_mode_classes[i].shift;
1789 mask = ata_get_mode_mask(ap, shift);
/* x = fgb(mask) (elided): index of best mode within the class */
1793 *xfer_mode_out = xfer_mode_classes[i].base + x;
1794 *xfer_shift_out = shift;
1803 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1804 * @ap: Port associated with device @dev
1805 * @dev: Device to which command will be sent
/*
 * Builds a no-data SET FEATURES taskfile with the mode code in the
 * sector-count register, issues it under the host_set lock, and blocks
 * on a completion until the command finishes.  May sleep.
 */
1810 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
1812 DECLARE_COMPLETION(wait);
1813 struct ata_queued_cmd *qc;
1815 unsigned long flags;
1817 /* set up set-features taskfile */
1818 DPRINTK("set features - xfer mode\n");
1820 qc = ata_qc_new_init(ap, dev);
1823 qc->tf.command = ATA_CMD_SET_FEATURES;
1824 qc->tf.feature = SETFEATURES_XFER;
1825 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1826 qc->tf.protocol = ATA_PROT_NODATA;
/* mode code travels in the sector count register per ATA spec */
1827 qc->tf.nsect = dev->xfer_mode;
1829 qc->waiting = &wait;
1830 qc->complete_fn = ata_qc_complete_noop;
1832 spin_lock_irqsave(&ap->host_set->lock, flags);
1833 rc = ata_qc_issue(qc);
1834 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* issue failure disables the port; otherwise wait for completion */
1837 ata_port_disable(ap);
1839 wait_for_completion(&wait);
/*
 * ata_sg_clean - undo the DMA mapping created for @qc: dma_unmap_sg()
 * for scatter-gather commands, dma_unmap_single() for single-buffer
 * ones.  Clears ATA_QCFLAG_DMAMAP when done.
 */
1851 static void ata_sg_clean(struct ata_queued_cmd *qc)
1853 struct ata_port *ap = qc->ap;
1854 struct scatterlist *sg = qc->sg;
1855 int dir = qc->dma_dir;
1857 assert(qc->flags & ATA_QCFLAG_DMAMAP);
1860 if (qc->flags & ATA_QCFLAG_SINGLE)
1861 assert(qc->n_elem == 1);
1863 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
1865 if (qc->flags & ATA_QCFLAG_SG)
1866 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
1868 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
1869 sg_dma_len(&sg[0]), dir);
1871 qc->flags &= ~ATA_QCFLAG_DMAMAP;
1876 * ata_fill_sg - Fill PCI IDE PRD table
1877 * @qc: Metadata associated with taskfile to be transferred
/*
 * Converts @qc's mapped scatterlist into the port's PRD (physical
 * region descriptor) table, splitting any segment that would cross a
 * 64KiB boundary (the hardware limit), and marks the final entry EOT.
 */
1882 static void ata_fill_sg(struct ata_queued_cmd *qc)
1884 struct scatterlist *sg = qc->sg;
1885 struct ata_port *ap = qc->ap;
1886 unsigned int idx, nelem;
1889 assert(qc->n_elem > 0);
1892 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
1896 /* determine if physical DMA addr spans 64K boundary.
1897 * Note h/w doesn't support 64-bit, so we unconditionally
1898 * truncate dma_addr_t to u32.
1900 addr = (u32) sg_dma_address(sg);
1901 sg_len = sg_dma_len(sg);
1904 offset = addr & 0xffff;
1906 if ((offset + sg_len) > 0x10000)
1907 len = 0x10000 - offset;
1909 ap->prd[idx].addr = cpu_to_le32(addr);
/* len==0x10000 encodes as 0 in the low 16 bits, per PRD format */
1910 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1911 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
1920 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1923 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
1924 * @qc: Metadata associated with taskfile to check
1927 * RETURNS: 0 when ATAPI DMA can be used
/*
 * Defaults to "DMA OK" (0) and defers to the LLD's optional
 * ->check_atapi_dma hook, which may veto DMA for this command.
 */
1930 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
1932 struct ata_port *ap = qc->ap;
1933 int rc = 0; /* Assume ATAPI DMA is OK by default */
1935 if (ap->ops->check_atapi_dma)
1936 rc = ap->ops->check_atapi_dma(qc);
1941 * ata_qc_prep - Prepare taskfile for submission
1942 * @qc: Metadata associated with taskfile to be prepared
1945 * spin_lock_irqsave(host_set lock)
/*
 * Default ->qc_prep: build the PRD table via ata_fill_sg() (call
 * elided in this listing) only for DMA-mapped commands.
 */
1947 void ata_qc_prep(struct ata_queued_cmd *qc)
1949 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/*
 * ata_sg_init_one - point @qc at a single kernel buffer by building a
 * one-entry scatterlist (qc->sgent) around @buf/@buflen and setting
 * ATA_QCFLAG_SINGLE.  The actual DMA mapping happens later in
 * ata_sg_setup_one().
 */
1955 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
1957 struct scatterlist *sg;
1959 qc->flags |= ATA_QCFLAG_SINGLE;
1961 memset(&qc->sgent, 0, sizeof(qc->sgent));
1962 qc->sg = &qc->sgent;
1967 sg->page = virt_to_page(buf);
1968 sg->offset = (unsigned long) buf & ~PAGE_MASK;
1969 sg_dma_len(sg) = buflen;
/*
 * ata_sg_init - attach a caller-provided scatterlist of @n_elem
 * entries to @qc and mark it as a scatter-gather command.
 */
1972 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
1973 unsigned int n_elem)
1975 qc->flags |= ATA_QCFLAG_SG;
1977 qc->n_elem = n_elem;
1981 * ata_sg_setup_one -
1985 * spin_lock_irqsave(host_set lock)
/*
 * DMA-map the single buffer previously registered by
 * ata_sg_init_one().  Returns nonzero (elided) on mapping failure.
 */
1991 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
1993 struct ata_port *ap = qc->ap;
1994 int dir = qc->dma_dir;
1995 struct scatterlist *sg = qc->sg;
1996 dma_addr_t dma_address;
1998 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
1999 sg_dma_len(sg), dir);
2000 if (dma_mapping_error(dma_address))
2003 sg_dma_address(sg) = dma_address;
2005 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2006 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2016 * spin_lock_irqsave(host_set lock)
/*
 * DMA-map a multi-entry scatterlist with dma_map_sg() and record the
 * (possibly coalesced) element count back into qc->n_elem.
 */
2022 static int ata_sg_setup(struct ata_queued_cmd *qc)
2024 struct ata_port *ap = qc->ap;
2025 struct scatterlist *sg = qc->sg;
2028 VPRINTK("ENTER, ata%u\n", ap->id);
2029 assert(qc->flags & ATA_QCFLAG_SG);
2032 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2036 DPRINTK("%d sg elements mapped\n", n_elem);
/* dma_map_sg may merge segments; use its count from here on */
2038 qc->n_elem = n_elem;
/*
 * ata_pio_poll - one polling step of the PIO state machine.  While the
 * device is still BSY: time out to PIO_ST_TMOUT, or stay in the
 * matching *_POLL state and ask to be re-queued after ATA_SHORT_PAUSE.
 * When BSY clears, transition back to the non-poll state (PIO_ST or
 * PIO_ST_LAST).  Returns the requeue delay (0 when not visible here).
 */
2053 static unsigned long ata_pio_poll(struct ata_port *ap)
2056 unsigned int poll_state = PIO_ST_UNKNOWN;
2057 unsigned int reg_state = PIO_ST_UNKNOWN;
2058 const unsigned int tmout_state = PIO_ST_TMOUT;
2060 switch (ap->pio_task_state) {
2063 poll_state = PIO_ST_POLL;
2067 case PIO_ST_LAST_POLL:
2068 poll_state = PIO_ST_LAST_POLL;
2069 reg_state = PIO_ST_LAST;
2076 status = ata_chk_status(ap);
2077 if (status & ATA_BUSY) {
2078 if (time_after(jiffies, ap->pio_task_timeout)) {
2079 ap->pio_task_state = tmout_state;
2082 ap->pio_task_state = poll_state;
2083 return ATA_SHORT_PAUSE;
2086 ap->pio_task_state = reg_state;
2091 * ata_pio_complete -
/*
 * Final step of a PIO command: wait briefly for BSY|DRQ to drop (fast
 * path), falling back to the PIO_ST_LAST_POLL polled path if the drive
 * is slow; then verify the final status with ata_ok() and complete the
 * active qc, returning the state machine to PIO_ST_IDLE.
 */
2097 static void ata_pio_complete (struct ata_port *ap)
2099 struct ata_queued_cmd *qc;
2103 * This is purely hueristic. This is a fast path.
2104 * Sometimes when we enter, BSY will be cleared in
2105 * a chk-status or two. If not, the drive is probably seeking
2106 * or something. Snooze for a couple msecs, then
2107 * chk-status again. If still busy, fall back to
2108 * PIO_ST_POLL state.
2110 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2111 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2113 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2114 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2115 ap->pio_task_state = PIO_ST_LAST_POLL;
2116 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2121 drv_stat = ata_wait_idle(ap);
2122 if (!ata_ok(drv_stat)) {
2123 ap->pio_task_state = PIO_ST_ERR;
2127 qc = ata_qc_from_tag(ap, ap->active_tag);
2130 ap->pio_task_state = PIO_ST_IDLE;
2134 ata_qc_complete(qc, drv_stat);
/*
 * swap_buf_le16 - byte-swap a buffer of 16-bit words in place between
 * little-endian (ATA wire order) and CPU order.  The loop body is
 * compiled only on big-endian kernels (the opening #ifdef __BIG_ENDIAN
 * is elided from this listing; its #endif is visible below); on
 * little-endian machines this is a no-op.
 */
2137 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2142 for (i = 0; i < buf_words; i++)
2143 buf[i] = le16_to_cpu(buf[i]);
2144 #endif /* __BIG_ENDIAN */
/*
 * ata_mmio_data_xfer - move @buflen bytes (as 16-bit words) between
 * @buf and the MMIO data register; direction chosen by @write_data.
 * NOTE(review): an odd trailing byte (buflen & 1) is not handled in
 * the visible lines -- confirm against full source.
 */
2147 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2148 unsigned int buflen, int write_data)
2151 unsigned int words = buflen >> 1;
2152 u16 *buf16 = (u16 *) buf;
2153 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
/* write path: words go out in little-endian wire order */
2156 for (i = 0; i < words; i++)
2157 writew(le16_to_cpu(buf16[i]), mmio);
/* read path (else branch; keyword elided in this listing) */
2159 for (i = 0; i < words; i++)
2160 buf16[i] = cpu_to_le16(readw(mmio));
/*
 * ata_pio_data_xfer - port-I/O variant of the data transfer: string
 * in/out of 16-bit words on the data port.
 * NOTE(review): the local is misnamed -- buflen >> 1 is a count of
 * 16-bit WORDS, not dwords; outsw/insw take a word count, so the
 * behavior is correct despite the name.
 */
2164 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2165 unsigned int buflen, int write_data)
2167 unsigned int dwords = buflen >> 1;
2170 outsw(ap->ioaddr.data_addr, buf, dwords);
2172 insw(ap->ioaddr.data_addr, buf, dwords);
/*
 * ata_data_xfer - dispatch a PIO data transfer to the MMIO or
 * port-I/O implementation based on ATA_FLAG_MMIO.
 */
2175 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2176 unsigned int buflen, int do_write)
2178 if (ap->flags & ATA_FLAG_MMIO)
2179 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2181 ata_pio_data_xfer(ap, buf, buflen, do_write);
/*
 * ata_pio_sector - transfer the next single sector of a PIO command.
 * Locates the current position inside the scatterlist via
 * cursg/cursg_ofs, kmaps the right page, and moves ATA_SECT_SIZE bytes
 * in the direction given by the taskfile.  Flips the state machine to
 * PIO_ST_LAST when this is the final sector.
 */
2184 static void ata_pio_sector(struct ata_queued_cmd *qc)
2186 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2187 struct scatterlist *sg = qc->sg;
2188 struct ata_port *ap = qc->ap;
2190 unsigned int offset;
2193 if (qc->cursect == (qc->nsect - 1))
2194 ap->pio_task_state = PIO_ST_LAST;
2196 page = sg[qc->cursg].page;
2197 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2199 /* get the current page and offset */
2200 page = nth_page(page, (offset >> PAGE_SHIFT));
2201 offset %= PAGE_SIZE;
2203 buf = kmap(page) + offset;
/* advance to the next sg entry when this one is exhausted */
2208 if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) {
2213 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2215 /* do the actual data transfer */
2216 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2217 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
/*
 * __atapi_pio_bytes - transfer up to @bytes of ATAPI PIO data at the
 * current scatterlist position, clamped to the sg entry remainder and
 * to the current page (no page-boundary crossing in one burst).
 * NOTE(review): sg_dma_len() is used here on a PIO (non-DMA-mapped)
 * scatterlist; on this kernel it presumably aliases sg->length --
 * verify against the era's scatterlist definition.
 */
2222 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2224 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2225 struct scatterlist *sg = qc->sg;
2226 struct ata_port *ap = qc->ap;
2229 unsigned int offset, count;
/* this chunk finishes the command: arm the LAST state */
2231 if (qc->curbytes == qc->nbytes - bytes)
2232 ap->pio_task_state = PIO_ST_LAST;
2235 sg = &qc->sg[qc->cursg];
2239 offset = sg->offset + qc->cursg_ofs;
2241 /* get the current page and offset */
2242 page = nth_page(page, (offset >> PAGE_SHIFT));
2243 offset %= PAGE_SIZE;
2245 count = min(sg_dma_len(sg) - qc->cursg_ofs, bytes);
2247 /* don't cross page boundaries */
2248 count = min(count, (unsigned int)PAGE_SIZE - offset);
2250 buf = kmap(page) + offset;
2253 qc->curbytes += count;
2254 qc->cursg_ofs += count;
/* sg entry consumed: step to the next one */
2256 if (qc->cursg_ofs == sg_dma_len(sg)) {
2261 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2263 /* do the actual data transfer */
2264 ata_data_xfer(ap, buf, count, do_write);
/* more bytes remain in this sg entry: loop again (elided) */
2269 if (qc->cursg_ofs < sg_dma_len(sg))
/*
 * atapi_pio_bytes - read the device's interrupt reason and byte count
 * from the shadow taskfile, validate that the device is actually
 * transferring data in the expected direction, then hand off to
 * __atapi_pio_bytes().  Any protocol violation lands in the error
 * path at the bottom (PIO_ST_ERR).
 */
2275 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2277 struct ata_port *ap = qc->ap;
2278 struct ata_device *dev = qc->dev;
2279 unsigned int ireason, bc_lo, bc_hi, bytes;
2280 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2282 ap->ops->tf_read(ap, &qc->tf);
2283 ireason = qc->tf.nsect;
2284 bc_lo = qc->tf.lbam;
2285 bc_hi = qc->tf.lbah;
2286 bytes = (bc_hi << 8) | bc_lo;
2288 /* shall be cleared to zero, indicating xfer of data */
2289 if (ireason & (1 << 0))
2292 /* make sure transfer direction matches expected */
/* ireason bit 1 set means device->host; i_write means host->device */
2293 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2294 if (do_write != i_write)
2297 __atapi_pio_bytes(qc, bytes);
/* error path (label elided): flag the failure and abort via ERR state */
2302 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2303 ap->id, dev->devno);
2304 ap->pio_task_state = PIO_ST_ERR;
/*
 * ata_pio_block - transfer one block in the PIO_ST state: fast-path
 * BSY wait (falling back to PIO_ST_POLL if the drive dawdles), then
 * either an ATAPI byte transfer or an ATA sector transfer.  DRQ==0 is
 * command completion for ATAPI, an error for ATA data phases.
 */
2314 static void ata_pio_block(struct ata_port *ap)
2316 struct ata_queued_cmd *qc;
2320 * This is purely hueristic. This is a fast path.
2321 * Sometimes when we enter, BSY will be cleared in
2322 * a chk-status or two. If not, the drive is probably seeking
2323 * or something. Snooze for a couple msecs, then
2324 * chk-status again. If still busy, fall back to
2325 * PIO_ST_POLL state.
2327 status = ata_busy_wait(ap, ATA_BUSY, 5);
2328 if (status & ATA_BUSY) {
2330 status = ata_busy_wait(ap, ATA_BUSY, 10);
2331 if (status & ATA_BUSY) {
2332 ap->pio_task_state = PIO_ST_POLL;
2333 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2338 qc = ata_qc_from_tag(ap, ap->active_tag);
2341 if (is_atapi_taskfile(&qc->tf)) {
2342 /* no more data to transfer or unsupported ATAPI command */
2343 if ((status & ATA_DRQ) == 0) {
2344 ap->pio_task_state = PIO_ST_IDLE;
2348 ata_qc_complete(qc, status);
2352 atapi_pio_bytes(qc);
/* ATA branch (else; keyword elided in this listing) */
2354 /* handle BSY=0, DRQ=0 as error */
2355 if ((status & ATA_DRQ) == 0) {
2356 ap->pio_task_state = PIO_ST_ERR;
/*
 * ata_pio_error - terminal error state of the PIO machine: log the
 * drive status, idle the state machine, and complete the active qc
 * with ATA_ERR forced into the status.
 */
2364 static void ata_pio_error(struct ata_port *ap)
2366 struct ata_queued_cmd *qc;
2369 qc = ata_qc_from_tag(ap, ap->active_tag);
2372 drv_stat = ata_chk_status(ap);
2373 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2376 ap->pio_task_state = PIO_ST_IDLE;
2380 ata_qc_complete(qc, drv_stat | ATA_ERR);
/*
 * ata_pio_task - workqueue entry for the polled-PIO state machine.
 * Dispatches on pio_task_state (full case list elided in this
 * listing) and, unless the machine went idle, re-queues itself --
 * delayed if a poll state asked for a pause, immediately otherwise.
 */
2383 static void ata_pio_task(void *_data)
2385 struct ata_port *ap = _data;
2386 unsigned long timeout = 0;
2388 switch (ap->pio_task_state) {
2397 ata_pio_complete(ap);
2401 case PIO_ST_LAST_POLL:
2402 timeout = ata_pio_poll(ap);
2412 queue_delayed_work(ata_wq, &ap->pio_task,
2415 queue_work(ata_wq, &ap->pio_task);
/*
 * atapi_request_sense - synchronously issue an ATAPI REQUEST SENSE
 * for @dev on @ap, depositing sense data into @cmd->sense_buffer.
 * Builds a PACKET taskfile around a REQUEST SENSE CDB, issues it under
 * the host_set lock, and blocks on a completion.  May sleep.
 */
2418 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2419 struct scsi_cmnd *cmd)
2421 DECLARE_COMPLETION(wait);
2422 struct ata_queued_cmd *qc;
2423 unsigned long flags;
2426 DPRINTK("ATAPI request sense\n");
2428 qc = ata_qc_new_init(ap, dev);
2431 /* FIXME: is this needed? */
2432 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2434 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2435 qc->dma_dir = DMA_FROM_DEVICE;
/* BUG FIX: was sizeof(ap->cdb_len), i.e. sizeof(unsigned int) == 4,
 * which zeroed only the first 4 CDB bytes and sent stale garbage in
 * the rest of the 12/16-byte packet.  Zero the whole CDB array. */
2437 memset(&qc->cdb, 0, sizeof(qc->cdb));
2438 qc->cdb[0] = REQUEST_SENSE;
2439 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2441 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2442 qc->tf.command = ATA_CMD_PACKET;
/* byte-count limit (lbam/lbah) advertised to the device: 8KiB */
2444 qc->tf.protocol = ATA_PROT_ATAPI;
2445 qc->tf.lbam = (8 * 1024) & 0xff;
2446 qc->tf.lbah = (8 * 1024) >> 8;
2447 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2449 qc->waiting = &wait;
2450 qc->complete_fn = ata_qc_complete_noop;
2452 spin_lock_irqsave(&ap->host_set->lock, flags);
2453 rc = ata_qc_issue(qc);
2454 spin_unlock_irqrestore(&ap->host_set->lock, flags);
/* issue failure disables the port; otherwise wait for completion */
2457 ata_port_disable(ap);
2459 wait_for_completion(&wait);
2465 * ata_qc_timeout - Handle timeout of queued command
2466 * @qc: Command that timed out
2468 * Some part of the kernel (currently, only the SCSI layer)
2469 * has noticed that the active command on port @ap has not
2470 * completed after a specified length of time. Handle this
2471 * condition by disabling DMA (if necessary) and completing
2472 * transactions, with error if necessary.
2474 * This also handles the case of the "lost interrupt", where
2475 * for some reason (possibly hardware bug, possibly driver bug)
2476 * an interrupt was not delivered to the driver, even though the
2477 * transaction completed successfully.
2482 static void ata_qc_timeout(struct ata_queued_cmd *qc)
2484 struct ata_port *ap = qc->ap;
2485 struct ata_device *dev = qc->dev;
2486 u8 host_stat = 0, drv_stat;
2490 /* FIXME: doesn't this conflict with timeout handling? */
/* ATAPI special case: fetch sense data before finishing the command */
2491 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2492 struct scsi_cmnd *cmd = qc->scsicmd;
2494 if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
2496 /* finish completing original command */
2497 __ata_qc_complete(qc);
2499 atapi_request_sense(ap, dev, cmd);
2501 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2502 scsi_finish_command(cmd);
2508 /* hack alert! We cannot use the supplied completion
2509 * function from inside the ->eh_strategy_handler() thread.
2510 * libata is the only user of ->eh_strategy_handler() in
2511 * any kernel, so the default scsi_done() assumes it is
2512 * not being called from the SCSI EH.
2514 qc->scsidone = scsi_finish_command;
2516 switch (qc->tf.protocol) {
2519 case ATA_PROT_ATAPI_DMA:
2520 host_stat = ata_bmdma_status(ap);
2522 /* before we do anything else, clear DMA-Start bit */
2529 drv_stat = ata_chk_status(ap);
2531 /* ack bmdma irq events */
2532 ata_bmdma_ack_irq(ap);
2534 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2535 ap->id, qc->tf.command, drv_stat, host_stat);
2537 /* complete taskfile transaction */
2538 ata_qc_complete(qc, drv_stat);
2546 * ata_eng_timeout - Handle timeout of queued command
2547 * @ap: Port on which timed-out command is active
2549 * Some part of the kernel (currently, only the SCSI layer)
2550 * has noticed that the active command on port @ap has not
2551 * completed after a specified length of time. Handle this
2552 * condition by disabling DMA (if necessary) and completing
2553 * transactions, with error if necessary.
2555 * This also handles the case of the "lost interrupt", where
2556 * for some reason (possibly hardware bug, possibly driver bug)
2557 * an interrupt was not delivered to the driver, even though the
2558 * transaction completed successfully.
2561 * Inherited from SCSI layer (none, can sleep)
/* Thin wrapper: look up the active qc and (in elided lines,
 * presumably) hand it to ata_qc_timeout(); warn if none is active. */
2564 void ata_eng_timeout(struct ata_port *ap)
2566 struct ata_queued_cmd *qc;
2570 qc = ata_qc_from_tag(ap, ap->active_tag);
2572 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
2584 * ata_qc_new - Request an available ATA command, for queueing
2585 * @ap: Port associated with device @dev
2586 * @dev: Device from whom we request an available command structure
/*
 * Atomically claims the first free tag in ap->qactive and returns its
 * qc; returns NULL (initial value) when all tags are in use.
 */
2591 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
2593 struct ata_queued_cmd *qc = NULL;
2596 for (i = 0; i < ATA_MAX_QUEUE; i++)
2597 if (!test_and_set_bit(i, &ap->qactive)) {
2598 qc = ata_qc_from_tag(ap, i);
2609 * ata_qc_new_init - Request an available ATA command, and initialize it
2610 * @ap: Port associated with device @dev
2611 * @dev: Device from whom we request an available command structure
/*
 * Allocates a qc via ata_qc_new() and initializes its cursors, byte
 * counters and taskfile for @dev (NULL-check of qc elided in this
 * listing).  Sets LBA48 taskfile addressing when the device uses it.
 */
2616 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
2617 struct ata_device *dev)
2619 struct ata_queued_cmd *qc;
2621 qc = ata_qc_new(ap);
2628 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
2630 qc->nbytes = qc->curbytes = 0;
2632 ata_tf_init(ap, &qc->tf, dev->devno);
2634 if (dev->flags & ATA_DFLAG_LBA48)
2635 qc->tf.flags |= ATA_TFLAG_LBA48;
/* No-op completion callback: returns 0 (body elided) so that
 * __ata_qc_complete() proceeds; used by synchronous internal commands
 * that wait on qc->waiting instead. */
2641 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
/*
 * __ata_qc_complete - low-level qc teardown: poison the tag, drop the
 * port's active_tag if this was it, wake any synchronous waiter, and
 * finally release the tag bit back to ap->qactive.
 */
2646 static void __ata_qc_complete(struct ata_queued_cmd *qc)
2648 struct ata_port *ap = qc->ap;
2649 unsigned int tag, do_clear = 0;
2653 if (likely(ata_tag_valid(tag))) {
2654 if (tag == ap->active_tag)
2655 ap->active_tag = ATA_TAG_POISON;
2656 qc->tag = ATA_TAG_POISON;
/* snapshot the waiter before releasing the qc to avoid a race */
2661 struct completion *waiting = qc->waiting;
2666 if (likely(do_clear))
2667 clear_bit(tag, &ap->qactive);
2671 * ata_qc_complete - Complete an active ATA command
2672 * @qc: Command to complete
2673 * @drv_stat: ATA status register contents
/*
 * Unmaps DMA if mapped, invokes the command's completion callback, and
 * unless the callback claims ownership (non-zero return) finishes the
 * teardown via __ata_qc_complete().
 */
2679 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2683 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2684 assert(qc->flags & ATA_QCFLAG_ACTIVE);
2686 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
2689 /* call completion callback */
2690 rc = qc->complete_fn(qc, drv_stat);
2692 /* if callback indicates not to complete command (non-zero),
2693 * return immediately
2698 __ata_qc_complete(qc);
/*
 * ata_should_dma_map - decide whether @qc's data buffer needs DMA
 * mapping: DMA protocols always do; PIO protocols only when the port
 * uses DMA for PIO (ATA_FLAG_PIO_DMA).  Remaining cases (elided)
 * return false.
 */
2703 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
2705 struct ata_port *ap = qc->ap;
2707 switch (qc->tf.protocol) {
2709 case ATA_PROT_ATAPI_DMA:
2712 case ATA_PROT_ATAPI:
2714 case ATA_PROT_PIO_MULT:
2715 if (ap->flags & ATA_FLAG_PIO_DMA)
2728 * ata_qc_issue - issue taskfile to device
2729 * @qc: command to issue to device
2731 * Prepare an ATA command to submission to device.
2732 * This includes mapping the data into a DMA-able
2733 * area, filling in the S/G table, and finally
2734 * writing the taskfile to hardware, starting the command.
2737 * spin_lock_irqsave(host_set lock)
2740 * Zero on success, negative on error.
2743 int ata_qc_issue(struct ata_queued_cmd *qc)
2745 struct ata_port *ap = qc->ap;
/* map data for DMA when the protocol needs it; SG and single-buffer
 * commands take different mapping paths */
2747 if (ata_should_dma_map(qc)) {
2748 if (qc->flags & ATA_QCFLAG_SG) {
2749 if (ata_sg_setup(qc))
2751 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
2752 if (ata_sg_setup_one(qc))
/* non-DMA command: make sure the unmap flag is not set */
2756 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2759 ap->ops->qc_prep(qc);
2761 qc->ap->active_tag = qc->tag;
2762 qc->flags |= ATA_QCFLAG_ACTIVE;
2764 return ap->ops->qc_issue(qc);
2771 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
2772 * @qc: command to issue to device
2774 * Using various libata functions and hooks, this function
2775 * starts an ATA command. ATA commands are grouped into
2776 * classes called "protocols", and issuing each type of protocol
2777 * is slightly different.
2780 * spin_lock_irqsave(host_set lock)
2783 * Zero on success, negative on error.
2786 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
2788 struct ata_port *ap = qc->ap;
2790 ata_dev_select(ap, qc->dev->devno, 1, 0);
2792 switch (qc->tf.protocol) {
2793 case ATA_PROT_NODATA:
2794 ata_tf_to_host_nolock(ap, &qc->tf);
/* ATA_PROT_DMA case (label elided): full bmdma setup + start */
2798 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
2799 ap->ops->bmdma_setup(qc); /* set up bmdma */
2800 ap->ops->bmdma_start(qc); /* initiate bmdma */
2803 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
2804 ata_qc_set_polling(qc);
2805 ata_tf_to_host_nolock(ap, &qc->tf);
2806 ap->pio_task_state = PIO_ST;
2807 queue_work(ata_wq, &ap->pio_task);
2810 case ATA_PROT_ATAPI:
2811 ata_qc_set_polling(qc);
2812 ata_tf_to_host_nolock(ap, &qc->tf);
/* CDB delivery is deferred to the packet task in all ATAPI modes */
2813 queue_work(ata_wq, &ap->packet_task);
2816 case ATA_PROT_ATAPI_NODATA:
2817 ata_tf_to_host_nolock(ap, &qc->tf);
2818 queue_work(ata_wq, &ap->packet_task);
2821 case ATA_PROT_ATAPI_DMA:
2822 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
2823 ap->ops->bmdma_setup(qc); /* set up bmdma */
2824 queue_work(ata_wq, &ap->packet_task);
2836 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2837 * @qc: Info associated with this ATA transaction.
2840 * spin_lock_irqsave(host_set lock)
/*
 * MMIO flavor: program the PRD table address, set the direction bit
 * with START guaranteed clear, and write the r/w command -- but do not
 * start DMA yet (that is ata_bmdma_start_mmio()'s job).
 */
2843 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
2845 struct ata_port *ap = qc->ap;
2846 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2848 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
2850 /* load PRD table addr. */
2851 mb(); /* make sure PRD table writes are visible to controller */
2852 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
2854 /* specify data direction, triple-check start bit is clear */
2855 dmactl = readb(mmio + ATA_DMA_CMD);
2856 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2858 dmactl |= ATA_DMA_WR;
2859 writeb(dmactl, mmio + ATA_DMA_CMD);
2861 /* issue r/w command */
2862 ap->ops->exec_command(ap, &qc->tf);
2866 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2867 * @qc: Info associated with this ATA transaction.
2870 * spin_lock_irqsave(host_set lock)
/* MMIO flavor: set ATA_DMA_START in the previously-programmed command
 * register; the write is deliberately not flushed (see note below). */
2873 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
2875 struct ata_port *ap = qc->ap;
2876 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
2879 /* start host DMA transaction */
2880 dmactl = readb(mmio + ATA_DMA_CMD);
2881 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
2883 /* Strictly, one may wish to issue a readb() here, to
2884 * flush the mmio write. However, control also passes
2885 * to the hardware at this point, and it will interrupt
2886 * us when we are to resume control. So, in effect,
2887 * we don't care when the mmio write flushes.
2888 * Further, a read of the DMA status register _immediately_
2889 * following the write may not be what certain flaky hardware
2890 * is expected, so I think it is best to not add a readb()
2891 * without first all the MMIO ATA cards/mobos.
2892 * Or maybe I'm just being paranoid.
2897 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
2898 * @qc: Info associated with this ATA transaction.
2901 * spin_lock_irqsave(host_set lock)
/* Port-I/O flavor of ata_bmdma_setup_mmio(): same PRD-address,
 * direction-bit and exec_command sequence via outl/inb/outb. */
2904 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
2906 struct ata_port *ap = qc->ap;
2907 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2910 /* load PRD table addr. */
2911 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2913 /* specify data direction, triple-check start bit is clear */
2914 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2915 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2917 dmactl |= ATA_DMA_WR;
2918 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2920 /* issue r/w command */
2921 ap->ops->exec_command(ap, &qc->tf);
2925 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
2926 * @qc: Info associated with this ATA transaction.
2929 * spin_lock_irqsave(host_set lock)
/* Port-I/O flavor: read-modify-write the BMDMA command register to
 * set ATA_DMA_START, kicking off the transfer. */
2932 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
2934 struct ata_port *ap = qc->ap;
2937 /* start host DMA transaction */
2938 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2939 outb(dmactl | ATA_DMA_START,
2940 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* Dispatch BMDMA start to the MMIO or port-I/O implementation. */
2943 void ata_bmdma_start(struct ata_queued_cmd *qc)
2945 if (qc->ap->flags & ATA_FLAG_MMIO)
2946 ata_bmdma_start_mmio(qc);
2948 ata_bmdma_start_pio(qc);
/* Dispatch BMDMA setup to the MMIO or port-I/O implementation. */
2951 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2953 if (qc->ap->flags & ATA_FLAG_MMIO)
2954 ata_bmdma_setup_mmio(qc);
2956 ata_bmdma_setup_pio(qc);
/* Default ->irq_clear hook: acknowledge pending BMDMA irq events. */
2959 void ata_bmdma_irq_clear(struct ata_port *ap)
2961 ata_bmdma_ack_irq(ap);
2965 * ata_host_intr - Handle host interrupt for given (port, task)
2966 * @ap: Port on which interrupt arrived (possibly...)
2967 * @qc: Taskfile currently active in engine
2969 * Handle host interrupt for given queued command. Currently,
2970 * only DMA interrupts are handled. All other commands are
2971 * handled via polling with interrupts disabled (nIEN bit).
2974 * spin_lock_irqsave(host_set lock)
2977 * One if interrupt was handled, zero if not (shared irq).
2980 inline unsigned int ata_host_intr (struct ata_port *ap,
2981 struct ata_queued_cmd *qc)
2983 u8 status, host_stat;
2985 switch (qc->tf.protocol) {
/* DMA-class protocols: confirm the BMDMA engine raised this irq */
2988 case ATA_PROT_ATAPI_DMA:
2989 case ATA_PROT_ATAPI:
2990 /* check status of DMA engine */
2991 host_stat = ata_bmdma_status(ap);
2992 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
2994 /* if it's not our irq... */
2995 if (!(host_stat & ATA_DMA_INTR))
2998 /* before we do anything else, clear DMA-Start bit */
3003 case ATA_PROT_ATAPI_NODATA:
3004 case ATA_PROT_NODATA:
3005 /* check altstatus */
/* altstatus read does not clear INTRQ; safe to peek first */
3006 status = ata_altstatus(ap);
3007 if (status & ATA_BUSY)
3010 /* check main status, clearing INTRQ */
3011 status = ata_chk_status(ap);
3012 if (unlikely(status & ATA_BUSY))
3014 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3015 ap->id, qc->tf.protocol, status);
3017 /* ack bmdma irq events */
3018 ata_bmdma_ack_irq(ap);
3020 /* complete taskfile transaction */
3021 ata_qc_complete(qc, status);
3028 return 1; /* irq handled */
/* idle-irq path: count it, periodically trap/log for debugging */
3031 ap->stats.idle_irq++;
3034 if ((ap->stats.idle_irq % 1000) == 0) {
3036 ata_irq_ack(ap, 0); /* debug trap */
3037 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
3040 return 0; /* irq not handled */
3044  * ata_interrupt - Default ATA host interrupt handler
 * @irq: irq line (unused)
3046  * @dev_instance: pointer to our host information structure
 * @regs: processor registers at interrupt time (unused)
 *
 * Walks every port on the host set and lets ata_host_intr() decide
 * whether the active command on that port owns this interrupt.
 *
 * RETURNS:
 * IRQ_HANDLED if any port claimed the interrupt, IRQ_NONE otherwise.
3055 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3057 	struct ata_host_set *host_set = dev_instance;
3059 	unsigned int handled = 0;
3060 	unsigned long flags;
3062 	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3063 	spin_lock_irqsave(&host_set->lock, flags);
3065 	for (i = 0; i < host_set->n_ports; i++) {
3066 		struct ata_port *ap;
3068 		ap = host_set->ports[i];
3069 		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3070 			struct ata_queued_cmd *qc;
3072 			qc = ata_qc_from_tag(ap, ap->active_tag);
			/* Only consider the command if device interrupts
			 * are enabled for it (nIEN clear). */
3073 			if (qc && (!(qc->tf.ctl & ATA_NIEN)))
3074 				handled |= ata_host_intr(ap, qc);
3078 	spin_unlock_irqrestore(&host_set->lock, flags);
3080 	return IRQ_RETVAL(handled);
3084  * atapi_packet_task - Write CDB bytes to hardware
3085  * @_data: Port to which ATAPI device is attached.
3087  * When device has indicated its readiness to accept
3088  * a CDB, this function is called.  Send the CDB.
3089  * If DMA is to be performed, exit immediately.
3090  * Otherwise, we are in polling mode, so poll
3091  * status until operation succeeds or fails.
 *
 * LOCKING:
3094  * Kernel thread context (may sleep)
3097 static void atapi_packet_task(void *_data)
3099 	struct ata_port *ap = _data;
3100 	struct ata_queued_cmd *qc;
3103 	qc = ata_qc_from_tag(ap, ap->active_tag);
3105 	assert(qc->flags & ATA_QCFLAG_ACTIVE);
3107 	/* sleep-wait for BSY to clear */
3108 	DPRINTK("busy wait\n");
3109 	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3112 	/* make sure DRQ is set */
	/* Device must be requesting data (DRQ set, BSY clear) before we
	 * may transfer the command packet. */
3113 	status = ata_chk_status(ap);
3114 	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3118 	DPRINTK("send cdb\n");
3119 	assert(ap->cdb_len >= 12);
3120 	ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3122 	/* if we are DMA'ing, irq handler takes over from here */
3123 	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3124 		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
3126 	/* non-data commands are also handled via irq */
3127 	else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3131 	/* PIO commands are handled by polling */
3133 	ap->pio_task_state = PIO_ST;
3134 	queue_work(ata_wq, &ap->pio_task);
	/* error exit: fail the queued command with ATA_ERR status */
3140 	ata_qc_complete(qc, ATA_ERR);
/**
 *	ata_port_start - Default port_start callback
 *	@ap: Port to initialize
 *
 *	Allocates the coherent DMA buffer used for the port's PRD
 *	(Physical Region Descriptor) table.
 */
3143 int ata_port_start (struct ata_port *ap)
3145 	struct device *dev = ap->host_set->dev;
3147 	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3151 	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
/**
 *	ata_port_stop - Default port_stop callback
 *	@ap: Port to shut down
 *
 *	Frees the PRD table allocated by ata_port_start().
 */
3156 void ata_port_stop (struct ata_port *ap)
3158 	struct device *dev = ap->host_set->dev;
3160 	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3164  * ata_host_remove - Unregister SCSI host structure with upper layers
3165  * @ap: Port to unregister
3166  * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3171 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3173 	struct Scsi_Host *sh = ap->host;
3178 		scsi_remove_host(sh);
	/* Always invoke the low-level driver's port teardown hook. */
3180 	ap->ops->port_stop(ap);
3184  * ata_host_init - Initialize an ata_port structure
3185  * @ap: Structure to initialize
3186  * @host: associated SCSI mid-layer structure
3187  * @host_set: Collection of hosts to which @ap belongs
3188  * @ent: Probe information provided by low-level driver
3189  * @port_no: Port number associated with this ata_port
 *
 * Copies probe-time configuration (transfer-mode masks, flags, ops,
 * I/O addresses) from @ent into @ap and wires up the SCSI host.
3195 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3196 			  struct ata_host_set *host_set,
3197 			  struct ata_probe_ent *ent, unsigned int port_no)
3203 	host->max_channel = 1;
3204 	host->unique_id = ata_unique_id++;
3205 	host->max_cmd_len = 12;
3206 	scsi_set_device(host, ent->dev);
3207 	scsi_assign_lock(host, &host_set->lock);
	/* Port starts disabled; probe enables it on success. */
3209 	ap->flags = ATA_FLAG_PORT_DISABLED;
3210 	ap->id = host->unique_id;
3212 	ap->ctl = ATA_DEVCTL_OBS;
3213 	ap->host_set = host_set;
3214 	ap->port_no = port_no;
	/* In legacy mode the hardware port number is fixed by @ent. */
3216 		ent->legacy_mode ? ent->hard_port_no : port_no;
3217 	ap->pio_mask = ent->pio_mask;
3218 	ap->mwdma_mask = ent->mwdma_mask;
3219 	ap->udma_mask = ent->udma_mask;
3220 	ap->flags |= ent->host_flags;
3221 	ap->ops = ent->port_ops;
3222 	ap->cbl = ATA_CBL_NONE;
3223 	ap->active_tag = ATA_TAG_POISON;
3224 	ap->last_ctl = 0xFF;
3226 	INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3227 	INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3229 	for (i = 0; i < ATA_MAX_DEVICES; i++)
3230 		ap->device[i].devno = i;
	/* Start counters at 1 so the modulo trap in ata_host_intr()
	 * does not fire on the very first idle interrupt. */
3233 	ap->stats.unhandled_irq = 1;
3234 	ap->stats.idle_irq = 1;
3237 	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3241  * ata_host_add - Attach low-level ATA driver to system
3242  * @ent: Information provided by low-level driver
3243  * @host_set: Collections of ports to which we add
3244  * @port_no: Port number associated with this host
 *
 * Allocates a Scsi_Host with an embedded ata_port, initializes it,
 * and runs the driver's port_start hook.
 *
 * RETURNS:
 * New ata_port on success, NULL on failure (host is released).
3252 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3253 				      struct ata_host_set *host_set,
3254 				      unsigned int port_no)
3256 	struct Scsi_Host *host;
3257 	struct ata_port *ap;
3261 	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	/* The ata_port lives in the SCSI host's hostdata area. */
3265 	ap = (struct ata_port *) &host->hostdata[0];
3267 	ata_host_init(ap, host, host_set, ent, port_no);
3269 	rc = ap->ops->port_start(ap);
	/* error path: drop the host reference allocated above */
3276 	scsi_host_put(host);
/**
 *	ata_device_add - Register a set of ATA ports with SCSI/libata
 *	@ent: Probe information describing the ports to add
 *
 *	Allocates the host_set container, creates each port, requests the
 *	(shared) irq, probes each bus, registers with the SCSI mid-layer
 *	and scans for devices.
 *
 *	RETURNS:
 *	Number of ports registered on success, 0 on failure.
 */
3290 int ata_device_add(struct ata_probe_ent *ent)
3292 	unsigned int count = 0, i;
3293 	struct device *dev = ent->dev;
3294 	struct ata_host_set *host_set;
3297 	/* alloc a container for our list of ATA ports (buses) */
3298 	host_set = kmalloc(sizeof(struct ata_host_set) +
3299 			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3302 	memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3303 	spin_lock_init(&host_set->lock);
3305 	host_set->dev = dev;
3306 	host_set->n_ports = ent->n_ports;
3307 	host_set->irq = ent->irq;
3308 	host_set->mmio_base = ent->mmio_base;
3309 	host_set->private_data = ent->private_data;
3310 	host_set->ops = ent->port_ops;
3312 	/* register each port bound to this device */
3313 	for (i = 0; i < ent->n_ports; i++) {
3314 		struct ata_port *ap;
3315 		unsigned long xfer_mode_mask;
3317 		ap = ata_host_add(ent, host_set, i);
3321 		host_set->ports[i] = ap;
		/* Combined transfer-mode bitmap, used only for the
		 * human-readable mode string below. */
3322 		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
3323 				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3324 				(ap->pio_mask << ATA_SHIFT_PIO);
3326 		/* print per-port info to dmesg */
3327 		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3328 				 "bmdma 0x%lX irq %lu\n",
3330 			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3331 			ata_mode_string(xfer_mode_mask),
3332 			ap->ioaddr.cmd_addr,
3333 			ap->ioaddr.ctl_addr,
3334 			ap->ioaddr.bmdma_addr,
		/* Clear any interrupt left pending from before we owned
		 * the hardware. */
3338 		host_set->ops->irq_clear(ap);
3347 	/* obtain irq, that is shared between channels */
3348 	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3349 			DRV_NAME, host_set))
3352 	/* perform each probe synchronously */
3353 	DPRINTK("probe begin\n");
3354 	for (i = 0; i < count; i++) {
3355 		struct ata_port *ap;
3358 		ap = host_set->ports[i];
3360 		DPRINTK("ata%u: probe begin\n", ap->id);
3361 		rc = ata_bus_probe(ap);
3362 		DPRINTK("ata%u: probe end\n", ap->id);
3365 			/* FIXME: do something useful here?
3366 			 * Current libata behavior will
3367 			 * tear down everything when
3368 			 * the module is removed
3369 			 * or the h/w is unplugged.
3373 		rc = scsi_add_host(ap->host, dev);
3375 			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3377 			/* FIXME: do something useful here */
3378 			/* FIXME: handle unconditional calls to
3379 			 * scsi_scan_host and ata_host_remove, below,
3385 	/* probes are done, now scan each port's disk(s) */
3386 	DPRINTK("probe begin\n");
3387 	for (i = 0; i < count; i++) {
3388 		struct ata_port *ap = host_set->ports[i];
3390 		scsi_scan_host(ap->host);
3393 	dev_set_drvdata(dev, host_set);
3395 	VPRINTK("EXIT, returning %u\n", ent->n_ports);
3396 	return ent->n_ports; /* success */
	/* error path: tear down every port created so far */
3399 	for (i = 0; i < count; i++) {
3400 		ata_host_remove(host_set->ports[i], 1);
3401 		scsi_host_put(host_set->ports[i]->host);
3404 	VPRINTK("EXIT, returning 0\n");
3409  * ata_scsi_release - SCSI layer callback hook for host unload
3410  * @host: libata host to be unloaded
3412  * Performs all duties necessary to shut down a libata port...
3413  * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
3416  * Inherited from SCSI layer.
3422 int ata_scsi_release(struct Scsi_Host *host)
	/* The ata_port is embedded in the SCSI host's hostdata. */
3424 	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3428 	ap->ops->port_disable(ap);
3429 	ata_host_remove(ap, 0);
3436  * ata_std_ports - initialize ioaddr with standard port offsets.
3437  * @ioaddr: IO address structure to be initialized
 *
 * Fills in the taskfile register addresses from cmd_addr using the
 * standard ATA register block layout; cmd_addr must already be set.
3439 void ata_std_ports(struct ata_ioports *ioaddr)
3441 	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3442 	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3443 	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3444 	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3445 	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3446 	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3447 	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3448 	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3449 	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3450 	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
/*
 * ata_probe_ent_alloc - allocate and initialize @n probe entries
 * @n: number of entries
 * @dev: device the entries belong to
 * @port: per-entry port_info templates to copy from
 *
 * Returns the zeroed, template-initialized array, or NULL on OOM.
 * Caller owns the allocation.
 */
3453 static struct ata_probe_ent *
3454 ata_probe_ent_alloc(int n, struct device *dev, struct ata_port_info **port)
3456 	struct ata_probe_ent *probe_ent;
3459 	probe_ent = kmalloc(sizeof(*probe_ent) * n, GFP_KERNEL);
3461 		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
3462 		       kobject_name(&(dev->kobj)));
3466 	memset(probe_ent, 0, sizeof(*probe_ent) * n);
3468 	for (i = 0; i < n; i++) {
3469 		INIT_LIST_HEAD(&probe_ent[i].node);
3470 		probe_ent[i].dev = dev;
		/* copy per-port template: driver ops, flags, mode masks */
3472 		probe_ent[i].sht = port[i]->sht;
3473 		probe_ent[i].host_flags = port[i]->host_flags;
3474 		probe_ent[i].pio_mask = port[i]->pio_mask;
3475 		probe_ent[i].mwdma_mask = port[i]->mwdma_mask;
3476 		probe_ent[i].udma_mask = port[i]->udma_mask;
3477 		probe_ent[i].port_ops = port[i]->port_ops;
/*
 * ata_pci_init_native_mode - build probe entry for a native-mode PCI IDE
 * @pdev: controller
 * @port: port_info templates
 *
 * One probe entry describes both channels; I/O addresses come from
 * PCI BARs 0-4 (BAR4 is the shared bus-master DMA block, 8 bytes per
 * channel).  Returns NULL on allocation failure.
 */
3485 struct ata_probe_ent *
3486 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
3488 	struct ata_probe_ent *probe_ent =
3489 		ata_probe_ent_alloc(1, pci_dev_to_dev(pdev), port);
3493 	probe_ent->n_ports = 2;
3494 	probe_ent->irq = pdev->irq;
3495 	probe_ent->irq_flags = SA_SHIRQ;
3497 	probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
3498 	probe_ent->port[0].altstatus_addr =
3499 	probe_ent->port[0].ctl_addr =
3500 		pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
3501 	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3503 	probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
3504 	probe_ent->port[1].altstatus_addr =
3505 	probe_ent->port[1].ctl_addr =
3506 		pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
	/* second channel's BMDMA registers follow the first's by 8 bytes */
3507 	probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
3509 	ata_std_ports(&probe_ent->port[0]);
3510 	ata_std_ports(&probe_ent->port[1]);
/*
 * ata_pci_init_legacy_mode - build probe entries for a legacy-mode PCI IDE
 * @pdev: controller
 * @port: port_info templates
 *
 * Legacy mode uses the fixed ISA-compatible addresses and IRQs
 * (0x1f0/irq14 for the primary channel, 0x170/irq15 for the
 * secondary), so two separate one-port probe entries are built.
 * Returns NULL on allocation failure.
 */
3515 struct ata_probe_ent *
3516 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port)
3518 	struct ata_probe_ent *probe_ent =
3519 		ata_probe_ent_alloc(2, pci_dev_to_dev(pdev), port);
3523 	probe_ent[0].n_ports = 1;
3524 	probe_ent[0].irq = 14;
3526 	probe_ent[0].hard_port_no = 0;
3527 	probe_ent[0].legacy_mode = 1;
3529 	probe_ent[1].n_ports = 1;
3530 	probe_ent[1].irq = 15;
3532 	probe_ent[1].hard_port_no = 1;
3533 	probe_ent[1].legacy_mode = 1;
3535 	probe_ent[0].port[0].cmd_addr = 0x1f0;
3536 	probe_ent[0].port[0].altstatus_addr =
3537 	probe_ent[0].port[0].ctl_addr = 0x3f6;
	/* BMDMA registers still come from PCI BAR4, even in legacy mode */
3538 	probe_ent[0].port[0].bmdma_addr = pci_resource_start(pdev, 4);
3540 	probe_ent[1].port[0].cmd_addr = 0x170;
3541 	probe_ent[1].port[0].altstatus_addr =
3542 	probe_ent[1].port[0].ctl_addr = 0x376;
3543 	probe_ent[1].port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
3545 	ata_std_ports(&probe_ent[0].port[0]);
3546 	ata_std_ports(&probe_ent[1].port[0]);
3552  * ata_pci_init_one - Initialize/register PCI IDE host controller
3553  * @pdev: Controller to be initialized
3554  * @port_info: Information from low-level host driver
3555  * @n_ports: Number of ports attached to host controller
 *
 * Detects legacy vs native mode from the programming-interface byte,
 * claims PCI and (in legacy mode) ISA I/O resources, sets the DMA
 * masks, builds the probe entries and registers the device(s).
 *
 * LOCKING:
3558  * Inherited from PCI layer (may sleep).
3564 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
3565 		      unsigned int n_ports)
3567 	struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
3568 	struct ata_port_info *port[2];
3570 	unsigned int legacy_mode = 0;
3575 	port[0] = port_info[0];
3577 		port[1] = port_info[1];
3581 	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0) {
3582 		/* TODO: support transitioning to native mode? */
3583 		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		/* bits 0 and 2 of the prog-if byte indicate native mode
		 * per channel; if either channel is not native, treat the
		 * controller as legacy (bit 3 of our local bookkeeping). */
3584 		mask = (1 << 2) | (1 << 0);
3585 		if ((tmp8 & mask) != mask)
3586 			legacy_mode = (1 << 3);
3590 	if ((!legacy_mode) && (n_ports > 1)) {
3591 		printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
3595 	rc = pci_enable_device(pdev);
3599 	rc = pci_request_regions(pdev, DRV_NAME);
	/* In legacy mode, claim the fixed ISA ranges.  If the conflict
	 * holder is ourselves ("libata"), treat the channel as usable. */
3604 	if (!request_region(0x1f0, 8, "libata")) {
3605 		struct resource *conflict, res;
3607 		res.end = 0x1f0 + 8 - 1;
3608 		conflict = ____request_resource(&ioport_resource, &res);
3609 		if (!strcmp(conflict->name, "libata"))
3610 			legacy_mode |= (1 << 0);
3612 			printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
3614 		legacy_mode |= (1 << 0);
3616 	if (!request_region(0x170, 8, "libata")) {
3617 		struct resource *conflict, res;
3619 		res.end = 0x170 + 8 - 1;
3620 		conflict = ____request_resource(&ioport_resource, &res);
3621 		if (!strcmp(conflict->name, "libata"))
3622 			legacy_mode |= (1 << 1);
3624 			printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
3626 		legacy_mode |= (1 << 1);
3629 	/* we have legacy mode, but all ports are unavailable */
3630 	if (legacy_mode == (1 << 3)) {
3632 		goto err_out_regions;
3635 	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3637 		goto err_out_regions;
3638 	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3640 		goto err_out_regions;
3643 		probe_ent = ata_pci_init_legacy_mode(pdev, port);
3645 			probe_ent2 = &probe_ent[1];
3647 		probe_ent = ata_pci_init_native_mode(pdev, port);
3650 		goto err_out_regions;
3653 	pci_set_master(pdev);
3655 	/* FIXME: check ata_device_add return */
3657 	if (legacy_mode & (1 << 0))
3658 		ata_device_add(probe_ent);
3659 	if (legacy_mode & (1 << 1))
3660 		ata_device_add(probe_ent2);
3662 		ata_device_add(probe_ent);
	/* error path: release only the legacy regions we claimed */
3669 	if (legacy_mode & (1 << 0))
3670 		release_region(0x1f0, 8);
3671 	if (legacy_mode & (1 << 1))
3672 		release_region(0x170, 8);
3673 	pci_release_regions(pdev);
3675 	pci_disable_device(pdev);
3680  * ata_pci_remove_one - PCI layer callback for device removal
3681  * @pdev: PCI device that was removed
3683  * PCI layer indicates to libata via this hook that
3684  * hot-unplug or module unload event has occurred.
3685  * Handle this by unregistering all objects associated
3686  * with this PCI device.  Free those objects.  Then finally
3687  * release PCI resources and disable device.
 *
 * LOCKING:
3690  * Inherited from PCI layer (may sleep).
3693 void ata_pci_remove_one (struct pci_dev *pdev)
3695 	struct device *dev = pci_dev_to_dev(pdev);
3696 	struct ata_host_set *host_set = dev_get_drvdata(dev);
3697 	struct ata_port *ap;
	/* Pass 1: detach every port from the SCSI mid-layer so no new
	 * commands can arrive while we tear down. */
3700 	for (i = 0; i < host_set->n_ports; i++) {
3701 		ap = host_set->ports[i];
3703 		scsi_remove_host(ap->host);
3706 	free_irq(host_set->irq, host_set);
3707 	if (host_set->ops->host_stop)
3708 		host_set->ops->host_stop(host_set);
3709 	if (host_set->mmio_base)
3710 		iounmap(host_set->mmio_base);
	/* Pass 2: release per-port resources and drop host references. */
3712 	for (i = 0; i < host_set->n_ports; i++) {
3713 		ap = host_set->ports[i];
3715 		ata_scsi_release(ap->host);
3716 		scsi_host_put(ap->host);
3719 	pci_release_regions(pdev);
	/* Pass 3: give back any legacy ISA regions claimed at init time. */
3721 	for (i = 0; i < host_set->n_ports; i++) {
3722 		struct ata_ioports *ioaddr;
3724 		ap = host_set->ports[i];
3725 		ioaddr = &ap->ioaddr;
3727 		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
3728 			if (ioaddr->cmd_addr == 0x1f0)
3729 				release_region(0x1f0, 8);
3730 			else if (ioaddr->cmd_addr == 0x170)
3731 				release_region(0x170, 8);
3736 	pci_disable_device(pdev);
3737 	dev_set_drvdata(dev, NULL);
3740 /* move to PCI subsystem */
/*
 * pci_test_config_bits - test masked bits in a PCI config register
 * @pdev: device to read from
 * @bits: register offset, width (in bits), mask and expected value
 *
 * RETURNS: 1 if (reg & mask) == val, 0 otherwise.
 */
3741 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
3743 	unsigned long tmp = 0;
3745 	switch (bits->width) {
3748 		pci_read_config_byte(pdev, bits->reg, &tmp8);
3754 		pci_read_config_word(pdev, bits->reg, &tmp16);
3760 		pci_read_config_dword(pdev, bits->reg, &tmp32);
3771 	return (tmp == bits->val) ? 1 : 0;
3773 #endif /* CONFIG_PCI */
/* Module init: create the single-threaded "ata" workqueue used for
 * the PIO and ATAPI packet tasks. */
3785 static int __init ata_init(void)
3787 	ata_wq = create_workqueue("ata");
3791 	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
/* Module exit: tear down the workqueue created by ata_init(). */
3795 static void __exit ata_exit(void)
3797 	destroy_workqueue(ata_wq);
3800 module_init(ata_init);
3801 module_exit(ata_exit);
3804 * libata is essentially a library of internal helper functions for
3805 * low-level ATA host controller drivers. As such, the API/ABI is
3806 * likely to change as new drivers are added and updated.
3807 * Do not depend on ABI/API stability.
3810 EXPORT_SYMBOL_GPL(ata_std_bios_param);
3811 EXPORT_SYMBOL_GPL(ata_std_ports);
3812 EXPORT_SYMBOL_GPL(ata_device_add);
3813 EXPORT_SYMBOL_GPL(ata_sg_init);
3814 EXPORT_SYMBOL_GPL(ata_sg_init_one);
3815 EXPORT_SYMBOL_GPL(ata_qc_complete);
3816 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
3817 EXPORT_SYMBOL_GPL(ata_eng_timeout);
3818 EXPORT_SYMBOL_GPL(ata_tf_load);
3819 EXPORT_SYMBOL_GPL(ata_tf_read);
3820 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
3821 EXPORT_SYMBOL_GPL(ata_std_dev_select);
3822 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
3823 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
3824 EXPORT_SYMBOL_GPL(ata_check_status);
3825 EXPORT_SYMBOL_GPL(ata_exec_command);
3826 EXPORT_SYMBOL_GPL(ata_port_start);
3827 EXPORT_SYMBOL_GPL(ata_port_stop);
3828 EXPORT_SYMBOL_GPL(ata_interrupt);
3829 EXPORT_SYMBOL_GPL(ata_qc_prep);
3830 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
3831 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3832 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
3833 EXPORT_SYMBOL_GPL(ata_port_probe);
3834 EXPORT_SYMBOL_GPL(sata_phy_reset);
3835 EXPORT_SYMBOL_GPL(__sata_phy_reset);
3836 EXPORT_SYMBOL_GPL(ata_bus_reset);
3837 EXPORT_SYMBOL_GPL(ata_port_disable);
3838 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
3839 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
3840 EXPORT_SYMBOL_GPL(ata_scsi_error);
3841 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
3842 EXPORT_SYMBOL_GPL(ata_scsi_release);
3843 EXPORT_SYMBOL_GPL(ata_host_intr);
3844 EXPORT_SYMBOL_GPL(ata_dev_classify);
3845 EXPORT_SYMBOL_GPL(ata_dev_id_string);
3846 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
3849 EXPORT_SYMBOL_GPL(pci_test_config_bits);
3850 EXPORT_SYMBOL_GPL(ata_pci_init_legacy_mode);
3851 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
3852 EXPORT_SYMBOL_GPL(ata_pci_init_one);
3853 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
3854 #endif /* CONFIG_PCI */