2 libata-core.c - helper library for ATA
4 Copyright 2003-2004 Red Hat, Inc. All rights reserved.
5 Copyright 2003-2004 Jeff Garzik
7 The contents of this file are subject to the Open
8 Software License version 1.1 that can be found at
9 http://www.opensource.org/licenses/osl-1.1.txt and is included herein
12 Alternatively, the contents of this file may be used under the terms
13 of the GNU General Public License version 2 (the "GPL") as distributed
14 in the kernel source COPYING file, in which case the provisions of
15 the GPL are applicable instead of the above. If you wish to allow
16 the use of your version of this file only under the terms of the
17 GPL and not to allow others to use your version of this file under
18 the OSL, indicate your decision by deleting the provisions above and
19 replace them with the notice and other provisions required by the GPL.
20 If you do not delete the provisions above, a recipient may use your
21 version of this file under either the OSL or the GPL.
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
31 #include <linux/highmem.h>
32 #include <linux/spinlock.h>
33 #include <linux/blkdev.h>
34 #include <linux/delay.h>
35 #include <linux/timer.h>
36 #include <linux/interrupt.h>
37 #include <linux/completion.h>
38 #include <linux/suspend.h>
39 #include <linux/workqueue.h>
40 #include <scsi/scsi.h>
42 #include "scsi_priv.h"
43 #include <scsi/scsi_host.h>
44 #include <linux/libata.h>
46 #include <asm/semaphore.h>
47 #include <asm/byteorder.h>
/* Forward declarations for helpers defined later in this file. */
51 static unsigned int ata_busy_sleep (struct ata_port *ap,
52 unsigned long tmout_pat,
/* NOTE(review): the third parameter line of this prototype was lost in
 * extraction -- the definition below takes (ap, tmout_pat, tmout). */
54 static void ata_set_mode(struct ata_port *ap);
55 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
56 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
57 static int fgb(u32 bitmap);
58 static int ata_choose_xfer_mode(struct ata_port *ap,
60 unsigned int *xfer_shift_out);
61 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
62 static void __ata_qc_complete(struct ata_queued_cmd *qc);
/* Next id handed out to a registered port (1-based). */
64 static unsigned int ata_unique_id = 1;
/* Library-wide workqueue; presumably used for deferred port work --
 * its users are outside this chunk. */
65 static struct workqueue_struct *ata_wq;
/* Module metadata. */
67 MODULE_AUTHOR("Jeff Garzik");
68 MODULE_DESCRIPTION("Library module for ATA devices");
69 MODULE_LICENSE("GPL");
70 MODULE_VERSION(DRV_VERSION);
73 * ata_tf_load - send taskfile registers to host controller
74 * @ap: Port to which output is sent
75 * @tf: ATA taskfile register set
77 * Outputs ATA taskfile to standard ATA host controller.
80 * Inherited from caller.
/* PIO (port I/O) variant of taskfile load: write @tf to the shadow
 * registers with outb().  Counterpart of ata_tf_load_mmio() below. */
83 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
85 struct ata_ioports *ioaddr = &ap->ioaddr;
86 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
/* Only rewrite the device-control register when it changes; cache it. */
88 if (tf->ctl != ap->last_ctl) {
89 outb(tf->ctl, ioaddr->ctl_addr);
90 ap->last_ctl = tf->ctl;
/* For LBA48 commands the high-order bytes ("hob") are written first,
 * through the same register addresses. */
94 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
95 outb(tf->hob_feature, ioaddr->feature_addr);
96 outb(tf->hob_nsect, ioaddr->nsect_addr);
97 outb(tf->hob_lbal, ioaddr->lbal_addr);
98 outb(tf->hob_lbam, ioaddr->lbam_addr);
99 outb(tf->hob_lbah, ioaddr->lbah_addr);
100 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
/* NOTE(review): the VPRINTK argument lines and the header of the
 * low-order-byte branch (orig. 101-108) were lost in extraction. */
109 outb(tf->feature, ioaddr->feature_addr);
110 outb(tf->nsect, ioaddr->nsect_addr);
111 outb(tf->lbal, ioaddr->lbal_addr);
112 outb(tf->lbam, ioaddr->lbam_addr);
113 outb(tf->lbah, ioaddr->lbah_addr);
114 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
/* Finally the device register, when requested by the taskfile flags. */
122 if (tf->flags & ATA_TFLAG_DEVICE) {
123 outb(tf->device, ioaddr->device_addr);
124 VPRINTK("device 0x%X\n", tf->device);
131 * ata_tf_load_mmio - send taskfile registers to host controller
132 * @ap: Port to which output is sent
133 * @tf: ATA taskfile register set
135 * Outputs ATA taskfile to standard ATA host controller using MMIO.
138 * Inherited from caller.
/* MMIO variant of taskfile load: write @tf to the shadow registers with
 * writeb().  Mirrors ata_tf_load_pio() above. */
141 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
143 struct ata_ioports *ioaddr = &ap->ioaddr;
144 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
/* Only rewrite the device-control register when it changes; cache it. */
146 if (tf->ctl != ap->last_ctl) {
147 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
148 ap->last_ctl = tf->ctl;
/* High-order bytes first for LBA48, as in the PIO variant. */
152 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
153 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
154 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
155 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
156 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
157 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
158 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
/* NOTE(review): VPRINTK argument lines and the low-order branch header
 * (orig. 159-166) were lost in extraction. */
167 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
168 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
169 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
170 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
171 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
172 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
/* Device register last, when requested. */
180 if (tf->flags & ATA_TFLAG_DEVICE) {
181 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
182 VPRINTK("device 0x%X\n", tf->device);
188 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
190 if (ap->flags & ATA_FLAG_MMIO)
191 ata_tf_load_mmio(ap, tf);
193 ata_tf_load_pio(ap, tf);
197 * ata_exec_command - issue ATA command to host controller
198 * @ap: port to which command is being issued
199 * @tf: ATA taskfile register set
201 * Issues PIO/MMIO write to ATA command register, with proper
202 * synchronization with interrupt handler / other threads.
205 * spin_lock_irqsave(host_set lock)
/* PIO variant: write the command byte to the shadow command register.
 * LOCKING: caller holds the host_set lock (see doc comment above). */
208 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
210 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
212 outb(tf->command, ap->ioaddr.command_addr);
/* NOTE(review): the trailing lines of this function (orig. 213-216)
 * were lost in extraction; confirm upstream whether a post-write
 * pause/flush followed. */
218 * ata_exec_command_mmio - issue ATA command to host controller
219 * @ap: port to which command is being issued
220 * @tf: ATA taskfile register set
222 * Issues MMIO write to ATA command register, with proper
223 * synchronization with interrupt handler / other threads.
226 * spin_lock_irqsave(host_set lock)
/* MMIO variant: write the command byte to the shadow command register.
 * LOCKING: caller holds the host_set lock (see doc comment above). */
229 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
231 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
233 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
/* NOTE(review): the trailing lines (orig. 234-236) were lost in
 * extraction; confirm upstream whether a post-write pause/flush
 * followed. */
237 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
239 if (ap->flags & ATA_FLAG_MMIO)
240 ata_exec_command_mmio(ap, tf);
242 ata_exec_command_pio(ap, tf);
246 * ata_exec - issue ATA command to host controller
247 * @ap: port to which command is being issued
248 * @tf: ATA taskfile register set
250 * Issues PIO/MMIO write to ATA command register, with proper
251 * synchronization with interrupt handler / other threads.
254 * Obtains host_set lock.
257 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
261 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
262 spin_lock_irqsave(&ap->host_set->lock, flags);
263 ap->ops->exec_command(ap, tf);
264 spin_unlock_irqrestore(&ap->host_set->lock, flags);
268 * ata_tf_to_host - issue ATA taskfile to host controller
269 * @ap: port to which command is being issued
270 * @tf: ATA taskfile register set
272 * Issues ATA taskfile register set to ATA host controller,
273 * with proper synchronization with interrupt handler and
277 * Obtains host_set lock.
280 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
282 ap->ops->tf_load(ap, tf);
288 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
289 * @ap: port to which command is being issued
290 * @tf: ATA taskfile register set
292 * Issues ATA taskfile register set to ATA host controller,
293 * with proper synchronization with interrupt handler and
297 * spin_lock_irqsave(host_set lock)
300 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
302 ap->ops->tf_load(ap, tf);
303 ap->ops->exec_command(ap, tf);
307 * ata_tf_read - input device's ATA taskfile shadow registers
308 * @ap: Port from which input is read
309 * @tf: ATA taskfile register set for storing input
311 * Reads ATA taskfile registers for currently-selected device
315 * Inherited from caller.
318 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
320 struct ata_ioports *ioaddr = &ap->ioaddr;
322 tf->nsect = inb(ioaddr->nsect_addr);
323 tf->lbal = inb(ioaddr->lbal_addr);
324 tf->lbam = inb(ioaddr->lbam_addr);
325 tf->lbah = inb(ioaddr->lbah_addr);
326 tf->device = inb(ioaddr->device_addr);
328 if (tf->flags & ATA_TFLAG_LBA48) {
329 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
330 tf->hob_feature = inb(ioaddr->error_addr);
331 tf->hob_nsect = inb(ioaddr->nsect_addr);
332 tf->hob_lbal = inb(ioaddr->lbal_addr);
333 tf->hob_lbam = inb(ioaddr->lbam_addr);
334 tf->hob_lbah = inb(ioaddr->lbah_addr);
339 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
340 * @ap: Port from which input is read
341 * @tf: ATA taskfile register set for storing input
343 * Reads ATA taskfile registers for currently-selected device
347 * Inherited from caller.
350 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
352 struct ata_ioports *ioaddr = &ap->ioaddr;
354 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
355 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
356 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
357 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
358 tf->device = readb((void __iomem *)ioaddr->device_addr);
360 if (tf->flags & ATA_TFLAG_LBA48) {
361 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
362 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
363 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
364 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
365 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
366 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
370 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
372 if (ap->flags & ATA_FLAG_MMIO)
373 ata_tf_read_mmio(ap, tf);
375 ata_tf_read_pio(ap, tf);
379 * ata_check_status - Read device status reg & clear interrupt
380 * @ap: port where the device is
382 * Reads ATA taskfile status register for currently-selected device
383 and returns its value. This also clears pending interrupts
387 * Inherited from caller.
389 static u8 ata_check_status_pio(struct ata_port *ap)
391 return inb(ap->ioaddr.status_addr);
395 * ata_check_status_mmio - Read device status reg & clear interrupt
396 * @ap: port where the device is
398 * Reads ATA taskfile status register for currently-selected device
399 via MMIO and returns its value. This also clears pending interrupts
403 * Inherited from caller.
405 static u8 ata_check_status_mmio(struct ata_port *ap)
407 return readb((void __iomem *) ap->ioaddr.status_addr);
410 u8 ata_check_status(struct ata_port *ap)
412 if (ap->flags & ATA_FLAG_MMIO)
413 return ata_check_status_mmio(ap);
414 return ata_check_status_pio(ap);
418 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
419 * @tf: Taskfile to convert
420 * @fis: Buffer into which data will output
421 * @pmp: Port multiplier port
423 * Converts a standard ATA taskfile to a Serial ATA
424 * FIS structure (Register - Host to Device).
427 * Inherited from caller.
430 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
/* Byte 0: FIS type; byte 1: PM port in the low nibble, bit 7 marks a
 * Command FIS. */
432 fis[0] = 0x27; /* Register - Host to Device FIS */
433 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
434 bit 7 indicates Command FIS */
435 fis[2] = tf->command;
436 fis[3] = tf->feature;
/* NOTE(review): the low-order LBA/device byte stores (orig. 437-442)
 * were lost in extraction. */
443 fis[8] = tf->hob_lbal;
444 fis[9] = tf->hob_lbam;
445 fis[10] = tf->hob_lbah;
446 fis[11] = tf->hob_feature;
/* NOTE(review): fis[12] (orig. 447-448) lost in extraction. */
449 fis[13] = tf->hob_nsect;
/* NOTE(review): the trailing byte stores (orig. 450-459) were lost in
 * extraction. */
460 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
461 * @fis: Buffer from which data will be input
462 * @tf: Taskfile to output
464 * Converts a standard ATA taskfile to a Serial ATA
465 * FIS structure (Register - Host to Device).
468 * Inherited from caller.
471 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
/* D2H register FIS: byte 2 carries status, byte 3 error; they map onto
 * the command/feature slots of the taskfile. */
473 tf->command = fis[2]; /* status */
474 tf->feature = fis[3]; /* error */
/* NOTE(review): the low-order LBA/device/nsect loads (orig. 475-480)
 * were lost in extraction. */
481 tf->hob_lbal = fis[8];
482 tf->hob_lbam = fis[9];
483 tf->hob_lbah = fis[10];
/* NOTE(review): orig. 484-485 were lost in extraction. */
486 tf->hob_nsect = fis[13];
490 * ata_prot_to_cmd - determine which read/write opcodes to use
491 * @protocol: ATA_PROT_xxx taskfile protocol
492 * @lba48: true is lba48 is present
494 * Given necessary input, determine which read/write commands
495 * to use to transfer data.
/* Map a taskfile protocol + lba48 flag to a (read, write) opcode pair,
 * packed as rcmd | (wcmd << 8).  NOTE(review): the switch/case skeleton
 * selecting between the protocol branches (orig. 503-528, partially)
 * was lost in extraction. */
500 static int ata_prot_to_cmd(int protocol, int lba48)
502 int rcmd = 0, wcmd = 0;
/* PIO opcodes: EXT forms when lba48 is set. */
507 rcmd = ATA_CMD_PIO_READ_EXT;
508 wcmd = ATA_CMD_PIO_WRITE_EXT;
510 rcmd = ATA_CMD_PIO_READ;
511 wcmd = ATA_CMD_PIO_WRITE;
/* DMA opcodes. */
517 rcmd = ATA_CMD_READ_EXT;
518 wcmd = ATA_CMD_WRITE_EXT;
521 wcmd = ATA_CMD_WRITE;
/* Pack both opcodes into one int for the caller to split. */
529 return rcmd | (wcmd << 8);
533 * ata_dev_set_protocol - set taskfile protocol and r/w commands
534 * @dev: device to examine and configure
536 * Examine the device configuration, after we have
537 * read the identify-device page and configured the
538 * data transfer mode. Set internal state related to
539 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
540 * and calculate the proper read/write commands to use.
/* Set the device's taskfile protocol (PIO vs DMA) from its flags and
 * derive the read/write opcodes to use (see doc comment above). */
545 static void ata_dev_set_protocol(struct ata_device *dev)
547 int pio = (dev->flags & ATA_DFLAG_PIO);
548 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
/* NOTE(review): the local declarations and the if/else around these
 * two assignments (orig. 549-555, partially) were lost in extraction. */
552 proto = dev->xfer_protocol = ATA_PROT_PIO;
554 proto = dev->xfer_protocol = ATA_PROT_DMA;
/* Split the packed (read, write) opcode pair from ata_prot_to_cmd(). */
556 cmd = ata_prot_to_cmd(proto, lba48);
560 dev->read_cmd = cmd & 0xff;
561 dev->write_cmd = (cmd >> 8) & 0xff;
/* Human-readable names for XFER_* modes, indexed via shift/offset in
 * ata_mode_string() and ata_dev_set_mode().  NOTE(review): the table
 * entries (orig. 565-583) were lost in extraction. */
564 static const char * xfer_mode_str[] = {
584 * ata_mode_string - convert xfer-mode bitmap to string
585 * @mask: mask of bits supported; only highest bit counts.
587 * Determine string which represents the highest speed
588 * (highest bit in @udma_mask).
594 * Constant C string representing highest speed listed in
595 * @udma_mask, or the constant C string "<n/a>".
598 static const char *ata_mode_string(unsigned int mask)
/* Scan UDMA bits (7..0) first, then MWDMA, then PIO, highest bit in
 * each class first.  NOTE(review): the declarations, per-loop bit
 * tests and the "<n/a>" fallback (orig. 599-616, partially) were lost
 * in extraction. */
602 for (i = 7; i >= 0; i--)
605 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
608 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
615 return xfer_mode_str[i];
619 * ata_pio_devchk - PATA device presence detection
620 * @ap: ATA channel to examine
621 * @device: Device to examine (starting at zero)
623 * This technique was originally described in
624 * Hale Landis's ATADRVR (www.ata-atapi.com), and
625 * later found its way into the ATA/ATAPI spec.
627 * Write a pattern to the ATA shadow registers,
628 * and if a device is present, it will respond by
629 * correctly storing and echoing back the
630 * ATA shadow register contents.
636 static unsigned int ata_pio_devchk(struct ata_port *ap,
/* NOTE(review): the second parameter line ("unsigned int device") and
 * the local declarations (orig. 637-641) were lost in extraction. */
639 struct ata_ioports *ioaddr = &ap->ioaddr;
642 ap->ops->dev_select(ap, device);
/* Write alternating patterns to nsect/lbal and read them back; a real
 * device latches and echoes the last pair written. */
644 outb(0x55, ioaddr->nsect_addr);
645 outb(0xaa, ioaddr->lbal_addr);
647 outb(0xaa, ioaddr->nsect_addr);
648 outb(0x55, ioaddr->lbal_addr);
650 outb(0x55, ioaddr->nsect_addr);
651 outb(0xaa, ioaddr->lbal_addr);
653 nsect = inb(ioaddr->nsect_addr);
654 lbal = inb(ioaddr->lbal_addr);
656 if ((nsect == 0x55) && (lbal == 0xaa))
657 return 1; /* we found a device */
659 return 0; /* nothing found */
663 * ata_mmio_devchk - PATA device presence detection
664 * @ap: ATA channel to examine
665 * @device: Device to examine (starting at zero)
667 * This technique was originally described in
668 * Hale Landis's ATADRVR (www.ata-atapi.com), and
669 * later found its way into the ATA/ATAPI spec.
671 * Write a pattern to the ATA shadow registers,
672 * and if a device is present, it will respond by
673 * correctly storing and echoing back the
674 * ATA shadow register contents.
680 static unsigned int ata_mmio_devchk(struct ata_port *ap,
/* NOTE(review): the second parameter line ("unsigned int device") and
 * the local declarations (orig. 681-685) were lost in extraction. */
683 struct ata_ioports *ioaddr = &ap->ioaddr;
686 ap->ops->dev_select(ap, device);
/* Same pattern-echo test as ata_pio_devchk(), using MMIO accessors. */
688 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
689 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
691 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
692 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
694 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
695 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
697 nsect = readb((void __iomem *) ioaddr->nsect_addr);
698 lbal = readb((void __iomem *) ioaddr->lbal_addr);
700 if ((nsect == 0x55) && (lbal == 0xaa))
701 return 1; /* we found a device */
703 return 0; /* nothing found */
707 * ata_devchk - PATA device presence detection
708 * @ap: ATA channel to examine
709 * @device: Device to examine (starting at zero)
711 * Dispatch ATA device presence detection, depending
712 * on whether we are using PIO or MMIO to talk to the
713 * ATA shadow registers.
719 static unsigned int ata_devchk(struct ata_port *ap,
722 if (ap->flags & ATA_FLAG_MMIO)
723 return ata_mmio_devchk(ap, device);
724 return ata_pio_devchk(ap, device);
728 * ata_dev_classify - determine device type based on ATA-spec signature
729 * @tf: ATA taskfile register set for device to be identified
731 * Determine from taskfile register contents whether a device is
732 * ATA or ATAPI, as per "Signature and persistence" section
733 * of ATA/PI spec (volume 1, sect 5.14).
739 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
740 * the event of failure.
743 unsigned int ata_dev_classify(struct ata_taskfile *tf)
745 /* Apple's open source Darwin code hints that some devices only
746 * put a proper signature into the LBA mid/high registers,
747 * So, we only check those. It's sufficient for uniqueness.
750 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
751 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
752 DPRINTK("found ATA device by sig\n");
756 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
757 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
758 DPRINTK("found ATAPI device by sig\n");
759 return ATA_DEV_ATAPI;
762 DPRINTK("unknown device\n");
763 return ATA_DEV_UNKNOWN;
767 * ata_dev_try_classify - Parse returned ATA device signature
768 * @ap: ATA channel to examine
769 * @device: Device to examine (starting at zero)
771 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
772 * an ATA/ATAPI-defined set of values is placed in the ATA
773 * shadow registers, indicating the results of device detection
776 * Select the ATA device, and read the values from the ATA shadow
777 * registers. Then parse according to the Error register value,
778 * and the spec-defined values examined by ata_dev_classify().
784 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
786 struct ata_device *dev = &ap->device[device];
787 struct ata_taskfile tf;
/* NOTE(review): local declarations (err, class) and several statements
 * (orig. 785-819, partially) were lost in extraction. */
791 ap->ops->dev_select(ap, device);
793 memset(&tf, 0, sizeof(tf));
/* Read the Error register, then the full shadow taskfile. */
795 err = ata_chk_err(ap);
796 ap->ops->tf_read(ap, &tf);
/* Default to "no device" until the signature proves otherwise. */
798 dev->class = ATA_DEV_NONE;
800 /* see if device passed diags */
/* err == 0x81 for device 0: per ATA diagnostic codes this looks like
 * "dev 0 passed, dev 1 failed" -- TODO confirm against spec. */
803 else if ((device == 0) && (err == 0x81))
808 /* determine if device if ATA or ATAPI */
809 class = ata_dev_classify(&tf);
810 if (class == ATA_DEV_UNKNOWN)
/* Special-case ATA devices reporting Status == 0 (handling lines,
 * orig. 813-819, lost in extraction). */
812 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
821 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
822 * @id: IDENTIFY DEVICE results we will examine
823 * @s: string into which data is output
824 * @ofs: offset into identify device page
825 * @len: length of string to return. must be an even number.
827 * The strings in the IDENTIFY DEVICE page are broken up into
828 * 16-bit chunks. Run through the string, and output each
829 * 8-bit chunk linearly, regardless of platform.
/* Linearize 16-bit IDENTIFY string chunks into @s (see doc comment
 * above).  NOTE(review): the function body (orig. 837-853) was lost in
 * extraction. */
835 void ata_dev_id_string(u16 *id, unsigned char *s,
836 unsigned int ofs, unsigned int len)
/*
 *	ata_noop_dev_select - no-op device-select hook
 *	@ap: ATA channel to manipulate (unused)
 *	@device: ATA device (numbered from zero) to select (unused)
 *
 *	Do-nothing device selection, for controllers that need no
 *	device-select step.  (Empty body restored; the brace lines were
 *	lost in extraction.)
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
859 * ata_std_dev_select - Select device 0/1 on ATA bus
860 * @ap: ATA channel to manipulate
861 * @device: ATA device (numbered from zero) to select
863 * Use the method defined in the ATA specification to
864 * make either device 0, or device 1, active on the
871 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
876 tmp = ATA_DEVICE_OBS;
878 tmp = ATA_DEVICE_OBS | ATA_DEV1;
880 if (ap->flags & ATA_FLAG_MMIO) {
881 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
883 outb(tmp, ap->ioaddr.device_addr);
885 ata_pause(ap); /* needed; also flushes, for mmio */
889 * ata_dev_select - Select device 0/1 on ATA bus
890 * @ap: ATA channel to manipulate
891 * @device: ATA device (numbered from zero) to select
892 * @wait: non-zero to wait for Status register BSY bit to clear
893 * @can_sleep: non-zero if context allows sleeping
895 * Use the method defined in the ATA specification to
896 * make either device 0, or device 1, active on the
899 * This is a high-level version of ata_std_dev_select(),
900 * which additionally provides the services of inserting
901 * the proper pauses and status polling, where needed.
907 void ata_dev_select(struct ata_port *ap, unsigned int device,
908 unsigned int wait, unsigned int can_sleep)
910 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
911 ap->id, device, wait);
/* NOTE(review): the pre-select wait handling (orig. 912-915) was lost
 * in extraction. */
916 ap->ops->dev_select(ap, device);
/* ATAPI devices appear to get extra settle handling after selection;
 * the branch body (orig. 920-924) was lost in extraction. */
919 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
926 * ata_dump_id - IDENTIFY DEVICE info debugging output
927 * @dev: Device whose IDENTIFY DEVICE page we will dump
929 * Dump selected 16-bit words from a detected device's
930 * IDENTIFY PAGE page.
/* Debug-only dump of selected IDENTIFY DEVICE words (49, 80, 88 ...).
 * NOTE(review): most DPRINTK argument lines (orig. 939-964, partially)
 * were lost in extraction. */
936 static inline void ata_dump_id(struct ata_device *dev)
938 DPRINTK("49==0x%04x "
948 DPRINTK("80==0x%04x "
958 DPRINTK("88==0x%04x "
965 * ata_dev_identify - obtain IDENTIFY x DEVICE page
966 * @ap: port on which device we wish to probe resides
967 * @device: device bus address, starting at zero
969 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
970 * command, and read back the 512-byte device information page.
971 * The device information page is fed to us via the standard
972 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
973 * using standard PIO-IN paths)
975 * After reading the device information page, we use several
976 * bits of information from it to initialize data structures
977 * that will be used during the lifetime of the ata_device.
978 * Other data from the info page is used to disqualify certain
979 * older ATA devices we do not wish to support.
982 * Inherited from caller. Some functions called by this function
983 * obtain the host_set lock.
/* Issue IDENTIFY [PACKET] DEVICE and configure the device from the
 * returned page (see doc comment above).  NOTE(review): this function
 * is heavily truncated by extraction -- several declarations, branch
 * headers and error-path lines are missing; the surviving lines are
 * kept verbatim below. */
986 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
988 struct ata_device *dev = &ap->device[device];
991 unsigned long xfer_modes;
993 unsigned int using_edd;
994 DECLARE_COMPLETION(wait);
995 struct ata_queued_cmd *qc;
/* Nothing to do for an absent device. */
999 if (!ata_dev_present(dev)) {
1000 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
/* using_edd selection: SRST/SATA_RESET ports do not use EDD (the
 * assignment lines, orig. 1006-1009, were lost in extraction). */
1005 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1010 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1012 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1013 dev->class == ATA_DEV_NONE);
1015 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
/* Build a PIO-in queued command that reads the id page into dev->id. */
1017 qc = ata_qc_new_init(ap, dev);
1020 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1021 qc->pci_dma_dir = PCI_DMA_FROMDEVICE;
1022 qc->tf.protocol = ATA_PROT_PIO;
1026 if (dev->class == ATA_DEV_ATA) {
1027 qc->tf.command = ATA_CMD_ID_ATA;
1028 DPRINTK("do ATA identify\n");
1030 qc->tf.command = ATA_CMD_ID_ATAPI;
1031 DPRINTK("do ATAPI identify\n");
/* Issue under the host_set lock and wait for completion. */
1034 qc->waiting = &wait;
1035 qc->complete_fn = ata_qc_complete_noop;
1037 spin_lock_irqsave(&ap->host_set->lock, flags);
1038 rc = ata_qc_issue(qc);
1039 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1044 wait_for_completion(&wait);
1046 status = ata_chk_status(ap);
1047 if (status & ATA_ERR) {
1049 * arg! EDD works for all test cases, but seems to return
1050 * the ATA signature for some ATAPI devices. Until the
1051 * reason for this is found and fixed, we fix up the mess
1052 * here. If IDENTIFY DEVICE returns command aborted
1053 * (as ATAPI devices do), then we issue an
1054 * IDENTIFY PACKET DEVICE.
1056 * ATA software reset (SRST, the default) does not appear
1057 * to have this problem.
/* EDD workaround: reclassify as ATAPI on command-aborted. */
1059 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1060 u8 err = ata_chk_err(ap);
1061 if (err & ATA_ABORTED) {
1062 dev->class = ATA_DEV_ATAPI;
/* Fix id-page byte order for this CPU. */
1073 swap_buf_le16(dev->id, ATA_ID_WORDS);
1075 /* print device capabilities */
1076 printk(KERN_DEBUG "ata%u: dev %u cfg "
1077 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1078 ap->id, device, dev->id[49],
1079 dev->id[82], dev->id[83], dev->id[84],
1080 dev->id[85], dev->id[86], dev->id[87],
1084 * common ATA, ATAPI feature tests
1087 /* we require LBA and DMA support (bits 8 & 9 of word 49) */
1088 if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
1089 printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
1093 /* quick-n-dirty find max transfer mode; for printk only */
1094 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1096 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1098 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1099 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1104 /* ATA-specific feature tests */
1105 if (dev->class == ATA_DEV_ATA) {
1106 if (!ata_id_is_ata(dev->id)) /* sanity check */
/* Find the highest supported ATA major version bit. */
1109 tmp = dev->id[ATA_ID_MAJOR_VER];
1110 for (i = 14; i >= 1; i--)
1114 /* we require at least ATA-3 */
1116 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
/* Capacity: 48-bit (word 100) or 28-bit (word 60) sector count. */
1120 if (ata_id_has_lba48(dev->id)) {
1121 dev->flags |= ATA_DFLAG_LBA48;
1122 dev->n_sectors = ata_id_u64(dev->id, 100);
1124 dev->n_sectors = ata_id_u32(dev->id, 60);
1127 ap->host->max_cmd_len = 16;
1129 /* print device info to dmesg */
1130 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1132 ata_mode_string(xfer_modes),
1133 (unsigned long long)dev->n_sectors,
1134 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1137 /* ATAPI-specific feature tests */
1139 if (ata_id_is_ata(dev->id)) /* sanity check */
1142 rc = atapi_cdb_len(dev->id);
1143 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1144 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1147 ap->cdb_len = (unsigned int) rc;
1148 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1150 /* print device info to dmesg */
1151 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1153 ata_mode_string(xfer_modes));
1156 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
/* Error path: report, re-enable irqs, mark device unsupported. */
1160 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1163 ata_irq_on(ap); /* re-enable interrupts */
1164 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1165 DPRINTK("EXIT, err\n");
1169 * ata_bus_probe - Reset and probe ATA bus
1175 * Zero on success, non-zero on error.
/* Reset the bus, identify attached devices, and give the low-level
 * driver a per-device config hook.  Returns zero on success, non-zero
 * on error (see doc comment above). */
1178 static int ata_bus_probe(struct ata_port *ap)
1180 unsigned int i, found = 0;
/* Reset first; bail if the reset disabled the port. */
1182 ap->ops->phy_reset(ap);
1183 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1186 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1187 ata_dev_identify(ap, i);
1188 if (ata_dev_present(&ap->device[i])) {
1190 if (ap->ops->dev_config)
1191 ap->ops->dev_config(ap, &ap->device[i]);
1195 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1196 goto err_out_disable;
/* NOTE(review): a step between these two checks (orig. 1197-1198,
 * presumably the transfer-mode setup) was lost in extraction. */
1199 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1200 goto err_out_disable;
/* err_out_disable path (label/return lines lost in extraction). */
1205 ap->ops->port_disable(ap);
1217 void ata_port_probe(struct ata_port *ap)
1219 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1223 * __sata_phy_reset -
1229 void __sata_phy_reset(struct ata_port *ap)
/* (The sstatus declaration, orig. 1230-1231, was lost in extraction.) */
1232 unsigned long timeout = jiffies + (HZ * 5);
/* Optionally hard-reset the phy via SControl, then clear the reset. */
1234 if (ap->flags & ATA_FLAG_SATA_RESET) {
1235 scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */
1236 scr_read(ap, SCR_STATUS); /* dummy read; flush */
1237 udelay(400); /* FIXME: a guess */
1239 scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/clear reset */
1241 /* wait for phy to become ready, if necessary */
/* Poll the SStatus DET field until it leaves state 1 or the 5s timeout
 * expires.  (The do-loop header, orig. 1242-1243, was lost.) */
1244 sstatus = scr_read(ap, SCR_STATUS);
1245 if ((sstatus & 0xf) != 1)
1247 } while (time_before(jiffies, timeout));
1249 /* TODO: phy layer with polling, timeouts, etc. */
1250 if (sata_dev_present(ap))
/* No device: report the phy status and disable the port. */
1253 sstatus = scr_read(ap, SCR_STATUS);
1254 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1256 ata_port_disable(ap);
1259 if (ap->flags & ATA_FLAG_PORT_DISABLED)
/* Wait for the device to leave BSY; disable the port on timeout. */
1262 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1263 ata_port_disable(ap);
/* Device is there over a SATA cable. */
1267 ap->cbl = ATA_CBL_SATA;
1271 * sata_phy_reset -
/* Higher-level phy reset: perform __sata_phy_reset(), then check the
 * port state. */
1277 void sata_phy_reset(struct ata_port *ap)
1279 __sata_phy_reset(ap);
1280 if (ap->flags & ATA_FLAG_PORT_DISABLED)
/* NOTE(review): the tail of this function (orig. 1281-1285) was lost
 * in extraction -- presumably an early return here followed by the
 * bus-reset step; confirm upstream. */
1286 * ata_port_disable -
1292 void ata_port_disable(struct ata_port *ap)
1294 ap->device[0].class = ATA_DEV_NONE;
1295 ap->device[1].class = ATA_DEV_NONE;
1296 ap->flags |= ATA_FLAG_PORT_DISABLED;
/* Table mapping an ATA_SHIFT_* mode class to the base XFER_* value of
 * that class (used by base_from_shift() below).  NOTE(review): the
 * struct header lines (orig. 1298-1301) were lost in extraction. */
1302 } xfer_mode_classes[] = {
1303 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1304 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1305 { ATA_SHIFT_PIO, XFER_PIO_0 },
/* Look up the base XFER_* value for a mode class in xfer_mode_classes.
 * NOTE(review): the loop-index declaration and the not-found fallback
 * return (orig. 1309-1318) were lost in extraction. */
1308 static inline u8 base_from_shift(unsigned int shift)
1312 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1313 if (xfer_mode_classes[i].shift == shift)
1314 return xfer_mode_classes[i].base;
1319 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
/* Skip absent devices and disabled ports.  (The local declarations,
 * orig. 1320-1323, were lost in extraction.) */
1324 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1327 if (dev->xfer_shift == ATA_SHIFT_PIO)
1328 dev->flags |= ATA_DFLAG_PIO;
/* Presumably issues SET FEATURES - XFER MODE to the device (the callee
 * is outside this chunk; see ata_set_mode doc below). */
1330 ata_dev_set_xfermode(ap, dev);
/* Compute the index into xfer_mode_str[] for the log message. */
1332 base = base_from_shift(dev->xfer_shift);
1333 ofs = dev->xfer_mode - base;
1334 idx = ofs + dev->xfer_shift;
1335 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1337 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1338 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1340 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1341 ap->id, dev->devno, xfer_mode_str[idx]);
/* Choose a PIO mode from the port's mode mask and record it on every
 * present device, letting the LLD program controller timings. */
1344 static int ata_host_set_pio(struct ata_port *ap)
/* NOTE(review): local declarations and the bit-selection lines (orig.
 * 1345-1356, partially) were lost in extraction. */
1350 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
/* No PIO bit set at all -> cannot drive this port. */
1353 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1357 base = base_from_shift(ATA_SHIFT_PIO);
1358 xfer_mode = base + x;
1360 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1361 (int)base, (int)xfer_mode, mask, x);
/* Record the chosen PIO mode on every present device. */
1363 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1364 struct ata_device *dev = &ap->device[i];
1365 if (ata_dev_present(dev)) {
1366 dev->pio_mode = xfer_mode;
1367 dev->xfer_mode = xfer_mode;
1368 dev->xfer_shift = ATA_SHIFT_PIO;
1369 if (ap->ops->set_piomode)
1370 ap->ops->set_piomode(ap, dev);
1377 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1378 unsigned int xfer_shift)
1382 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1383 struct ata_device *dev = &ap->device[i];
1384 if (ata_dev_present(dev)) {
1385 dev->dma_mode = xfer_mode;
1386 dev->xfer_mode = xfer_mode;
1387 dev->xfer_shift = xfer_shift;
1388 if (ap->ops->set_dmamode)
1389 ap->ops->set_dmamode(ap, dev);
1395 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1396 * @ap: port on which timings will be programmed
1401 static void ata_set_mode(struct ata_port *ap)
1403 unsigned int i, xfer_shift;
/* NOTE(review): further declarations (xfer_mode, rc; orig. 1404-1406)
 * were lost in extraction. */
1407 /* step 1: always set host PIO timings */
1408 rc = ata_host_set_pio(ap);
1412 /* step 2: choose the best data xfer mode */
1413 xfer_mode = xfer_shift = 0;
1414 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1418 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1419 if (xfer_shift != ATA_SHIFT_PIO)
1420 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1422 /* step 4: update devices' xfer mode */
1423 ata_dev_set_mode(ap, &ap->device[0]);
1424 ata_dev_set_mode(ap, &ap->device[1]);
1426 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1429 if (ap->ops->post_set_mode)
1430 ap->ops->post_set_mode(ap);
/* Recompute each device's taskfile protocol and r/w opcodes. */
1432 for (i = 0; i < 2; i++) {
1433 struct ata_device *dev = &ap->device[i];
1434 ata_dev_set_protocol(dev);
/* Error path: disable the port (label line lost in extraction). */
1440 ata_port_disable(ap);
1444 * ata_busy_sleep - sleep until BSY clears, or timeout
1445 * @ap: port containing status register to be polled
1446 * @tmout_pat: impatience timeout
1447 * @tmout: overall timeout
1453 static unsigned int ata_busy_sleep (struct ata_port *ap,
1454 unsigned long tmout_pat,
1455 unsigned long tmout)
1457 unsigned long timer_start, timeout;
/* (The status declaration, orig. 1458-1459, was lost in extraction.) */
/* Phase 1: short busy-wait polls until the "impatience" timeout
 * @tmout_pat expires. */
1460 status = ata_busy_wait(ap, ATA_BUSY, 300);
1461 timer_start = jiffies;
1462 timeout = timer_start + tmout_pat;
1463 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1465 status = ata_busy_wait(ap, ATA_BUSY, 3);
/* Phase 2: warn the user, then keep polling until the overall
 * timeout @tmout. */
1468 if (status & ATA_BUSY)
1469 printk(KERN_WARNING "ata%u is slow to respond, "
1470 "please be patient\n", ap->id);
1472 timeout = timer_start + tmout;
1473 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1475 status = ata_chk_status(ap);
/* Still busy after the full timeout: report the failure. */
1478 if (status & ATA_BUSY) {
1479 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1480 ap->id, tmout / HZ);
/* NOTE(review): the return statements (orig. 1481-1486) were lost in
 * extraction. */
/**
 *	ata_bus_post_reset - wait for devices to become ready after reset
 *	@ap: port that was just reset
 *	@devmask: bit 0/1 set for each device found by ata_devchk()
 *
 *	After a bus reset: waits for device 0's BSY to clear, waits for
 *	device 1 to respond to taskfile register reads, then re-selects
 *	device 0 (with some possibly-redundant double selects).
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;
	/* if device 0 was found in ata_devchk, wait for its BSY to clear */
	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		/* sector-count/LBA-low reading back as 1 => device responded */
		if ((nsect == 1) && (lbal == 1))
		if (time_after(jiffies, timeout)) {
		msleep(50);	/* give drive a breather */
	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
		ap->ops->dev_select(ap, 1);
		ap->ops->dev_select(ap, 0);
/**
 *	ata_bus_edd - reset bus via EXECUTE DEVICE DIAGNOSTIC
 *	@ap: port to reset
 *
 *	Issues an EXECUTE DEVICE DIAGNOSTIC taskfile (with interrupts
 *	taken to a known, disabled state) and then sleep-waits for BSY
 *	to clear via ata_busy_sleep().
 */
static unsigned int ata_bus_edd(struct ata_port *ap)
	struct ata_taskfile tf;
	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;
	ata_tf_to_host(ap, &tf);
	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/**
 *	ata_bus_softreset - reset the bus by pulsing the SRST control bit
 *	@ap: port to reset
 *	@devmask: mask of devices expected on the port
 *
 *	Pulses SRST in the device control register (MMIO or port I/O as
 *	appropriate), then hands off to ata_bus_post_reset() to wait for
 *	the devices to come back.
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	DPRINTK("ata%u: bus reset via SRST\n", ap->id);
	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);
	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	ata_bus_post_reset(ap, devmask);
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
	dev0 = ata_devchk(ap, 0);
	dev1 = ata_devchk(ap, 1);
	devmask |= (1 << 0);
	devmask |= (1 << 1);
	/* select device 0 again */
	ap->ops->dev_select(ap, 0);
	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	err = ata_dev_try_classify(ap, 0);
	/* 0x81 in the dev0 error register means dev1 failed diagnostics */
	if ((slave_possible) && (err != 0x81))
		ata_dev_try_classify(ap, 1);
	/* re-enable interrupts */
	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);
	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
			outb(ap->ctl, ioaddr->ctl_addr);
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);
/**
 *	ata_get_mode_mask - transfer modes usable by all present devices
 *	@ap: port to query
 *	@shift: mode class (ATA_SHIFT_UDMA, ATA_SHIFT_MWDMA or ATA_SHIFT_PIO)
 *
 *	Intersects the host's supported-mode mask for the given class
 *	with what each present device reports in its IDENTIFY data, so
 *	the result is a mode both the controller and every device accept.
 */
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
	struct ata_device *master, *slave;
	master = &ap->device[0];
	slave = &ap->device[1];
	assert (ata_dev_present(master) || ata_dev_present(slave));
	if (shift == ATA_SHIFT_UDMA) {
		mask = ap->udma_mask;
		if (ata_dev_present(master))
			mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
		if (ata_dev_present(slave))
			mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
	else if (shift == ATA_SHIFT_MWDMA) {
		mask = ap->mwdma_mask;
		if (ata_dev_present(master))
			mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
		if (ata_dev_present(slave))
			mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
	else if (shift == ATA_SHIFT_PIO) {
		mask = ap->pio_mask;
		if (ata_dev_present(master)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
		if (ata_dev_present(slave)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
		mask = 0xffffffff;	/* shut up compiler warning */
1755 /* find greatest bit */
1756 static int fgb(u32 bitmap)
1761 for (i = 0; i < 32; i++)
1762 if (bitmap & (1 << i))
/**
 *	ata_choose_xfer_mode - pick the best transfer mode for a port
 *	@ap: port to examine
 *	@xfer_shift_out: where to store the chosen mode class shift
 *	(the chosen mode value itself is stored through *xfer_mode_out)
 *
 *	Walks xfer_mode_classes in order and, for each class, intersects
 *	host and device capabilities via ata_get_mode_mask(), choosing
 *	the highest mode in the first class with any usable mode.
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_choose_xfer_mode(struct ata_port *ap,
		unsigned int *xfer_shift_out)
	unsigned int mask, shift;
	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
		shift = xfer_mode_classes[i].shift;
		mask = ata_get_mode_mask(ap, shift);
		*xfer_mode_out = xfer_mode_classes[i].base + x;
		*xfer_shift_out = shift;
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@ap: Port associated with device @dev
 *	@dev: Device to which command will be sent
 *
 *	Builds a SET FEATURES taskfile carrying dev->xfer_mode in the
 *	sector count register, issues it under the host_set lock, and
 *	sleeps until the command completes (or disables the port if the
 *	command could not be issued).
 */
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");
	qc = ata_qc_new_init(ap, dev);
	qc->tf.command = ATA_CMD_SET_FEATURES;
	qc->tf.feature = SETFEATURES_XFER;
	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->tf.nsect = dev->xfer_mode;	/* mode value rides in sector count */
	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;
	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
	/* issue failure means the port is in trouble; take it offline */
	ata_port_disable(ap);
	wait_for_completion(&wait);
/**
 *	ata_sg_clean - unmap a command's DMA buffers
 *	@qc: command whose scatter/gather mapping is to be torn down
 *
 *	Undoes the PCI DMA mapping created by ata_sg_setup() /
 *	ata_sg_setup_one() and clears ATA_QCFLAG_DMAMAP.
 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->pci_dma_dir;
	assert(qc->flags & ATA_QCFLAG_DMAMAP);
	/* single-buffer commands always have exactly one sg element */
	if (qc->flags & ATA_QCFLAG_SINGLE)
		assert(qc->n_elem == 1);
	DPRINTK("unmapping %u sg elements\n", qc->n_elem);
	if (qc->flags & ATA_QCFLAG_SG)
		pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
		pci_unmap_single(ap->host_set->pdev, sg_dma_address(&sg[0]),
				 sg_dma_len(&sg[0]), dir);
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Converts the command's scatterlist into PRD entries, splitting
 *	any segment that would cross a 64KB boundary, and marks the
 *	final entry with ATA_PRD_EOT.
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int idx, nelem;
	assert(qc->n_elem > 0);
	for (nelem = qc->n_elem; nelem; nelem--,sg++) {
		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);
		offset = addr & 0xffff;
		/* clamp the chunk so it ends at the 64KB boundary */
		if ((offset + sg_len) > 0x10000)
			len = 0x10000 - offset;
		ap->prd[idx].addr = cpu_to_le32(addr);
		ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
	/* mark the last entry as end-of-table */
	ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Fills the PRD table for DMA-mapped commands; commands without
 *	ATA_QCFLAG_DMAMAP need no preparation here.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/**
 *	ata_sg_init_one - set up a single-entry scatterlist for a buffer
 *	@qc: command to attach the buffer to
 *	@buf: kernel-virtual data buffer
 *	@buflen: buffer length in bytes
 *
 *	Points the command at its embedded one-element scatterlist and
 *	fills that element to describe @buf.
 */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
	struct scatterlist *sg;
	qc->flags |= ATA_QCFLAG_SINGLE;
	memset(&qc->sgent, 0, sizeof(qc->sgent));
	qc->sg = &qc->sgent;
	/* describe the buffer by page + offset, as scatterlists require */
	sg->page = virt_to_page(buf);
	sg->offset = (unsigned long) buf & ~PAGE_MASK;
	sg_dma_len(sg) = buflen;
1953 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
1954 unsigned int n_elem)
1956 qc->flags |= ATA_QCFLAG_SG;
1958 qc->n_elem = n_elem;
/**
 *	ata_sg_setup_one - DMA-map the single buffer of a command
 *	@qc: command set up with ATA_QCFLAG_SINGLE
 *
 *	Maps qc->buf_virt for PCI DMA and stores the resulting bus
 *	address in the command's one-element scatterlist.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, non-zero if the mapping failed.
 */
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	int dir = qc->pci_dma_dir;
	struct scatterlist *sg = qc->sg;
	dma_addr_t dma_address;
	dma_address = pci_map_single(ap->host_set->pdev, qc->buf_virt,
				     sg_dma_len(sg), dir);
	if (pci_dma_mapping_error(dma_address))
	sg_dma_address(sg) = dma_address;
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
/**
 *	ata_sg_setup - DMA-map the scatterlist of a command
 *	@qc: command set up with ATA_QCFLAG_SG
 *
 *	Maps the scatter/gather table for PCI DMA and records how many
 *	elements the mapping produced.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	VPRINTK("ENTER, ata%u\n", ap->id);
	assert(qc->flags & ATA_QCFLAG_SG);
	dir = qc->pci_dma_dir;
	n_elem = pci_map_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
	DPRINTK("%d sg elements mapped\n", n_elem);
	/* mapping may coalesce entries; remember the mapped count */
	qc->n_elem = n_elem;
/**
 *	ata_pio_poll - poll for BSY-clear while a PIO command is in flight
 *	@ap: port whose PIO state machine is in a *_POLL state
 *
 *	Samples device status once.  If still busy past pio_task_timeout
 *	the state machine enters PIO_ST_TMOUT; if busy but not timed out
 *	the current poll state is kept and a short pause is requested.
 *	On BSY clear, the state machine returns to the non-polling state
 *	paired with the current poll state.
 *
 *	RETURNS:
 *	Delay before ata_pio_task() should run again (0 or ATA_SHORT_PAUSE).
 */
static unsigned long ata_pio_poll(struct ata_port *ap)
	unsigned int poll_state = PIO_ST_UNKNOWN;
	unsigned int reg_state = PIO_ST_UNKNOWN;
	const unsigned int tmout_state = PIO_ST_TMOUT;
	switch (ap->pio_task_state) {
		poll_state = PIO_ST_POLL;
	case PIO_ST_LAST_POLL:
		poll_state = PIO_ST_LAST_POLL;
		reg_state = PIO_ST_LAST;
	status = ata_chk_status(ap);
	if (status & ATA_BUSY) {
		if (time_after(jiffies, ap->pio_task_timeout)) {
			ap->pio_task_state = tmout_state;
		ap->pio_task_state = poll_state;
		return ATA_SHORT_PAUSE;
	ap->pio_task_state = reg_state;
/**
 *	ata_pio_complete - finish a PIO command once the device goes idle
 *	@ap: port owning the active command
 *
 *	Waits briefly for BSY/DRQ to clear; if the drive stays busy,
 *	falls back to the PIO_ST_LAST_POLL state rather than spinning.
 *	Once idle, completes the active qc with the final status,
 *	entering PIO_ST_ERR first if the status is not OK.
 */
static void ata_pio_complete (struct ata_port *ap)
	struct ata_queued_cmd *qc;
	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * PIO_ST_POLL state.
	 */
	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
			ap->pio_task_state = PIO_ST_LAST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
	drv_stat = ata_wait_idle(ap);
	if (!ata_ok(drv_stat)) {
		ap->pio_task_state = PIO_ST_ERR;
	qc = ata_qc_from_tag(ap, ap->active_tag);
	ap->pio_task_state = PIO_ST_IDLE;
	ata_qc_complete(qc, drv_stat);
/**
 *	swap_buf_le16 - convert a buffer of 16-bit LE words to CPU order
 *	@buf: buffer to byte-swap in place
 *	@buf_words: number of 16-bit words in @buf
 *
 *	The swap loop is compiled only under __BIG_ENDIAN (the #endif
 *	below closes that conditional), so on little-endian kernels this
 *	function is a no-op.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
/**
 *	ata_mmio_data_xfer - transfer PIO data via memory-mapped I/O
 *	@ap: port whose data register is memory-mapped
 *	@buf: data buffer (accessed as an array of 16-bit words)
 *	@buflen: buffer length in bytes
 *	@write_data: non-zero to write to the device, zero to read
 *
 *	Moves @buflen/2 little-endian 16-bit words between @buf and the
 *	device's data register using readw()/writew().
 */
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
	for (i = 0; i < words; i++)
		writew(le16_to_cpu(buf16[i]), mmio);
	for (i = 0; i < words; i++)
		buf16[i] = cpu_to_le16(readw(mmio));
/**
 *	ata_pio_data_xfer - transfer PIO data via port I/O
 *	@ap: port whose data register is in I/O port space
 *	@buf: data buffer
 *	@buflen: buffer length in bytes
 *	@write_data: non-zero to write to the device, zero to read
 *
 *	Uses string port I/O (outsw/insw) on the data register.
 *	NOTE(review): despite its name, "dwords" holds a count of
 *	16-bit words (buflen >> 1), which matches what insw/outsw take.
 */
static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
			      unsigned int buflen, int write_data)
	unsigned int dwords = buflen >> 1;
	outsw(ap->ioaddr.data_addr, buf, dwords);
	insw(ap->ioaddr.data_addr, buf, dwords);
2156 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2157 unsigned int buflen, int do_write)
2159 if (ap->flags & ATA_FLAG_MMIO)
2160 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2162 ata_pio_data_xfer(ap, buf, buflen, do_write);
/**
 *	ata_pio_sector - transfer one sector of a PIO data command
 *	@qc: active command
 *
 *	Locates the current sector within the command's scatterlist,
 *	kmaps the containing page and moves ATA_SECT_SIZE bytes in the
 *	direction given by ATA_TFLAG_WRITE.  Flags PIO_ST_LAST when
 *	this is the final sector of the command.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int offset;
	if (qc->cursect == (qc->nsect - 1))
		ap->pio_task_state = PIO_ST_LAST;
	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;
	buf = kmap(page) + offset;
	/* advance to the next sg entry once this one is exhausted */
	if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) {
	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
	/* do the actual data transfer */
	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
/**
 *	__atapi_pio_bytes - move @bytes of ATAPI PIO data through the sg list
 *	@qc: active ATAPI command
 *	@bytes: number of bytes the device wants transferred
 *
 *	Walks the scatterlist, transferring data chunk by chunk without
 *	ever crossing a page boundary in a single chunk, in the
 *	direction given by ATA_TFLAG_WRITE.  Flags PIO_ST_LAST when this
 *	transfer brings the command to its expected byte count.
 */
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int offset, count;
	if (qc->curbytes == qc->nbytes - bytes)
		ap->pio_task_state = PIO_ST_LAST;
	sg = &qc->sg[qc->cursg];
	offset = sg->offset + qc->cursg_ofs;
	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;
	/* clamp to what remains in this sg entry */
	count = min(sg_dma_len(sg) - qc->cursg_ofs, bytes);
	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);
	buf = kmap(page) + offset;
	qc->curbytes += count;
	qc->cursg_ofs += count;
	/* advance to the next sg entry when this one is used up */
	if (qc->cursg_ofs == sg_dma_len(sg)) {
	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
	/* do the actual data transfer */
	ata_data_xfer(ap, buf, count, do_write);
	if (qc->cursg_ofs < sg_dma_len(sg))
/**
 *	atapi_pio_bytes - service one ATAPI PIO data-transfer phase
 *	@qc: active ATAPI command
 *
 *	Reads the interrupt-reason and byte-count registers back from
 *	the device, verifies the device is in a data phase and that the
 *	transfer direction matches what we issued, then moves the bytes.
 *	Any protocol violation logs "ATAPI check failed" and puts the
 *	PIO state machine into PIO_ST_ERR.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
	ap->ops->tf_read(ap, &qc->tf);
	ireason = qc->tf.nsect;	/* interrupt reason rides in sector count */
	bc_lo = qc->tf.lbam;
	bc_hi = qc->tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;
	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
	__atapi_pio_bytes(qc, bytes);
	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
	       ap->id, dev->devno);
	ap->pio_task_state = PIO_ST_ERR;
/**
 *	ata_pio_block - service the next block of a polled PIO command
 *	@ap: port with the active command
 *
 *	Waits briefly for BSY to clear (falling back to PIO_ST_POLL if
 *	the drive stays busy), then transfers the next chunk: ATAPI
 *	commands via atapi_pio_bytes(), ATA commands via
 *	ata_pio_sector().  BSY=0/DRQ=0 completes an ATAPI command but
 *	is treated as an error for ATA data commands.
 */
static void ata_pio_block(struct ata_port *ap)
	struct ata_queued_cmd *qc;
	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * PIO_ST_POLL state.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ap->pio_task_state = PIO_ST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (is_atapi_taskfile(&qc->tf)) {
		/* no more data to transfer or unsupported ATAPI command */
		if ((status & ATA_DRQ) == 0) {
			ap->pio_task_state = PIO_ST_IDLE;
			ata_qc_complete(qc, status);
		atapi_pio_bytes(qc);
		/* handle BSY=0, DRQ=0 as error */
		if ((status & ATA_DRQ) == 0) {
			ap->pio_task_state = PIO_ST_ERR;
/**
 *	ata_pio_error - terminate a PIO command that hit an error/timeout
 *	@ap: port with the failed command
 *
 *	Logs the current drive status, returns the PIO state machine to
 *	idle, and completes the active command with ATA_ERR forced on.
 */
static void ata_pio_error(struct ata_port *ap)
	struct ata_queued_cmd *qc;
	qc = ata_qc_from_tag(ap, ap->active_tag);
	drv_stat = ata_chk_status(ap);
	printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
	ap->pio_task_state = PIO_ST_IDLE;
	/* force the error bit so completion paths see a failure */
	ata_qc_complete(qc, drv_stat | ATA_ERR);
/**
 *	ata_pio_task - workqueue driver for the polled PIO state machine
 *	@_data: struct ata_port * being serviced
 *
 *	Dispatches one step of the PIO state machine, then re-queues
 *	itself (delayed by whatever ata_pio_poll() requested when in a
 *	polling state) until the machine reaches IDLE, TMOUT or ERR.
 */
static void ata_pio_task(void *_data)
	struct ata_port *ap = _data;
	unsigned long timeout = 0;
	switch (ap->pio_task_state) {
		ata_pio_complete(ap);
	case PIO_ST_LAST_POLL:
		timeout = ata_pio_poll(ap);
	if ((ap->pio_task_state != PIO_ST_IDLE) &&
	    (ap->pio_task_state != PIO_ST_TMOUT) &&
	    (ap->pio_task_state != PIO_ST_ERR)) {
		/* delayed re-queue only when a poll pause was requested */
		queue_delayed_work(ata_wq, &ap->pio_task,
		queue_work(ata_wq, &ap->pio_task);
2400 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2401 struct scsi_cmnd *cmd)
2403 DECLARE_COMPLETION(wait);
2404 struct ata_queued_cmd *qc;
2405 unsigned long flags;
2406 int using_pio = dev->flags & ATA_DFLAG_PIO;
2409 DPRINTK("ATAPI request sense\n");
2411 qc = ata_qc_new_init(ap, dev);
2414 /* FIXME: is this needed? */
2415 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2417 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2418 qc->pci_dma_dir = PCI_DMA_FROMDEVICE;
2420 memset(&qc->cdb, 0, sizeof(ap->cdb_len));
2421 qc->cdb[0] = REQUEST_SENSE;
2422 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2424 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2425 qc->tf.command = ATA_CMD_PACKET;
2428 qc->tf.protocol = ATA_PROT_ATAPI;
2429 qc->tf.lbam = (8 * 1024) & 0xff;
2430 qc->tf.lbah = (8 * 1024) >> 8;
2432 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2434 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2435 qc->tf.feature |= ATAPI_PKT_DMA;
2438 qc->waiting = &wait;
2439 qc->complete_fn = ata_qc_complete_noop;
2441 spin_lock_irqsave(&ap->host_set->lock, flags);
2442 rc = ata_qc_issue(qc);
2443 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2446 ata_port_disable(ap);
2448 wait_for_completion(&wait);
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	u8 host_stat = 0, drv_stat;
	/* FIXME: doesn't this conflict with timeout handling? */
	if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
		struct scsi_cmnd *cmd = qc->scsicmd;
		if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
			/* finish completing original command */
			__ata_qc_complete(qc);
			/* fetch sense data, then report CHECK CONDITION */
			atapi_request_sense(ap, dev, cmd);
			cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
			scsi_finish_command(cmd);
	/* hack alert!  We cannot use the supplied completion
	 * function from inside the ->eh_strategy_handler() thread.
	 * libata is the only user of ->eh_strategy_handler() in
	 * any kernel, so the default scsi_done() assumes it is
	 * not being called from the SCSI EH.
	 */
	qc->scsidone = scsi_finish_command;
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI_DMA:
		host_stat = ata_bmdma_status(ap);
		/* before we do anything else, clear DMA-Start bit */
		drv_stat = ata_chk_status(ap);
		/* ack bmdma irq events */
		ata_bmdma_ack_irq(ap);
		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
		       ap->id, qc->tf.command, drv_stat, host_stat);
		/* complete taskfile transaction */
		ata_qc_complete(qc, drv_stat);
/**
 *	ata_eng_timeout - Handle timeout of queued command
 *	@ap: Port on which timed-out command is active
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
	struct ata_queued_cmd *qc;
	qc = ata_qc_from_tag(ap, ap->active_tag);
	/* no active command: nothing to time out -- log and bail */
	printk(KERN_ERR "ata%u: BUG: timeout without command\n",
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *
 *	Scans the port's tag space and atomically claims the first free
 *	tag; NULL results when every tag is in use.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
	struct ata_queued_cmd *qc = NULL;
	for (i = 0; i < ATA_MAX_QUEUE; i++)
		/* test_and_set_bit makes tag allocation race-free */
		if (!test_and_set_bit(i, &ap->qactive)) {
			qc = ata_qc_from_tag(ap, i);
/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@ap: Port associated with device @dev
 *	@dev: Device from whom we request an available command structure
 *
 *	Allocates a qc via ata_qc_new() and resets its per-command
 *	bookkeeping and taskfile for a fresh submission.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
				       struct ata_device *dev)
	struct ata_queued_cmd *qc;
	qc = ata_qc_new(ap);
	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
	qc->nbytes = qc->curbytes = 0;
	ata_tf_init(ap, &qc->tf, dev->devno);
	/* carry the device's 48-bit LBA capability into the taskfile */
	if (dev->flags & ATA_DFLAG_LBA48)
		qc->tf.flags |= ATA_TFLAG_LBA48;
2630 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
/**
 *	__ata_qc_complete - retire a command: release its tag, wake waiter
 *	@qc: command being retired
 *
 *	Poisons the qc's tag (and the port's active_tag when this was
 *	the active command), snapshots qc->waiting so the submitting
 *	thread can be woken, then frees the tag bit in ap->qactive.
 */
static void __ata_qc_complete(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	unsigned int tag, do_clear = 0;
	if (likely(ata_tag_valid(tag))) {
		if (tag == ap->active_tag)
			ap->active_tag = ATA_TAG_POISON;
		qc->tag = ATA_TAG_POISON;
	struct completion *waiting = qc->waiting;
	/* release the tag last, after all other teardown */
	if (likely(do_clear))
		clear_bit(tag, &ap->qactive);
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *	@drv_stat: ATA status register contents
 *
 *	Unmaps DMA buffers if the command was DMA-mapped, then invokes
 *	the command's completion callback.  A non-zero callback return
 *	suppresses the default completion (__ata_qc_complete).
 */
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
	assert(qc->flags & ATA_QCFLAG_ACTIVE);
	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
	/* call completion callback */
	rc = qc->complete_fn(qc, drv_stat);
	/* if callback indicates not to complete command (non-zero),
	 * return immediately
	 */
	__ata_qc_complete(qc);
/*
 *	ata_should_dma_map - should this command's buffers be DMA-mapped?
 *	@qc: command being prepared for issue
 *
 *	DMA protocols always need mapping; the ATAPI/PIO protocols need
 *	it only when the port delivers PIO data via DMA (ATA_FLAG_PIO_DMA).
 */
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO_MULT:
		if (ap->flags & ATA_FLAG_PIO_DMA)
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
int ata_qc_issue(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
		/* not using DMA mapping after all; drop the flag */
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	ap->ops->qc_prep(qc);
	/* mark active before handing off to the hardware */
	qc->ap->active_tag = qc->tag;
	qc->flags |= ATA_QCFLAG_ACTIVE;
	return ap->ops->qc_issue(qc);
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
int ata_qc_issue_prot(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	ata_dev_select(ap, qc->dev->devno, 1, 0);
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		ata_tf_to_host_nolock(ap, &qc->tf);
		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
		ata_qc_set_polling(qc);
		ata_tf_to_host_nolock(ap, &qc->tf);
		ap->pio_task_state = PIO_ST;
		queue_work(ata_wq, &ap->pio_task);
	case ATA_PROT_ATAPI:
		ata_qc_set_polling(qc);
		ata_tf_to_host_nolock(ap, &qc->tf);
		/* CDB delivery happens in the packet task */
		queue_work(ata_wq, &ap->packet_task);
	case ATA_PROT_ATAPI_NODATA:
		ata_tf_to_host_nolock(ap, &qc->tf);
		queue_work(ata_wq, &ap->packet_task);
	case ATA_PROT_ATAPI_DMA:
		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		queue_work(ata_wq, &ap->packet_task);
/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction (MMIO variant)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Loads the PRD table address, programs the transfer direction
 *	(leaving the start bit clear), and writes the r/w command to
 *	the device; the transfer itself is started later by
 *	ata_bmdma_start_mmio().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);
	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction (MMIO variant)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Sets the DMA Start bit in the BMDMA command register, handing
 *	the previously set-up transfer over to the controller.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expected, so I think it is best to not add a readb()
	 * without first all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
/**
 *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (port I/O)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Port-I/O twin of ata_bmdma_setup_mmio(): loads the PRD table
 *	address, programs the transfer direction with the start bit
 *	clear, and writes the r/w command to the device.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
/**
 *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (port I/O)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Sets the DMA Start bit in the BMDMA command register via port
 *	I/O, handing the transfer over to the controller.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2932 void ata_bmdma_start(struct ata_queued_cmd *qc)
2934 if (qc->ap->flags & ATA_FLAG_MMIO)
2935 ata_bmdma_start_mmio(qc);
2937 ata_bmdma_start_pio(qc);
2940 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2942 if (qc->ap->flags & ATA_FLAG_MMIO)
2943 ata_bmdma_setup_mmio(qc);
2945 ata_bmdma_setup_pio(qc);
/* Acknowledge/clear pending BMDMA interrupt events for @ap
 * (thin public wrapper around ata_bmdma_ack_irq()).
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
	ata_bmdma_ack_irq(ap);
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
	u8 status, host_stat;
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI:
		/* check status of DMA engine */
		host_stat = ata_bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))
		/* before we do anything else, clear DMA-Start bit */
	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_NODATA:
		/* check altstatus */
		status = ata_altstatus(ap);
		if (status & ATA_BUSY)
		/* check main status, clearing INTRQ */
		status = ata_chk_status(ap);
		if (unlikely(status & ATA_BUSY))
		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
			ap->id, qc->tf.protocol, status);
		/* ack bmdma irq events */
		ata_bmdma_ack_irq(ap);
		/* complete taskfile transaction */
		ata_qc_complete(qc, status);
	return 1;	/* irq handled */
	/* stray/shared interrupt bookkeeping */
	ap->stats.idle_irq++;
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
	return 0;	/* irq not handled */
3033 * ata_interrupt - Default ATA host interrupt handler
3035 * @dev_instance: pointer to our host information structure
3044 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3046 struct ata_host_set *host_set = dev_instance;
3048 unsigned int handled = 0;
3049 unsigned long flags;
3051 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3052 spin_lock_irqsave(&host_set->lock, flags);
3054 for (i = 0; i < host_set->n_ports; i++) {
3055 struct ata_port *ap;
3057 ap = host_set->ports[i];
3058 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3059 struct ata_queued_cmd *qc;
3061 qc = ata_qc_from_tag(ap, ap->active_tag);
3062 if (qc && (!(qc->tf.ctl & ATA_NIEN)))
3063 handled |= ata_host_intr(ap, qc);
3067 spin_unlock_irqrestore(&host_set->lock, flags);
3069 return IRQ_RETVAL(handled);
3073 * atapi_packet_task - Write CDB bytes to hardware
3074 * @_data: Port to which ATAPI device is attached.
3076 * When device has indicated its readiness to accept
3077 * a CDB, this function is called. Send the CDB.
3078 * If DMA is to be performed, exit immediately.
3079 * Otherwise, we are in polling mode, so poll
3080 * status under operation succeeds or fails.
3083 * Kernel thread context (may sleep)
3086 static void atapi_packet_task(void *_data)
3088 struct ata_port *ap = _data;
3089 struct ata_queued_cmd *qc;
3092 qc = ata_qc_from_tag(ap, ap->active_tag);
3094 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3096 /* sleep-wait for BSY to clear */
3097 DPRINTK("busy wait\n");
3098 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3101 /* make sure DRQ is set */
3102 status = ata_chk_status(ap);
3103 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3107 DPRINTK("send cdb\n");
3108 assert(ap->cdb_len >= 12);
3109 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3111 /* if we are DMA'ing, irq handler takes over from here */
3112 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3113 ap->ops->bmdma_start(qc); /* initiate bmdma */
3115 /* non-data commands are also handled via irq */
3116 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3120 /* PIO commands are handled by polling */
3122 ap->pio_task_state = PIO_ST;
3123 queue_work(ata_wq, &ap->pio_task);
3129 ata_qc_complete(qc, ATA_ERR);
3132 int ata_port_start (struct ata_port *ap)
3134 struct pci_dev *pdev = ap->host_set->pdev;
3136 ap->prd = pci_alloc_consistent(pdev, ATA_PRD_TBL_SZ, &ap->prd_dma);
3140 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3145 void ata_port_stop (struct ata_port *ap)
3147 struct pci_dev *pdev = ap->host_set->pdev;
3149 pci_free_consistent(pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3153 * ata_host_remove - Unregister SCSI host structure with upper layers
3154 * @ap: Port to unregister
3155 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3160 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3162 struct Scsi_Host *sh = ap->host;
3167 scsi_remove_host(sh);
3169 ap->ops->port_stop(ap);
3173 * ata_host_init - Initialize an ata_port structure
3174 * @ap: Structure to initialize
3175 * @host: associated SCSI mid-layer structure
3176 * @host_set: Collection of hosts to which @ap belongs
3177 * @ent: Probe information provided by low-level driver
3178 * @port_no: Port number associated with this ata_port
3184 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3185 struct ata_host_set *host_set,
3186 struct ata_probe_ent *ent, unsigned int port_no)
3192 host->max_channel = 1;
3193 host->unique_id = ata_unique_id++;
3194 host->max_cmd_len = 12;
3195 scsi_set_device(host, &ent->pdev->dev);
3196 scsi_assign_lock(host, &host_set->lock);
3198 ap->flags = ATA_FLAG_PORT_DISABLED;
3199 ap->id = host->unique_id;
3201 ap->ctl = ATA_DEVCTL_OBS;
3202 ap->host_set = host_set;
3203 ap->port_no = port_no;
3205 ent->legacy_mode ? ent->hard_port_no : port_no;
3206 ap->pio_mask = ent->pio_mask;
3207 ap->mwdma_mask = ent->mwdma_mask;
3208 ap->udma_mask = ent->udma_mask;
3209 ap->flags |= ent->host_flags;
3210 ap->ops = ent->port_ops;
3211 ap->cbl = ATA_CBL_NONE;
3212 ap->active_tag = ATA_TAG_POISON;
3213 ap->last_ctl = 0xFF;
3215 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3216 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3218 for (i = 0; i < ATA_MAX_DEVICES; i++)
3219 ap->device[i].devno = i;
3222 ap->stats.unhandled_irq = 1;
3223 ap->stats.idle_irq = 1;
3226 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3230 * ata_host_add - Attach low-level ATA driver to system
3231 * @ent: Information provided by low-level driver
3232 * @host_set: Collections of ports to which we add
3233 * @port_no: Port number associated with this host
3241 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3242 struct ata_host_set *host_set,
3243 unsigned int port_no)
3245 struct Scsi_Host *host;
3246 struct ata_port *ap;
3250 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3254 ap = (struct ata_port *) &host->hostdata[0];
3256 ata_host_init(ap, host, host_set, ent, port_no);
3258 rc = ap->ops->port_start(ap);
3265 scsi_host_put(host);
3279 int ata_device_add(struct ata_probe_ent *ent)
3281 unsigned int count = 0, i;
3282 struct pci_dev *pdev = ent->pdev;
3283 struct ata_host_set *host_set;
3286 /* alloc a container for our list of ATA ports (buses) */
3287 host_set = kmalloc(sizeof(struct ata_host_set) +
3288 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3291 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3292 spin_lock_init(&host_set->lock);
3294 host_set->pdev = pdev;
3295 host_set->n_ports = ent->n_ports;
3296 host_set->irq = ent->irq;
3297 host_set->mmio_base = ent->mmio_base;
3298 host_set->private_data = ent->private_data;
3299 host_set->ops = ent->port_ops;
3301 /* register each port bound to this device */
3302 for (i = 0; i < ent->n_ports; i++) {
3303 struct ata_port *ap;
3304 unsigned long xfer_mode_mask;
3306 ap = ata_host_add(ent, host_set, i);
3310 host_set->ports[i] = ap;
3311 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
3312 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3313 (ap->pio_mask << ATA_SHIFT_PIO);
3315 /* print per-port info to dmesg */
3316 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3317 "bmdma 0x%lX irq %lu\n",
3319 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3320 ata_mode_string(xfer_mode_mask),
3321 ap->ioaddr.cmd_addr,
3322 ap->ioaddr.ctl_addr,
3323 ap->ioaddr.bmdma_addr,
3327 host_set->ops->irq_clear(ap);
3336 /* obtain irq, that is shared between channels */
3337 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3338 DRV_NAME, host_set))
3341 /* perform each probe synchronously */
3342 DPRINTK("probe begin\n");
3343 for (i = 0; i < count; i++) {
3344 struct ata_port *ap;
3347 ap = host_set->ports[i];
3349 DPRINTK("ata%u: probe begin\n", ap->id);
3350 rc = ata_bus_probe(ap);
3351 DPRINTK("ata%u: probe end\n", ap->id);
3354 /* FIXME: do something useful here?
3355 * Current libata behavior will
3356 * tear down everything when
3357 * the module is removed
3358 * or the h/w is unplugged.
3362 rc = scsi_add_host(ap->host, &pdev->dev);
3364 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3366 /* FIXME: do something useful here */
3367 /* FIXME: handle unconditional calls to
3368 * scsi_scan_host and ata_host_remove, below,
3374 /* probes are done, now scan each port's disk(s) */
3375 DPRINTK("probe begin\n");
3376 for (i = 0; i < count; i++) {
3377 struct ata_port *ap = host_set->ports[i];
3379 scsi_scan_host(ap->host);
3382 pci_set_drvdata(pdev, host_set);
3384 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3385 return ent->n_ports; /* success */
3388 for (i = 0; i < count; i++) {
3389 ata_host_remove(host_set->ports[i], 1);
3390 scsi_host_put(host_set->ports[i]->host);
3393 VPRINTK("EXIT, returning 0\n");
3398 * ata_scsi_release - SCSI layer callback hook for host unload
3399 * @host: libata host to be unloaded
3401 * Performs all duties necessary to shut down a libata port...
3402 * Kill port kthread, disable port, and release resources.
3405 * Inherited from SCSI layer.
3411 int ata_scsi_release(struct Scsi_Host *host)
3413 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3417 ap->ops->port_disable(ap);
3418 ata_host_remove(ap, 0);
3425 * ata_std_ports - initialize ioaddr with standard port offsets.
3426 * @ioaddr: IO address structure to be initialized
3428 void ata_std_ports(struct ata_ioports *ioaddr)
3430 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3431 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3432 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3433 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3434 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3435 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3436 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3437 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3438 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3439 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
3442 static struct ata_probe_ent *
3443 ata_probe_ent_alloc(int n, struct pci_dev *pdev, struct ata_port_info **port)
3445 struct ata_probe_ent *probe_ent;
3448 probe_ent = kmalloc(sizeof(*probe_ent) * n, GFP_KERNEL);
3450 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
3455 memset(probe_ent, 0, sizeof(*probe_ent) * n);
3457 for (i = 0; i < n; i++) {
3458 INIT_LIST_HEAD(&probe_ent[i].node);
3459 probe_ent[i].pdev = pdev;
3461 probe_ent[i].sht = port[i]->sht;
3462 probe_ent[i].host_flags = port[i]->host_flags;
3463 probe_ent[i].pio_mask = port[i]->pio_mask;
3464 probe_ent[i].mwdma_mask = port[i]->mwdma_mask;
3465 probe_ent[i].udma_mask = port[i]->udma_mask;
3466 probe_ent[i].port_ops = port[i]->port_ops;
3473 struct ata_probe_ent *
3474 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
3476 struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(1, pdev, port);
3480 probe_ent->n_ports = 2;
3481 probe_ent->irq = pdev->irq;
3482 probe_ent->irq_flags = SA_SHIRQ;
3484 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
3485 probe_ent->port[0].altstatus_addr =
3486 probe_ent->port[0].ctl_addr =
3487 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
3488 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3490 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
3491 probe_ent->port[1].altstatus_addr =
3492 probe_ent->port[1].ctl_addr =
3493 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
3494 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
3496 ata_std_ports(&probe_ent->port[0]);
3497 ata_std_ports(&probe_ent->port[1]);
3502 struct ata_probe_ent *
3503 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port)
3505 struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(2, pdev, port);
3509 probe_ent[0].n_ports = 1;
3510 probe_ent[0].irq = 14;
3512 probe_ent[0].hard_port_no = 0;
3513 probe_ent[0].legacy_mode = 1;
3515 probe_ent[1].n_ports = 1;
3516 probe_ent[1].irq = 15;
3518 probe_ent[1].hard_port_no = 1;
3519 probe_ent[1].legacy_mode = 1;
3521 probe_ent[0].port[0].cmd_addr = 0x1f0;
3522 probe_ent[0].port[0].altstatus_addr =
3523 probe_ent[0].port[0].ctl_addr = 0x3f6;
3524 probe_ent[0].port[0].bmdma_addr = pci_resource_start(pdev, 4);
3526 probe_ent[1].port[0].cmd_addr = 0x170;
3527 probe_ent[1].port[0].altstatus_addr =
3528 probe_ent[1].port[0].ctl_addr = 0x376;
3529 probe_ent[1].port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
3531 ata_std_ports(&probe_ent[0].port[0]);
3532 ata_std_ports(&probe_ent[1].port[0]);
3538 * ata_pci_init_one - Initialize/register PCI IDE host controller
3539 * @pdev: Controller to be initialized
3540 * @port_info: Information from low-level host driver
3541 * @n_ports: Number of ports attached to host controller
3544 * Inherited from PCI layer (may sleep).
3550 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
3551 unsigned int n_ports)
3553 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
3554 struct ata_port_info *port[2];
3556 unsigned int legacy_mode = 0;
3561 port[0] = port_info[0];
3563 port[1] = port_info[1];
3567 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0) {
3568 /* TODO: support transitioning to native mode? */
3569 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
3570 mask = (1 << 2) | (1 << 0);
3571 if ((tmp8 & mask) != mask)
3572 legacy_mode = (1 << 3);
3576 if ((!legacy_mode) && (n_ports > 1)) {
3577 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
3581 rc = pci_enable_device(pdev);
3585 rc = pci_request_regions(pdev, DRV_NAME);
3590 if (!request_region(0x1f0, 8, "libata")) {
3591 struct resource *conflict, res;
3593 res.end = 0x1f0 + 8 - 1;
3594 conflict = ____request_resource(&ioport_resource, &res);
3595 if (!strcmp(conflict->name, "libata"))
3596 legacy_mode |= (1 << 0);
3598 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
3600 legacy_mode |= (1 << 0);
3602 if (!request_region(0x170, 8, "libata")) {
3603 struct resource *conflict, res;
3605 res.end = 0x170 + 8 - 1;
3606 conflict = ____request_resource(&ioport_resource, &res);
3607 if (!strcmp(conflict->name, "libata"))
3608 legacy_mode |= (1 << 1);
3610 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
3612 legacy_mode |= (1 << 1);
3615 /* we have legacy mode, but all ports are unavailable */
3616 if (legacy_mode == (1 << 3)) {
3618 goto err_out_regions;
3621 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3623 goto err_out_regions;
3624 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3626 goto err_out_regions;
3629 probe_ent = ata_pci_init_legacy_mode(pdev, port);
3631 probe_ent2 = &probe_ent[1];
3633 probe_ent = ata_pci_init_native_mode(pdev, port);
3636 goto err_out_regions;
3639 pci_set_master(pdev);
3641 /* FIXME: check ata_device_add return */
3643 if (legacy_mode & (1 << 0))
3644 ata_device_add(probe_ent);
3645 if (legacy_mode & (1 << 1))
3646 ata_device_add(probe_ent2);
3648 ata_device_add(probe_ent);
3655 if (legacy_mode & (1 << 0))
3656 release_region(0x1f0, 8);
3657 if (legacy_mode & (1 << 1))
3658 release_region(0x170, 8);
3659 pci_release_regions(pdev);
3661 pci_disable_device(pdev);
3666 * ata_pci_remove_one - PCI layer callback for device removal
3667 * @pdev: PCI device that was removed
3669 * PCI layer indicates to libata via this hook that
3670 * hot-unplug or module unload event has occured.
3671 * Handle this by unregistering all objects associated
3672 * with this PCI device. Free those objects. Then finally
3673 * release PCI resources and disable device.
3676 * Inherited from PCI layer (may sleep).
3679 void ata_pci_remove_one (struct pci_dev *pdev)
3681 struct ata_host_set *host_set = pci_get_drvdata(pdev);
3682 struct ata_port *ap;
3685 for (i = 0; i < host_set->n_ports; i++) {
3686 ap = host_set->ports[i];
3688 scsi_remove_host(ap->host);
3691 free_irq(host_set->irq, host_set);
3692 if (host_set->ops->host_stop)
3693 host_set->ops->host_stop(host_set);
3694 if (host_set->mmio_base)
3695 iounmap(host_set->mmio_base);
3697 for (i = 0; i < host_set->n_ports; i++) {
3698 ap = host_set->ports[i];
3700 ata_scsi_release(ap->host);
3701 scsi_host_put(ap->host);
3704 pci_release_regions(pdev);
3706 for (i = 0; i < host_set->n_ports; i++) {
3707 struct ata_ioports *ioaddr;
3709 ap = host_set->ports[i];
3710 ioaddr = &ap->ioaddr;
3712 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
3713 if (ioaddr->cmd_addr == 0x1f0)
3714 release_region(0x1f0, 8);
3715 else if (ioaddr->cmd_addr == 0x170)
3716 release_region(0x170, 8);
3721 pci_disable_device(pdev);
3722 pci_set_drvdata(pdev, NULL);
3725 /* move to PCI subsystem */
3726 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
3728 unsigned long tmp = 0;
3730 switch (bits->width) {
3733 pci_read_config_byte(pdev, bits->reg, &tmp8);
3739 pci_read_config_word(pdev, bits->reg, &tmp16);
3745 pci_read_config_dword(pdev, bits->reg, &tmp32);
3756 return (tmp == bits->val) ? 1 : 0;
3769 static int __init ata_init(void)
3771 ata_wq = create_workqueue("ata");
3775 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
3779 static void __exit ata_exit(void)
3781 destroy_workqueue(ata_wq);
module_init(ata_init);
module_exit(ata_exit);

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pci_init_legacy_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);