/*
 *  libata-core.c - helper library for ATA
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  The contents of this file are subject to the Open
 *  Software License version 1.1 that can be found at
 *  http://www.opensource.org/licenses/osl-1.1.txt and is included herein
 *  by reference.
 *
 *  Alternatively, the contents of this file may be used under the terms
 *  of the GNU General Public License version 2 (the "GPL") as distributed
 *  in the kernel source COPYING file, in which case the provisions of
 *  the GPL are applicable instead of the above.  If you wish to allow
 *  the use of your version of this file only under the terms of the
 *  GPL and not to allow others to use your version of this file under
 *  the OSL, indicate your decision by deleting the provisions above and
 *  replace them with the notice and other provisions required by the GPL.
 *  If you do not delete the provisions above, a recipient may use your
 *  version of this file under either the OSL or the GPL.
 */
25 #include <linux/config.h>
26 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
31 #include <linux/highmem.h>
32 #include <linux/spinlock.h>
33 #include <linux/blkdev.h>
34 #include <linux/delay.h>
35 #include <linux/timer.h>
36 #include <linux/interrupt.h>
37 #include <linux/completion.h>
38 #include <linux/suspend.h>
39 #include <linux/workqueue.h>
40 #include <scsi/scsi.h>
42 #include "scsi_priv.h"
43 #include <scsi/scsi_host.h>
44 #include <linux/libata.h>
46 #include <asm/semaphore.h>
47 #include <asm/byteorder.h>
51 static unsigned int ata_busy_sleep (struct ata_port *ap,
52 unsigned long tmout_pat,
54 static void ata_set_mode(struct ata_port *ap);
55 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
56 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
57 static int fgb(u32 bitmap);
58 static int ata_choose_xfer_mode(struct ata_port *ap,
60 unsigned int *xfer_shift_out);
61 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
62 static void __ata_qc_complete(struct ata_queued_cmd *qc);
64 static unsigned int ata_unique_id = 1;
65 static struct workqueue_struct *ata_wq;
67 MODULE_AUTHOR("Jeff Garzik");
68 MODULE_DESCRIPTION("Library module for ATA devices");
69 MODULE_LICENSE("GPL");
70 MODULE_VERSION(DRV_VERSION);
73 * ata_tf_load - send taskfile registers to host controller
74 * @ap: Port to which output is sent
75 * @tf: ATA taskfile register set
77 * Outputs ATA taskfile to standard ATA host controller.
80 * Inherited from caller.
83 static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
85 struct ata_ioports *ioaddr = &ap->ioaddr;
86 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
88 if (tf->ctl != ap->last_ctl) {
89 outb(tf->ctl, ioaddr->ctl_addr);
90 ap->last_ctl = tf->ctl;
94 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
95 outb(tf->hob_feature, ioaddr->feature_addr);
96 outb(tf->hob_nsect, ioaddr->nsect_addr);
97 outb(tf->hob_lbal, ioaddr->lbal_addr);
98 outb(tf->hob_lbam, ioaddr->lbam_addr);
99 outb(tf->hob_lbah, ioaddr->lbah_addr);
100 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
109 outb(tf->feature, ioaddr->feature_addr);
110 outb(tf->nsect, ioaddr->nsect_addr);
111 outb(tf->lbal, ioaddr->lbal_addr);
112 outb(tf->lbam, ioaddr->lbam_addr);
113 outb(tf->lbah, ioaddr->lbah_addr);
114 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
122 if (tf->flags & ATA_TFLAG_DEVICE) {
123 outb(tf->device, ioaddr->device_addr);
124 VPRINTK("device 0x%X\n", tf->device);
131 * ata_tf_load_mmio - send taskfile registers to host controller
132 * @ap: Port to which output is sent
133 * @tf: ATA taskfile register set
135 * Outputs ATA taskfile to standard ATA host controller using MMIO.
138 * Inherited from caller.
141 static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
143 struct ata_ioports *ioaddr = &ap->ioaddr;
144 unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
146 if (tf->ctl != ap->last_ctl) {
147 writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
148 ap->last_ctl = tf->ctl;
152 if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
153 writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
154 writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
155 writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
156 writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
157 writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
158 VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
167 writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
168 writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
169 writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
170 writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
171 writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
172 VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
180 if (tf->flags & ATA_TFLAG_DEVICE) {
181 writeb(tf->device, (void __iomem *) ioaddr->device_addr);
182 VPRINTK("device 0x%X\n", tf->device);
188 void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
190 if (ap->flags & ATA_FLAG_MMIO)
191 ata_tf_load_mmio(ap, tf);
193 ata_tf_load_pio(ap, tf);
197 * ata_exec_command - issue ATA command to host controller
198 * @ap: port to which command is being issued
199 * @tf: ATA taskfile register set
201 * Issues PIO/MMIO write to ATA command register, with proper
202 * synchronization with interrupt handler / other threads.
205 * spin_lock_irqsave(host_set lock)
208 static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
210 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
212 outb(tf->command, ap->ioaddr.command_addr);
218 * ata_exec_command_mmio - issue ATA command to host controller
219 * @ap: port to which command is being issued
220 * @tf: ATA taskfile register set
222 * Issues MMIO write to ATA command register, with proper
223 * synchronization with interrupt handler / other threads.
226 * spin_lock_irqsave(host_set lock)
229 static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
231 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
233 writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
237 void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
239 if (ap->flags & ATA_FLAG_MMIO)
240 ata_exec_command_mmio(ap, tf);
242 ata_exec_command_pio(ap, tf);
246 * ata_exec - issue ATA command to host controller
247 * @ap: port to which command is being issued
248 * @tf: ATA taskfile register set
250 * Issues PIO/MMIO write to ATA command register, with proper
251 * synchronization with interrupt handler / other threads.
254 * Obtains host_set lock.
257 static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
261 DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
262 spin_lock_irqsave(&ap->host_set->lock, flags);
263 ap->ops->exec_command(ap, tf);
264 spin_unlock_irqrestore(&ap->host_set->lock, flags);
268 * ata_tf_to_host - issue ATA taskfile to host controller
269 * @ap: port to which command is being issued
270 * @tf: ATA taskfile register set
272 * Issues ATA taskfile register set to ATA host controller,
273 * with proper synchronization with interrupt handler and
277 * Obtains host_set lock.
280 static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
282 ap->ops->tf_load(ap, tf);
288 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
289 * @ap: port to which command is being issued
290 * @tf: ATA taskfile register set
292 * Issues ATA taskfile register set to ATA host controller,
293 * with proper synchronization with interrupt handler and
297 * spin_lock_irqsave(host_set lock)
300 void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
302 ap->ops->tf_load(ap, tf);
303 ap->ops->exec_command(ap, tf);
307 * ata_tf_read - input device's ATA taskfile shadow registers
308 * @ap: Port from which input is read
309 * @tf: ATA taskfile register set for storing input
311 * Reads ATA taskfile registers for currently-selected device
315 * Inherited from caller.
318 static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
320 struct ata_ioports *ioaddr = &ap->ioaddr;
322 tf->nsect = inb(ioaddr->nsect_addr);
323 tf->lbal = inb(ioaddr->lbal_addr);
324 tf->lbam = inb(ioaddr->lbam_addr);
325 tf->lbah = inb(ioaddr->lbah_addr);
326 tf->device = inb(ioaddr->device_addr);
328 if (tf->flags & ATA_TFLAG_LBA48) {
329 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
330 tf->hob_feature = inb(ioaddr->error_addr);
331 tf->hob_nsect = inb(ioaddr->nsect_addr);
332 tf->hob_lbal = inb(ioaddr->lbal_addr);
333 tf->hob_lbam = inb(ioaddr->lbam_addr);
334 tf->hob_lbah = inb(ioaddr->lbah_addr);
339 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
340 * @ap: Port from which input is read
341 * @tf: ATA taskfile register set for storing input
343 * Reads ATA taskfile registers for currently-selected device
347 * Inherited from caller.
350 static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
352 struct ata_ioports *ioaddr = &ap->ioaddr;
354 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
355 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
356 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
357 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
358 tf->device = readb((void __iomem *)ioaddr->device_addr);
360 if (tf->flags & ATA_TFLAG_LBA48) {
361 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
362 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
363 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
364 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
365 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
366 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
370 void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
372 if (ap->flags & ATA_FLAG_MMIO)
373 ata_tf_read_mmio(ap, tf);
375 ata_tf_read_pio(ap, tf);
379 * ata_check_status - Read device status reg & clear interrupt
380 * @ap: port where the device is
382 * Reads ATA taskfile status register for currently-selected device
383 * and return it's value. This also clears pending interrupts
387 * Inherited from caller.
389 static u8 ata_check_status_pio(struct ata_port *ap)
391 return inb(ap->ioaddr.status_addr);
395 * ata_check_status_mmio - Read device status reg & clear interrupt
396 * @ap: port where the device is
398 * Reads ATA taskfile status register for currently-selected device
399 * via MMIO and return it's value. This also clears pending interrupts
403 * Inherited from caller.
405 static u8 ata_check_status_mmio(struct ata_port *ap)
407 return readb((void __iomem *) ap->ioaddr.status_addr);
410 u8 ata_check_status(struct ata_port *ap)
412 if (ap->flags & ATA_FLAG_MMIO)
413 return ata_check_status_mmio(ap);
414 return ata_check_status_pio(ap);
418 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
419 * @tf: Taskfile to convert
420 * @fis: Buffer into which data will output
421 * @pmp: Port multiplier port
423 * Converts a standard ATA taskfile to a Serial ATA
424 * FIS structure (Register - Host to Device).
427 * Inherited from caller.
430 void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
432 fis[0] = 0x27; /* Register - Host to Device FIS */
433 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
434 bit 7 indicates Command FIS */
435 fis[2] = tf->command;
436 fis[3] = tf->feature;
443 fis[8] = tf->hob_lbal;
444 fis[9] = tf->hob_lbam;
445 fis[10] = tf->hob_lbah;
446 fis[11] = tf->hob_feature;
449 fis[13] = tf->hob_nsect;
460 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
461 * @fis: Buffer from which data will be input
462 * @tf: Taskfile to output
464 * Converts a standard ATA taskfile to a Serial ATA
465 * FIS structure (Register - Host to Device).
468 * Inherited from caller.
471 void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
473 tf->command = fis[2]; /* status */
474 tf->feature = fis[3]; /* error */
481 tf->hob_lbal = fis[8];
482 tf->hob_lbam = fis[9];
483 tf->hob_lbah = fis[10];
486 tf->hob_nsect = fis[13];
490 * ata_prot_to_cmd - determine which read/write opcodes to use
491 * @protocol: ATA_PROT_xxx taskfile protocol
492 * @lba48: true is lba48 is present
494 * Given necessary input, determine which read/write commands
495 * to use to transfer data.
500 static int ata_prot_to_cmd(int protocol, int lba48)
502 int rcmd = 0, wcmd = 0;
507 rcmd = ATA_CMD_PIO_READ_EXT;
508 wcmd = ATA_CMD_PIO_WRITE_EXT;
510 rcmd = ATA_CMD_PIO_READ;
511 wcmd = ATA_CMD_PIO_WRITE;
517 rcmd = ATA_CMD_READ_EXT;
518 wcmd = ATA_CMD_WRITE_EXT;
521 wcmd = ATA_CMD_WRITE;
529 return rcmd | (wcmd << 8);
533 * ata_dev_set_protocol - set taskfile protocol and r/w commands
534 * @dev: device to examine and configure
536 * Examine the device configuration, after we have
537 * read the identify-device page and configured the
538 * data transfer mode. Set internal state related to
539 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
540 * and calculate the proper read/write commands to use.
545 static void ata_dev_set_protocol(struct ata_device *dev)
547 int pio = (dev->flags & ATA_DFLAG_PIO);
548 int lba48 = (dev->flags & ATA_DFLAG_LBA48);
552 proto = dev->xfer_protocol = ATA_PROT_PIO;
554 proto = dev->xfer_protocol = ATA_PROT_DMA;
556 cmd = ata_prot_to_cmd(proto, lba48);
560 dev->read_cmd = cmd & 0xff;
561 dev->write_cmd = (cmd >> 8) & 0xff;
/* Human-readable names, indexed by (xfer_shift + mode offset);
 * order must match the ATA_SHIFT_UDMA/MWDMA/PIO bit layout. */
static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};
584 * ata_udma_string - convert UDMA bit offset to string
585 * @mask: mask of bits supported; only highest bit counts.
587 * Determine string which represents the highest speed
588 * (highest bit in @udma_mask).
594 * Constant C string representing highest speed listed in
595 * @udma_mask, or the constant C string "<n/a>".
598 static const char *ata_mode_string(unsigned int mask)
602 for (i = 7; i >= 0; i--)
605 for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
608 for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
615 return xfer_mode_str[i];
619 * ata_pio_devchk - PATA device presence detection
620 * @ap: ATA channel to examine
621 * @device: Device to examine (starting at zero)
623 * This technique was originally described in
624 * Hale Landis's ATADRVR (www.ata-atapi.com), and
625 * later found its way into the ATA/ATAPI spec.
627 * Write a pattern to the ATA shadow registers,
628 * and if a device is present, it will respond by
629 * correctly storing and echoing back the
630 * ATA shadow register contents.
636 static unsigned int ata_pio_devchk(struct ata_port *ap,
639 struct ata_ioports *ioaddr = &ap->ioaddr;
642 ap->ops->dev_select(ap, device);
644 outb(0x55, ioaddr->nsect_addr);
645 outb(0xaa, ioaddr->lbal_addr);
647 outb(0xaa, ioaddr->nsect_addr);
648 outb(0x55, ioaddr->lbal_addr);
650 outb(0x55, ioaddr->nsect_addr);
651 outb(0xaa, ioaddr->lbal_addr);
653 nsect = inb(ioaddr->nsect_addr);
654 lbal = inb(ioaddr->lbal_addr);
656 if ((nsect == 0x55) && (lbal == 0xaa))
657 return 1; /* we found a device */
659 return 0; /* nothing found */
663 * ata_mmio_devchk - PATA device presence detection
664 * @ap: ATA channel to examine
665 * @device: Device to examine (starting at zero)
667 * This technique was originally described in
668 * Hale Landis's ATADRVR (www.ata-atapi.com), and
669 * later found its way into the ATA/ATAPI spec.
671 * Write a pattern to the ATA shadow registers,
672 * and if a device is present, it will respond by
673 * correctly storing and echoing back the
674 * ATA shadow register contents.
680 static unsigned int ata_mmio_devchk(struct ata_port *ap,
683 struct ata_ioports *ioaddr = &ap->ioaddr;
686 ap->ops->dev_select(ap, device);
688 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
689 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
691 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
692 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
694 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
695 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
697 nsect = readb((void __iomem *) ioaddr->nsect_addr);
698 lbal = readb((void __iomem *) ioaddr->lbal_addr);
700 if ((nsect == 0x55) && (lbal == 0xaa))
701 return 1; /* we found a device */
703 return 0; /* nothing found */
707 * ata_devchk - PATA device presence detection
708 * @ap: ATA channel to examine
709 * @device: Device to examine (starting at zero)
711 * Dispatch ATA device presence detection, depending
712 * on whether we are using PIO or MMIO to talk to the
713 * ATA shadow registers.
719 static unsigned int ata_devchk(struct ata_port *ap,
722 if (ap->flags & ATA_FLAG_MMIO)
723 return ata_mmio_devchk(ap, device);
724 return ata_pio_devchk(ap, device);
728 * ata_dev_classify - determine device type based on ATA-spec signature
729 * @tf: ATA taskfile register set for device to be identified
731 * Determine from taskfile register contents whether a device is
732 * ATA or ATAPI, as per "Signature and persistence" section
733 * of ATA/PI spec (volume 1, sect 5.14).
739 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
740 * the event of failure.
743 unsigned int ata_dev_classify(struct ata_taskfile *tf)
745 /* Apple's open source Darwin code hints that some devices only
746 * put a proper signature into the LBA mid/high registers,
747 * So, we only check those. It's sufficient for uniqueness.
750 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
751 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
752 DPRINTK("found ATA device by sig\n");
756 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
757 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
758 DPRINTK("found ATAPI device by sig\n");
759 return ATA_DEV_ATAPI;
762 DPRINTK("unknown device\n");
763 return ATA_DEV_UNKNOWN;
767 * ata_dev_try_classify - Parse returned ATA device signature
768 * @ap: ATA channel to examine
769 * @device: Device to examine (starting at zero)
771 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
772 * an ATA/ATAPI-defined set of values is placed in the ATA
773 * shadow registers, indicating the results of device detection
776 * Select the ATA device, and read the values from the ATA shadow
777 * registers. Then parse according to the Error register value,
778 * and the spec-defined values examined by ata_dev_classify().
784 static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
786 struct ata_device *dev = &ap->device[device];
787 struct ata_taskfile tf;
791 ap->ops->dev_select(ap, device);
793 memset(&tf, 0, sizeof(tf));
795 err = ata_chk_err(ap);
796 ap->ops->tf_read(ap, &tf);
798 dev->class = ATA_DEV_NONE;
800 /* see if device passed diags */
803 else if ((device == 0) && (err == 0x81))
808 /* determine if device if ATA or ATAPI */
809 class = ata_dev_classify(&tf);
810 if (class == ATA_DEV_UNKNOWN)
812 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
821 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
822 * @id: IDENTIFY DEVICE results we will examine
823 * @s: string into which data is output
824 * @ofs: offset into identify device page
825 * @len: length of string to return. must be an even number.
827 * The strings in the IDENTIFY DEVICE page are broken up into
828 * 16-bit chunks. Run through the string, and output each
829 * 8-bit chunk linearly, regardless of platform.
835 void ata_dev_id_string(u16 *id, unsigned char *s,
836 unsigned int ofs, unsigned int len)
/* No-op device-select hook for controllers that do not need one. */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
859 * ata_std_dev_select - Select device 0/1 on ATA bus
860 * @ap: ATA channel to manipulate
861 * @device: ATA device (numbered from zero) to select
863 * Use the method defined in the ATA specification to
864 * make either device 0, or device 1, active on the
871 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
876 tmp = ATA_DEVICE_OBS;
878 tmp = ATA_DEVICE_OBS | ATA_DEV1;
880 if (ap->flags & ATA_FLAG_MMIO) {
881 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
883 outb(tmp, ap->ioaddr.device_addr);
885 ata_pause(ap); /* needed; also flushes, for mmio */
889 * ata_dev_select - Select device 0/1 on ATA bus
890 * @ap: ATA channel to manipulate
891 * @device: ATA device (numbered from zero) to select
892 * @wait: non-zero to wait for Status register BSY bit to clear
893 * @can_sleep: non-zero if context allows sleeping
895 * Use the method defined in the ATA specification to
896 * make either device 0, or device 1, active on the
899 * This is a high-level version of ata_std_dev_select(),
900 * which additionally provides the services of inserting
901 * the proper pauses and status polling, where needed.
907 void ata_dev_select(struct ata_port *ap, unsigned int device,
908 unsigned int wait, unsigned int can_sleep)
910 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
911 ap->id, device, wait);
916 ap->ops->dev_select(ap, device);
919 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
926 * ata_dump_id - IDENTIFY DEVICE info debugging output
927 * @dev: Device whose IDENTIFY DEVICE page we will dump
929 * Dump selected 16-bit words from a detected device's
930 * IDENTIFY PAGE page.
936 static inline void ata_dump_id(struct ata_device *dev)
938 DPRINTK("49==0x%04x "
948 DPRINTK("80==0x%04x "
958 DPRINTK("88==0x%04x "
965 * ata_dev_identify - obtain IDENTIFY x DEVICE page
966 * @ap: port on which device we wish to probe resides
967 * @device: device bus address, starting at zero
969 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
970 * command, and read back the 512-byte device information page.
971 * The device information page is fed to us via the standard
972 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
973 * using standard PIO-IN paths)
975 * After reading the device information page, we use several
976 * bits of information from it to initialize data structures
977 * that will be used during the lifetime of the ata_device.
978 * Other data from the info page is used to disqualify certain
979 * older ATA devices we do not wish to support.
982 * Inherited from caller. Some functions called by this function
983 * obtain the host_set lock.
/* NOTE(review): this block appears truncated in the file (gaps in the
 * embedded line numbering: missing locals, closing braces, retry label,
 * err_out path).  Comments below annotate what IS visible; recover the
 * missing lines from upstream libata-core.c before relying on it. */
986 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
988 struct ata_device *dev = &ap->device[device];
991 unsigned long xfer_modes;
993 unsigned int using_edd;
994 DECLARE_COMPLETION(wait);
995 struct ata_queued_cmd *qc;
/* bail out early when bus probing found no device at this address */
999 if (!ata_dev_present(dev)) {
1000 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
/* SRST/SATA reset means we did NOT use execute-device-diag (EDD) */
1005 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
1010 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
1012 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
1013 dev->class == ATA_DEV_NONE);
1015 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
/* build a queued command that reads the 512-byte identify page
 * into dev->id via the PIO protocol */
1017 qc = ata_qc_new_init(ap, dev);
1020 ata_sg_init_one(qc, dev->id, sizeof(dev->id));
1021 qc->dma_dir = DMA_FROM_DEVICE;
1022 qc->tf.protocol = ATA_PROT_PIO;
1026 if (dev->class == ATA_DEV_ATA) {
1027 qc->tf.command = ATA_CMD_ID_ATA;
1028 DPRINTK("do ATA identify\n");
1030 qc->tf.command = ATA_CMD_ID_ATAPI;
1031 DPRINTK("do ATAPI identify\n");
/* synchronous issue: sleep until the noop completion fires */
1034 qc->waiting = &wait;
1035 qc->complete_fn = ata_qc_complete_noop;
1037 spin_lock_irqsave(&ap->host_set->lock, flags);
1038 rc = ata_qc_issue(qc);
1039 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1044 wait_for_completion(&wait);
1046 status = ata_chk_status(ap);
1047 if (status & ATA_ERR) {
1049 * arg! EDD works for all test cases, but seems to return
1050 * the ATA signature for some ATAPI devices. Until the
1051 * reason for this is found and fixed, we fix up the mess
1052 * here. If IDENTIFY DEVICE returns command aborted
1053 * (as ATAPI devices do), then we issue an
1054 * IDENTIFY PACKET DEVICE.
1056 * ATA software reset (SRST, the default) does not appear
1057 * to have this problem.
1059 if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
1060 u8 err = ata_chk_err(ap);
/* device aborted IDENTIFY DEVICE: reclassify as ATAPI and retry */
1061 if (err & ATA_ABORTED) {
1062 dev->class = ATA_DEV_ATAPI;
/* identify page arrives little-endian; fix up for this CPU */
1073 swap_buf_le16(dev->id, ATA_ID_WORDS);
1075 /* print device capabilities */
1076 printk(KERN_DEBUG "ata%u: dev %u cfg "
1077 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1078 ap->id, device, dev->id[49],
1079 dev->id[82], dev->id[83], dev->id[84],
1080 dev->id[85], dev->id[86], dev->id[87],
1084 * common ATA, ATAPI feature tests
1087 /* we require LBA and DMA support (bits 8 & 9 of word 49) */
1088 if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
1089 printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
1093 /* quick-n-dirty find max transfer mode; for printk only */
1094 xfer_modes = dev->id[ATA_ID_UDMA_MODES];
1096 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
1098 xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
1099 xfer_modes |= (0x7 << ATA_SHIFT_PIO);
1104 /* ATA-specific feature tests */
1105 if (dev->class == ATA_DEV_ATA) {
1106 if (!ata_id_is_ata(dev->id)) /* sanity check */
/* scan major-version word for the highest ATA revision bit */
1109 tmp = dev->id[ATA_ID_MAJOR_VER];
1110 for (i = 14; i >= 1; i--)
1114 /* we require at least ATA-3 */
1116 printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
/* lba48 devices report a 64-bit sector count at word 100 */
1120 if (ata_id_has_lba48(dev->id)) {
1121 dev->flags |= ATA_DFLAG_LBA48;
1122 dev->n_sectors = ata_id_u64(dev->id, 100);
1124 dev->n_sectors = ata_id_u32(dev->id, 60);
1127 ap->host->max_cmd_len = 16;
1129 /* print device info to dmesg */
1130 printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
1132 ata_mode_string(xfer_modes),
1133 (unsigned long long)dev->n_sectors,
1134 dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
1137 /* ATAPI-specific feature tests */
1139 if (ata_id_is_ata(dev->id)) /* sanity check */
/* ATAPI packet (CDB) length must be 12 or 16 bytes */
1142 rc = atapi_cdb_len(dev->id);
1143 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1144 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1147 ap->cdb_len = (unsigned int) rc;
1148 ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
1150 /* print device info to dmesg */
1151 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1153 ata_mode_string(xfer_modes));
1156 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
/* error path: mark the device unsupported and restore interrupts */
1160 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
1163 ata_irq_on(ap); /* re-enable interrupts */
1164 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
1165 DPRINTK("EXIT, err\n");
1169 * ata_bus_probe - Reset and probe ATA bus
1175 * Zero on success, non-zero on error.
1178 static int ata_bus_probe(struct ata_port *ap)
1180 unsigned int i, found = 0;
1182 ap->ops->phy_reset(ap);
1183 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1186 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1187 ata_dev_identify(ap, i);
1188 if (ata_dev_present(&ap->device[i])) {
1190 if (ap->ops->dev_config)
1191 ap->ops->dev_config(ap, &ap->device[i]);
1195 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1196 goto err_out_disable;
1199 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1200 goto err_out_disable;
1205 ap->ops->port_disable(ap);
1217 void ata_port_probe(struct ata_port *ap)
1219 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1223 * __sata_phy_reset -
1229 void __sata_phy_reset(struct ata_port *ap)
1232 unsigned long timeout = jiffies + (HZ * 5);
1234 if (ap->flags & ATA_FLAG_SATA_RESET) {
1235 scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */
1236 scr_read(ap, SCR_STATUS); /* dummy read; flush */
1237 udelay(400); /* FIXME: a guess */
1239 scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/clear reset */
1241 /* wait for phy to become ready, if necessary */
1244 sstatus = scr_read(ap, SCR_STATUS);
1245 if ((sstatus & 0xf) != 1)
1247 } while (time_before(jiffies, timeout));
1249 /* TODO: phy layer with polling, timeouts, etc. */
1250 if (sata_dev_present(ap))
1253 sstatus = scr_read(ap, SCR_STATUS);
1254 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
1256 ata_port_disable(ap);
1259 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1262 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1263 ata_port_disable(ap);
1267 ap->cbl = ATA_CBL_SATA;
1271 * __sata_phy_reset -
1277 void sata_phy_reset(struct ata_port *ap)
1279 __sata_phy_reset(ap);
1280 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1286 * ata_port_disable -
1292 void ata_port_disable(struct ata_port *ap)
1294 ap->device[0].class = ATA_DEV_NONE;
1295 ap->device[1].class = ATA_DEV_NONE;
1296 ap->flags |= ATA_FLAG_PORT_DISABLED;
1302 } xfer_mode_classes[] = {
1303 { ATA_SHIFT_UDMA, XFER_UDMA_0 },
1304 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
1305 { ATA_SHIFT_PIO, XFER_PIO_0 },
1308 static inline u8 base_from_shift(unsigned int shift)
1312 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
1313 if (xfer_mode_classes[i].shift == shift)
1314 return xfer_mode_classes[i].base;
1319 static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1324 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
1327 if (dev->xfer_shift == ATA_SHIFT_PIO)
1328 dev->flags |= ATA_DFLAG_PIO;
1330 ata_dev_set_xfermode(ap, dev);
1332 base = base_from_shift(dev->xfer_shift);
1333 ofs = dev->xfer_mode - base;
1334 idx = ofs + dev->xfer_shift;
1335 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
1337 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
1338 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
1340 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1341 ap->id, dev->devno, xfer_mode_str[idx]);
1344 static int ata_host_set_pio(struct ata_port *ap)
1350 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
1353 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
1357 base = base_from_shift(ATA_SHIFT_PIO);
1358 xfer_mode = base + x;
1360 DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
1361 (int)base, (int)xfer_mode, mask, x);
1363 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1364 struct ata_device *dev = &ap->device[i];
1365 if (ata_dev_present(dev)) {
1366 dev->pio_mode = xfer_mode;
1367 dev->xfer_mode = xfer_mode;
1368 dev->xfer_shift = ATA_SHIFT_PIO;
1369 if (ap->ops->set_piomode)
1370 ap->ops->set_piomode(ap, dev);
1377 static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
1378 unsigned int xfer_shift)
1382 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1383 struct ata_device *dev = &ap->device[i];
1384 if (ata_dev_present(dev)) {
1385 dev->dma_mode = xfer_mode;
1386 dev->xfer_mode = xfer_mode;
1387 dev->xfer_shift = xfer_shift;
1388 if (ap->ops->set_dmamode)
1389 ap->ops->set_dmamode(ap, dev);
1395 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1396 * @ap: port on which timings will be programmed
1401 static void ata_set_mode(struct ata_port *ap)
1403 unsigned int i, xfer_shift;
1407 /* step 1: always set host PIO timings */
1408 rc = ata_host_set_pio(ap);
1412 /* step 2: choose the best data xfer mode */
1413 xfer_mode = xfer_shift = 0;
1414 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
1418 /* step 3: if that xfer mode isn't PIO, set host DMA timings */
1419 if (xfer_shift != ATA_SHIFT_PIO)
1420 ata_host_set_dma(ap, xfer_mode, xfer_shift);
1422 /* step 4: update devices' xfer mode */
1423 ata_dev_set_mode(ap, &ap->device[0]);
1424 ata_dev_set_mode(ap, &ap->device[1]);
1426 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1429 if (ap->ops->post_set_mode)
1430 ap->ops->post_set_mode(ap);
1432 for (i = 0; i < 2; i++) {
1433 struct ata_device *dev = &ap->device[i];
1434 ata_dev_set_protocol(dev);
1440 ata_port_disable(ap);
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (warn after this long)
 *	@tmout: overall timeout (give up after this long)
 *
 *	Polls the port status register until BSY deasserts.  After
 *	@tmout_pat jiffies a "slow to respond" warning is printed and
 *	polling continues up to @tmout jiffies total.
 */
static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout)
	unsigned long timer_start, timeout;

	/* quick busy-wait first; many devices clear BSY almost at once */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		status = ata_busy_wait(ap, ATA_BUSY, 3);

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	/* keep polling (with the full timeout) until BSY clears */
	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		status = ata_chk_status(ap);

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
/*
 *	ata_bus_post_reset - wait for devices to become ready after reset
 *	@ap: port that was just reset
 *	@devmask: bit 0 / bit 1 set for each device found by ata_devchk
 *
 *	Waits for BSY to clear on device 0, then spins until device 1
 *	responds to taskfile register writes (nsect/lbal read back as 1),
 *	and finally re-runs the historical device-select dance.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		/* device 1 is responding to register writes */
		if ((nsect == 1) && (lbal == 1))
		if (time_after(jiffies, timeout)) {
		msleep(50);	/* give drive a breather */
	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	ap->ops->dev_select(ap, 1);
	ap->ops->dev_select(ap, 0);
/*
 *	ata_bus_edd - reset bus via EXECUTE DEVICE DIAGNOSTIC
 *	@ap: port to reset
 *
 *	Issues the EDD taskfile and returns the ata_busy_sleep() result
 *	(non-zero when the device never deasserted BSY).
 */
static unsigned int ata_bus_edd(struct ata_port *ap)
	struct ata_taskfile tf;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;
	ata_tf_to_host(ap, &tf);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
/*
 *	ata_bus_softreset - reset bus by pulsing SRST in the control register
 *	@ap: port to reset
 *	@devmask: devices found on the port (passed to post-reset wait)
 *
 *	Pulses ATA_SRST (assert, then deassert) via MMIO or port I/O,
 *	waits out the spec-mandated settle time, then hands off to
 *	ata_bus_post_reset() to wait for the devices.
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	ata_bus_post_reset(ap, devmask);
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */
void ata_bus_reset(struct ata_port *ap)
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
	dev0 = ata_devchk(ap, 0);
	dev1 = ata_devchk(ap, 1);

	devmask |= (1 << 0);
	devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	err = ata_dev_try_classify(ap, 0);
	/* 0x81 in the dev-0 error register means "device 1 failed" */
	if ((slave_possible) && (err != 0x81))
		ata_dev_try_classify(ap, 1);

	/* re-enable interrupts */

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		outb(ap->ctl, ioaddr->ctl_addr);

	/* error path: nothing usable on the channel */
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);
/*
 *	ata_get_mode_mask - intersect host and device transfer-mode masks
 *	@ap: port whose devices are examined
 *	@shift: transfer-mode class (ATA_SHIFT_UDMA/MWDMA/PIO)
 *
 *	Returns the bitmap of modes in class @shift supported by the host
 *	AND by every present device on the port (master and slave must
 *	both be able to run any mode we pick, since they share the cable).
 */
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
	struct ata_device *master, *slave;

	master = &ap->device[0];
	slave = &ap->device[1];

	assert (ata_dev_present(master) || ata_dev_present(slave));

	if (shift == ATA_SHIFT_UDMA) {
		mask = ap->udma_mask;
		if (ata_dev_present(master))
			mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
		if (ata_dev_present(slave))
			mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
	else if (shift == ATA_SHIFT_MWDMA) {
		mask = ap->mwdma_mask;
		if (ata_dev_present(master))
			mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
		if (ata_dev_present(slave))
			mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
	else if (shift == ATA_SHIFT_PIO) {
		mask = ap->pio_mask;
		if (ata_dev_present(master)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
		if (ata_dev_present(slave)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;

	mask = 0xffffffff; /* shut up compiler warning */
1755 /* find greatest bit */
1756 static int fgb(u32 bitmap)
1761 for (i = 0; i < 32; i++)
1762 if (bitmap & (1 << i))
/**
 *	ata_choose_xfer_mode - attempt to find best transfer mode
 *	@ap: Port for which an xfer mode will be selected
 *	@xfer_mode_out: (output) SET FEATURES - XFER MODE code
 *	@xfer_shift_out: (output) bit shift that selects this mode
 *
 *	Walks the transfer-mode classes in preference order and picks the
 *	fastest mode whose mask (host AND devices) is non-empty.
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_choose_xfer_mode(struct ata_port *ap,
				unsigned int *xfer_shift_out)
	unsigned int mask, shift;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
		shift = xfer_mode_classes[i].shift;
		mask = ata_get_mode_mask(ap, shift);
		/* base + bit index yields the XFER MODE code */
		*xfer_mode_out = xfer_mode_classes[i].base + x;
		*xfer_shift_out = shift;
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@ap: Port associated with device @dev
 *	@dev: Device to which command will be sent
 *
 *	Builds a SET FEATURES taskfile carrying dev->xfer_mode in the
 *	sector-count register, issues it under the host_set lock, and
 *	sleeps on a completion until the command finishes.
 */
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	qc = ata_qc_new_init(ap, dev);

	qc->tf.command = ATA_CMD_SET_FEATURES;
	qc->tf.feature = SETFEATURES_XFER;
	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->tf.nsect = dev->xfer_mode;	/* XFER mode code rides in nsect */

	/* synchronous issue: noop completion fn, wake us via completion */
	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* issue failure disables the whole port */
	ata_port_disable(ap);
	wait_for_completion(&wait);
/*
 *	ata_sg_clean - tear down the DMA mapping of a completed command
 *	@qc: command whose scatter/gather mapping should be released
 *
 *	Unmaps either the full S/G list or the single-entry mapping,
 *	depending on how the command was set up, and clears the
 *	DMAMAP flag so the teardown cannot run twice.
 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	assert(qc->flags & ATA_QCFLAG_DMAMAP);

	/* single-buffer commands map exactly one element */
	if (qc->flags & ATA_QCFLAG_SINGLE)
		assert(qc->n_elem == 1);

	DPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->flags & ATA_QCFLAG_SG)
		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
				 sg_dma_len(&sg[0]), dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Converts the command's DMA-mapped scatterlist into the port's
 *	PRD (Physical Region Descriptor) table, splitting any entry
 *	that would cross a 64KB boundary (a BMDMA hardware limit), and
 *	marks the final entry with the end-of-table bit.
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int idx, nelem;

	assert(qc->n_elem > 0);

	for (nelem = qc->n_elem; nelem; nelem--,sg++) {

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		offset = addr & 0xffff;
		/* clamp this PRD entry at the 64K boundary */
		if ((offset + sg_len) > 0x10000)
			len = 0x10000 - offset;

		ap->prd[idx].addr = cpu_to_le32(addr);
		ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
		VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

	/* hardware stops at the entry carrying the EOT flag */
	ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Default ->qc_prep hook: builds the PRD table for DMA-mapped
 *	commands; non-DMA commands need no preparation.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
/*
 *	ata_sg_init_one - describe a single kernel buffer as the data source
 *	@qc: command to attach the buffer to
 *	@buf: kernel-virtual data buffer
 *	@buflen: buffer length in bytes
 *
 *	Points the command at its embedded one-entry scatterlist and
 *	fills that entry from @buf; marks the command ATA_QCFLAG_SINGLE
 *	so ata_sg_setup_one()/ata_sg_clean() take the single-entry path.
 */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
	struct scatterlist *sg;

	qc->flags |= ATA_QCFLAG_SINGLE;

	memset(&qc->sgent, 0, sizeof(qc->sgent));
	qc->sg = &qc->sgent;
	/* translate the virtual address into page + offset form */
	sg->page = virt_to_page(buf);
	sg->offset = (unsigned long) buf & ~PAGE_MASK;
	sg_dma_len(sg) = buflen;
/*
 *	ata_sg_init - attach a caller-provided scatterlist to a command
 *	@qc: command to attach the scatterlist to
 *	@sg: scatterlist describing the data transfer
 *	@n_elem: number of entries in @sg
 *
 *	Marks the command ATA_QCFLAG_SG so setup/cleanup use the
 *	multi-entry mapping path.
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
	qc->flags |= ATA_QCFLAG_SG;
	qc->n_elem = n_elem;
/**
 *	ata_sg_setup_one - DMA-map the single-buffer command
 *	@qc: command whose one-entry scatterlist should be mapped
 *
 *	Maps qc->buf_virt for DMA and stores the bus address into the
 *	embedded scatterlist entry.  Returns non-zero on mapping failure.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->sg;
	dma_addr_t dma_address;

	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
				     sg_dma_len(sg), dir);
	/* mapping can fail (e.g. IOMMU exhaustion); caller must check */
	if (dma_mapping_error(dma_address))

	sg_dma_address(sg) = dma_address;

	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
/**
 *	ata_sg_setup - DMA-map the command's scatter/gather list
 *	@qc: command whose scatterlist should be mapped
 *
 *	Maps the full scatterlist for DMA and records the (possibly
 *	coalesced) element count back into the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;

	VPRINTK("ENTER, ata%u\n", ap->id);
	assert(qc->flags & ATA_QCFLAG_SG);

	n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);

	DPRINTK("%d sg elements mapped\n", n_elem);

	/* the IOMMU may have merged entries; use the mapped count */
	qc->n_elem = n_elem;
/*
 *	ata_pio_poll - poll status during a PIO polling state
 *	@ap: port whose PIO state machine is in a *_POLL state
 *
 *	Checks BSY once.  While still busy (and not yet timed out) the
 *	state machine stays in the matching poll state and the caller is
 *	told to re-queue after ATA_SHORT_PAUSE; once BSY clears, the
 *	machine returns to the corresponding non-poll state.
 */
static unsigned long ata_pio_poll(struct ata_port *ap)
	unsigned int poll_state = PIO_ST_UNKNOWN;
	unsigned int reg_state = PIO_ST_UNKNOWN;
	const unsigned int tmout_state = PIO_ST_TMOUT;

	/* pick the poll/continue state pair for the current state */
	switch (ap->pio_task_state) {
		poll_state = PIO_ST_POLL;
	case PIO_ST_LAST_POLL:
		poll_state = PIO_ST_LAST_POLL;
		reg_state = PIO_ST_LAST;

	status = ata_chk_status(ap);
	if (status & ATA_BUSY) {
		if (time_after(jiffies, ap->pio_task_timeout)) {
			ap->pio_task_state = tmout_state;
		ap->pio_task_state = poll_state;
		return ATA_SHORT_PAUSE;

	ap->pio_task_state = reg_state;
/**
 *	ata_pio_complete - finish a PIO command once the device goes idle
 *	@ap: port whose active PIO command is ending
 *
 *	Waits briefly for BSY/DRQ to clear; if the device is still busy,
 *	falls back to the timed PIO_ST_LAST_POLL state instead of
 *	spinning.  Once idle, validates final status and completes the
 *	queued command.
 */
static void ata_pio_complete (struct ata_port *ap)
	struct ata_queued_cmd *qc;

	/*
	 * This is purely hueristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * PIO_ST_POLL state.
	 */
	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
			ap->pio_task_state = PIO_ST_LAST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;

	drv_stat = ata_wait_idle(ap);
	if (!ata_ok(drv_stat)) {
		ap->pio_task_state = PIO_ST_ERR;

	qc = ata_qc_from_tag(ap, ap->active_tag);

	/* command done; state machine goes idle */
	ap->pio_task_state = PIO_ST_IDLE;

	ata_qc_complete(qc, drv_stat);
2120 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2125 for (i = 0; i < buf_words; i++)
2126 buf[i] = le16_to_cpu(buf[i]);
2127 #endif /* __BIG_ENDIAN */
/*
 *	ata_mmio_data_xfer - transfer PIO data via the memory-mapped data reg
 *	@ap: port to read/write
 *	@buf: data buffer
 *	@buflen: length in bytes (transferred as 16-bit words)
 *	@write_data: non-zero to write to the device, zero to read
 *
 *	Moves buflen/2 words through the MMIO data register, byte-swapping
 *	as needed so the buffer stays little-endian on the wire.
 */
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

	for (i = 0; i < words; i++)
		writew(le16_to_cpu(buf16[i]), mmio);
	for (i = 0; i < words; i++)
		buf16[i] = cpu_to_le16(readw(mmio));
2147 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2148 unsigned int buflen, int write_data)
2150 unsigned int dwords = buflen >> 1;
2153 outsw(ap->ioaddr.data_addr, buf, dwords);
2155 insw(ap->ioaddr.data_addr, buf, dwords);
2158 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2159 unsigned int buflen, int do_write)
2161 if (ap->flags & ATA_FLAG_MMIO)
2162 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2164 ata_pio_data_xfer(ap, buf, buflen, do_write);
/*
 *	ata_pio_sector - transfer one sector of an ATA PIO command
 *	@qc: active command (cursect/cursg/cursg_ofs track progress)
 *
 *	Locates the current sector's page in the scatterlist, kmaps it,
 *	and moves one ATA_SECT_SIZE chunk in the command's direction.
 *	Flips the state machine to PIO_ST_LAST on the final sector.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int offset;

	/* last sector: next event is command completion */
	if (qc->cursect == (qc->nsect - 1))
		ap->pio_task_state = PIO_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	buf = kmap(page) + offset;

	/* advance to the next scatterlist entry when this one is drained */
	if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) {

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
/*
 *	__atapi_pio_bytes - transfer one chunk of an ATAPI PIO command
 *	@qc: active command
 *	@bytes: byte count the device asked to transfer in this DRQ block
 *
 *	Like ata_pio_sector() but byte-granular: walks the scatterlist,
 *	kmaps the current page, and transfers up to @bytes without
 *	crossing a page boundary in one go.
 */
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int offset, count;

	/* this chunk finishes the transfer: next event is completion */
	if (qc->curbytes == qc->nbytes - bytes)
		ap->pio_task_state = PIO_ST_LAST;

	sg = &qc->sg[qc->cursg];

	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	count = min(sg_dma_len(sg) - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	buf = kmap(page) + offset;

	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* current scatterlist entry fully consumed; move to the next */
	if (qc->cursg_ofs == sg_dma_len(sg)) {

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	ata_data_xfer(ap, buf, count, do_write);

	/* more bytes requested than this entry holds: keep going */
	if (qc->cursg_ofs < sg_dma_len(sg))
/*
 *	atapi_pio_bytes - handle one ATAPI DRQ data-transfer interrupt
 *	@qc: active ATAPI command
 *
 *	Reads the interrupt reason and byte count the device placed in
 *	the taskfile shadow registers, sanity-checks that the device is
 *	asking for data in the expected direction, then transfers the
 *	requested bytes.  Protocol violations put the PIO state machine
 *	into PIO_ST_ERR.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* latch the shadow registers: reason + 16-bit byte count */
	ap->ops->tf_read(ap, &qc->tf);
	ireason = qc->tf.nsect;
	bc_lo = qc->tf.lbam;
	bc_hi = qc->tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)

	__atapi_pio_bytes(qc, bytes);

	/* error path: device violated the ATAPI DRQ protocol */
	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
	       ap->id, dev->devno);
	ap->pio_task_state = PIO_ST_ERR;
/*
 *	ata_pio_block - transfer the next PIO data block for the active command
 *	@ap: port whose PIO state machine is in PIO_ST
 *
 *	Waits briefly for BSY to clear (falling back to the timed
 *	PIO_ST_POLL state if the drive dawdles), then moves one block:
 *	ATAPI commands go through atapi_pio_bytes(), ATA commands through
 *	ata_pio_sector().  BSY=0/DRQ=0 at this point is treated as an
 *	error for ATA, and as command completion for ATAPI.
 */
static void ata_pio_block(struct ata_port *ap)
	struct ata_queued_cmd *qc;

	/*
	 * This is purely hueristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * PIO_ST_POLL state.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ap->pio_task_state = PIO_ST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;

	qc = ata_qc_from_tag(ap, ap->active_tag);

	if (is_atapi_taskfile(&qc->tf)) {
		/* no more data to transfer or unsupported ATAPI command */
		if ((status & ATA_DRQ) == 0) {
			ap->pio_task_state = PIO_ST_IDLE;

			ata_qc_complete(qc, status);

		atapi_pio_bytes(qc);
		/* handle BSY=0, DRQ=0 as error */
		if ((status & ATA_DRQ) == 0) {
			ap->pio_task_state = PIO_ST_ERR;
/*
 *	ata_pio_error - terminate a PIO command that hit an error/timeout
 *	@ap: port whose PIO state machine entered PIO_ST_ERR/PIO_ST_TMOUT
 *
 *	Logs the device status, idles the state machine, and completes
 *	the active command with ATA_ERR forced into the status.
 */
static void ata_pio_error(struct ata_port *ap)
	struct ata_queued_cmd *qc;

	qc = ata_qc_from_tag(ap, ap->active_tag);

	drv_stat = ata_chk_status(ap);
	printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",

	ap->pio_task_state = PIO_ST_IDLE;

	/* force ATA_ERR so the completion path sees a failure */
	ata_qc_complete(qc, drv_stat | ATA_ERR);
/*
 *	ata_pio_task - workqueue driver for the polled-PIO state machine
 *	@_data: struct ata_port * of the port being serviced
 *
 *	Executes one step of the PIO state machine, then re-queues itself
 *	(immediately, or after the delay a poll state requested) until
 *	the machine reaches IDLE, TMOUT, or ERR.
 */
static void ata_pio_task(void *_data)
	struct ata_port *ap = _data;
	unsigned long timeout = 0;

	switch (ap->pio_task_state) {
		ata_pio_complete(ap);

	case PIO_ST_LAST_POLL:
		timeout = ata_pio_poll(ap);

	/* not finished: schedule the next step */
	if ((ap->pio_task_state != PIO_ST_IDLE) &&
	    (ap->pio_task_state != PIO_ST_TMOUT) &&
	    (ap->pio_task_state != PIO_ST_ERR)) {
			queue_delayed_work(ata_wq, &ap->pio_task,
			queue_work(ata_wq, &ap->pio_task);
2402 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2403 struct scsi_cmnd *cmd)
2405 DECLARE_COMPLETION(wait);
2406 struct ata_queued_cmd *qc;
2407 unsigned long flags;
2408 int using_pio = dev->flags & ATA_DFLAG_PIO;
2411 DPRINTK("ATAPI request sense\n");
2413 qc = ata_qc_new_init(ap, dev);
2416 /* FIXME: is this needed? */
2417 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2419 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2420 qc->dma_dir = DMA_FROM_DEVICE;
2422 memset(&qc->cdb, 0, sizeof(ap->cdb_len));
2423 qc->cdb[0] = REQUEST_SENSE;
2424 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2426 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2427 qc->tf.command = ATA_CMD_PACKET;
2430 qc->tf.protocol = ATA_PROT_ATAPI;
2431 qc->tf.lbam = (8 * 1024) & 0xff;
2432 qc->tf.lbah = (8 * 1024) >> 8;
2434 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2436 qc->tf.protocol = ATA_PROT_ATAPI_DMA;
2437 qc->tf.feature |= ATAPI_PKT_DMA;
2440 qc->waiting = &wait;
2441 qc->complete_fn = ata_qc_complete_noop;
2443 spin_lock_irqsave(&ap->host_set->lock, flags);
2444 rc = ata_qc_issue(qc);
2445 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2448 ata_port_disable(ap);
2450 wait_for_completion(&wait);
/**
 *	ata_qc_timeout - Handle timeout of queued command
 *	@qc: Command that timed out
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	u8 host_stat = 0, drv_stat;

	/* FIXME: doesn't this conflict with timeout handling? */
	if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
		struct scsi_cmnd *cmd = qc->scsicmd;

		if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {

			/* finish completing original command */
			__ata_qc_complete(qc);

			/* fetch sense data, then report CHECK CONDITION */
			atapi_request_sense(ap, dev, cmd);

			cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
			scsi_finish_command(cmd);

	/* hack alert!  We cannot use the supplied completion
	 * function from inside the ->eh_strategy_handler() thread.
	 * libata is the only user of ->eh_strategy_handler() in
	 * any kernel, so the default scsi_done() assumes it is
	 * not being called from the SCSI EH.
	 */
	qc->scsidone = scsi_finish_command;

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI_DMA:
		host_stat = ata_bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */

		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ata_bmdma_ack_irq(ap);

		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
		       ap->id, qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		ata_qc_complete(qc, drv_stat);
/**
 *	ata_eng_timeout - Handle timeout of queued command
 *	@ap: Port on which timed-out command is active
 *
 *	Some part of the kernel (currently, only the SCSI layer)
 *	has noticed that the active command on port @ap has not
 *	completed after a specified length of time.  Handle this
 *	condition by disabling DMA (if necessary) and completing
 *	transactions, with error if necessary.
 *
 *	This also handles the case of the "lost interrupt", where
 *	for some reason (possibly hardware bug, possibly driver bug)
 *	an interrupt was not delivered to the driver, even though the
 *	transaction completed successfully.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_eng_timeout(struct ata_port *ap)
	struct ata_queued_cmd *qc;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	/* no active command: nothing sane to time out */
	printk(KERN_ERR "ata%u: BUG: timeout without command\n",
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *
 *	Atomically claims the first free tag on @ap (via qactive bitmap)
 *	and returns the matching queued-command slot, or NULL when every
 *	tag is in use.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
	struct ata_queued_cmd *qc = NULL;

	for (i = 0; i < ATA_MAX_QUEUE; i++)
		/* test_and_set_bit makes tag allocation race-free */
		if (!test_and_set_bit(i, &ap->qactive)) {
			qc = ata_qc_from_tag(ap, i);
/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@ap: Port associated with device @dev
 *	@dev: Device from whom we request an available command structure
 *
 *	Allocates a tag via ata_qc_new() and resets the per-command
 *	bookkeeping (progress counters, taskfile) for a fresh issue.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
				      struct ata_device *dev)
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);

	/* reset transfer-progress counters */
	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
	qc->nbytes = qc->curbytes = 0;

	ata_tf_init(ap, &qc->tf, dev->devno);

	/* large-LBA devices get 48-bit taskfiles by default */
	if (dev->flags & ATA_DFLAG_LBA48)
		qc->tf.flags |= ATA_TFLAG_LBA48;
/* completion callback that does nothing; used by synchronous issuers
 * that wait on qc->waiting instead
 */
static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
/*
 *	__ata_qc_complete - release a queued command's tag and wake waiters
 *	@qc: command being retired
 *
 *	Poisons the tag (catching use-after-complete), signals any
 *	synchronous waiter, and finally frees the tag bit in qactive.
 */
static void __ata_qc_complete(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	unsigned int tag, do_clear = 0;

	if (likely(ata_tag_valid(tag))) {
		if (tag == ap->active_tag)
			ap->active_tag = ATA_TAG_POISON;
		qc->tag = ATA_TAG_POISON;

		/* wake a synchronous issuer sleeping on this command */
		struct completion *waiting = qc->waiting;

	/* clearing the bit makes the tag reusable - must be last */
	if (likely(do_clear))
		clear_bit(tag, &ap->qactive);
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *	@drv_stat: ATA status register contents
 *
 *	Tears down the DMA mapping (if any), runs the command's
 *	completion callback, and - unless the callback claims the
 *	command - releases it via __ata_qc_complete().
 */
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
	assert(qc->flags & ATA_QCFLAG_ACTIVE);

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))

	/* call completion callback */
	rc = qc->complete_fn(qc, drv_stat);

	/* if callback indicates not to complete command (non-zero),
	 * return immediately
	 */

	__ata_qc_complete(qc);
/*
 *	ata_should_dma_map - decide whether a command needs DMA mapping
 *	@qc: command about to be issued
 *
 *	DMA protocols always map; PIO-class protocols map only when the
 *	controller does PIO transfers by DMA (ATA_FLAG_PIO_DMA).
 */
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI_DMA:

	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO_MULT:
		/* some controllers move even PIO data via DMA */
		if (ap->flags & ATA_FLAG_PIO_DMA)
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
int ata_qc_issue(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
		/* no mapping needed; make sure cleanup won't unmap */
		qc->flags &= ~ATA_QCFLAG_DMAMAP;

	ap->ops->qc_prep(qc);

	/* command becomes the port's active command */
	qc->ap->active_tag = qc->tag;
	qc->flags |= ATA_QCFLAG_ACTIVE;

	return ap->ops->qc_issue(qc);
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
int ata_qc_issue_prot(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;

	ata_dev_select(ap, qc->dev->devno, 1, 0);

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		/* no data phase: just write the taskfile */
		ata_tf_to_host_nolock(ap, &qc->tf);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */

	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
		ata_qc_set_polling(qc);
		ata_tf_to_host_nolock(ap, &qc->tf);
		/* PIO state machine takes over from the workqueue */
		ap->pio_task_state = PIO_ST;
		queue_work(ata_wq, &ap->pio_task);

	case ATA_PROT_ATAPI:
		ata_qc_set_polling(qc);
		ata_tf_to_host_nolock(ap, &qc->tf);
		/* CDB is sent later, from the packet task */
		queue_work(ata_wq, &ap->packet_task);

	case ATA_PROT_ATAPI_NODATA:
		ata_tf_to_host_nolock(ap, &qc->tf);
		queue_work(ata_wq, &ap->packet_task);

	case ATA_PROT_ATAPI_DMA:
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		queue_work(ata_wq, &ap->packet_task);
/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction (MMIO flavour)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Programs the PRD table address and transfer direction into the
 *	BMDMA registers via MMIO, then writes the ATA command.  The
 *	engine is not started here; see ata_bmdma_start_mmio().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction (MMIO flavour)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Sets the DMA-Start bit in the BMDMA command register, kicking
 *	off the transfer programmed by ata_bmdma_setup_mmio().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expected, so I think it is best to not add a readb()
	 * without first all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
/**
 *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Port-I/O twin of ata_bmdma_setup_mmio(): programs PRD address
 *	and direction via outl/outb, then writes the ATA command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
/**
 *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	Port-I/O twin of ata_bmdma_start_mmio(): sets the DMA-Start bit.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2934 void ata_bmdma_start(struct ata_queued_cmd *qc)
2936 if (qc->ap->flags & ATA_FLAG_MMIO)
2937 ata_bmdma_start_mmio(qc);
2939 ata_bmdma_start_pio(qc);
2942 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2944 if (qc->ap->flags & ATA_FLAG_MMIO)
2945 ata_bmdma_setup_mmio(qc);
2947 ata_bmdma_setup_pio(qc);
/* default ->irq_clear hook: acknowledge pending BMDMA interrupt status */
void ata_bmdma_irq_clear(struct ata_port *ap)
	ata_bmdma_ack_irq(ap);
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
	u8 status, host_stat;

	switch (qc->tf.protocol) {

	case ATA_PROT_ATAPI_DMA:
	case ATA_PROT_ATAPI:
		/* check status of DMA engine */
		host_stat = ata_bmdma_status(ap);
		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);

		/* if it's not our irq... */
		if (!(host_stat & ATA_DMA_INTR))

		/* before we do anything else, clear DMA-Start bit */

	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_NODATA:
		/* check altstatus */
		status = ata_altstatus(ap);
		if (status & ATA_BUSY)

		/* check main status, clearing INTRQ */
		status = ata_chk_status(ap);
		if (unlikely(status & ATA_BUSY))
		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
			ap->id, qc->tf.protocol, status);

		/* ack bmdma irq events */
		ata_bmdma_ack_irq(ap);

		/* complete taskfile transaction */
		ata_qc_complete(qc, status);

	return 1;	/* irq handled */

	/* unexpected interrupt: count it and occasionally warn */
	ap->stats.idle_irq++;

	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_irq_ack(ap, 0); /* debug trap */
		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);

	return 0;	/* irq not handled */
3035 * ata_interrupt - Default ATA host interrupt handler
3037 * @dev_instance: pointer to our host information structure
/*
 * Iterates over every port in the host_set and, for each enabled port
 * with an active queued command that expects interrupts (nIEN clear),
 * delegates to ata_host_intr().  Returns IRQ_HANDLED if any port
 * claimed the interrupt, IRQ_NONE otherwise (shared-IRQ friendly).
 */
3046 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3048 struct ata_host_set *host_set = dev_instance;
3050 unsigned int handled = 0;
3051 unsigned long flags;
3053 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
/* host_set lock serializes against command issue/completion paths */
3054 spin_lock_irqsave(&host_set->lock, flags);
3056 for (i = 0; i < host_set->n_ports; i++) {
3057 struct ata_port *ap;
3059 ap = host_set->ports[i];
3060 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3061 struct ata_queued_cmd *qc;
3063 qc = ata_qc_from_tag(ap, ap->active_tag);
/* skip polled commands: they run with nIEN set and are completed
 * from the pio_task, not from here */
3064 if (qc && (!(qc->tf.ctl & ATA_NIEN)))
3065 handled |= ata_host_intr(ap, qc);
3069 spin_unlock_irqrestore(&host_set->lock, flags);
3071 return IRQ_RETVAL(handled);
3075 * atapi_packet_task - Write CDB bytes to hardware
3076 * @_data: Port to which ATAPI device is attached.
3078 * When device has indicated its readiness to accept
3079 * a CDB, this function is called. Send the CDB.
3080 * If DMA is to be performed, exit immediately.
3081 * Otherwise, we are in polling mode, so poll
3082 * status until operation succeeds or fails.
3085 * Kernel thread context (may sleep)
3088 static void atapi_packet_task(void *_data)
3090 struct ata_port *ap = _data;
3091 struct ata_queued_cmd *qc;
3094 qc = ata_qc_from_tag(ap, ap->active_tag);
3096 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3098 /* sleep-wait for BSY to clear */
3099 DPRINTK("busy wait\n");
/* on timeout, bail to the error path which fails the command */
3100 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3103 /* make sure DRQ is set */
/* device must be ready for data (DRQ set, BSY clear) before we may
 * transfer the command packet */
3104 status = ata_chk_status(ap);
3105 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3109 DPRINTK("send cdb\n");
3110 assert(ap->cdb_len >= 12);
/* write the CDB to the device's data register */
3111 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3113 /* if we are DMA'ing, irq handler takes over from here */
3114 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3115 ap->ops->bmdma_start(qc); /* initiate bmdma */
3117 /* non-data commands are also handled via irq */
3118 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3122 /* PIO commands are handled by polling */
/* kick the PIO state machine; it runs from the ata workqueue */
3124 ap->pio_task_state = PIO_ST;
3125 queue_work(ata_wq, &ap->pio_task);
/* error path: fail the command with the error status */
3131 ata_qc_complete(qc, ATA_ERR);
/*
 * ata_port_start - default ->port_start hook.  Allocates the
 * DMA-coherent PRD (Physical Region Descriptor) table used by the
 * BMDMA engine.  Returns 0 on success, negative errno otherwise
 * (error return lines elided in this view — TODO confirm).
 */
3134 int ata_port_start (struct ata_port *ap)
3136 struct device *dev = ap->host_set->dev;
3138 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3142 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
/*
 * ata_port_stop - default ->port_stop hook.  Frees the PRD table
 * allocated by ata_port_start().
 */
3147 void ata_port_stop (struct ata_port *ap)
3149 struct device *dev = ap->host_set->dev;
3151 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3155 * ata_host_remove - Unregister SCSI host structure with upper layers
3156 * @ap: Port to unregister
3157 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3162 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3164 struct Scsi_Host *sh = ap->host;
/* full unregister path: detach from the SCSI mid-layer first */
3169 scsi_remove_host(sh);
/* in both cases, release low-level port resources (e.g. PRD table) */
3171 ap->ops->port_stop(ap);
3175 * ata_host_init - Initialize an ata_port structure
3176 * @ap: Structure to initialize
3177 * @host: associated SCSI mid-layer structure
3178 * @host_set: Collection of hosts to which @ap belongs
3179 * @ent: Probe information provided by low-level driver
3180 * @port_no: Port number associated with this ata_port
3186 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3187 struct ata_host_set *host_set,
3188 struct ata_probe_ent *ent, unsigned int port_no)
/* configure the SCSI mid-layer view of this port */
3194 host->max_channel = 1;
3195 host->unique_id = ata_unique_id++;
3196 host->max_cmd_len = 12;
3197 scsi_set_device(host, ent->dev);
3198 scsi_assign_lock(host, &host_set->lock);
/* port starts out disabled; probing enables it later */
3200 ap->flags = ATA_FLAG_PORT_DISABLED;
3201 ap->id = host->unique_id;
3203 ap->ctl = ATA_DEVCTL_OBS;
3204 ap->host_set = host_set;
3205 ap->port_no = port_no;
/* hardware port number: legacy-mode controllers use a fixed number */
3207 ent->legacy_mode ? ent->hard_port_no : port_no;
3208 ap->pio_mask = ent->pio_mask;
3209 ap->mwdma_mask = ent->mwdma_mask;
3210 ap->udma_mask = ent->udma_mask;
3211 ap->flags |= ent->host_flags;
3212 ap->ops = ent->port_ops;
3213 ap->cbl = ATA_CBL_NONE;
3214 ap->active_tag = ATA_TAG_POISON;
3215 ap->last_ctl = 0xFF;
/* work items run from the ata workqueue (ata_wq) */
3217 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3218 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3220 for (i = 0; i < ATA_MAX_DEVICES; i++)
3221 ap->device[i].devno = i;
/* NOTE(review): stats start at 1, presumably so the modulo-based irq
 * trap in ata_host_intr does not fire on the very first idle irq —
 * TODO confirm */
3224 ap->stats.unhandled_irq = 1;
3225 ap->stats.idle_irq = 1;
3228 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3232 * ata_host_add - Attach low-level ATA driver to system
3233 * @ent: Information provided by low-level driver
3234 * @host_set: Collections of ports to which we add
3235 * @port_no: Port number associated with this host
/*
 * Allocates a Scsi_Host with an embedded ata_port in its hostdata,
 * initializes it via ata_host_init(), and invokes the driver's
 * ->port_start hook.  Returns the new port, or NULL on failure
 * (failure-path lines elided in this view — TODO confirm).
 */
3243 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3244 struct ata_host_set *host_set,
3245 unsigned int port_no)
3247 struct Scsi_Host *host;
3248 struct ata_port *ap;
3252 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
/* the ata_port lives inside the Scsi_Host's hostdata region */
3256 ap = (struct ata_port *) &host->hostdata[0];
3258 ata_host_init(ap, host, host_set, ent, port_no);
3260 rc = ap->ops->port_start(ap);
/* error path: drop the Scsi_Host reference we allocated */
3267 scsi_host_put(host);
/*
 * ata_device_add - register all ports described by @ent with libata
 * and the SCSI mid-layer.
 *
 * Allocates the host_set container, adds each port via ata_host_add(),
 * requests the (shared) interrupt, probes each port's bus, registers
 * the Scsi_Hosts, and finally scans for disks.  Returns the number of
 * ports registered on success, 0 on failure.
 */
3281 int ata_device_add(struct ata_probe_ent *ent)
3283 unsigned int count = 0, i;
3284 struct device *dev = ent->dev;
3285 struct ata_host_set *host_set;
3288 /* alloc a container for our list of ATA ports (buses) */
/* the ports[] array is a flexible tail sized by n_ports */
3289 host_set = kmalloc(sizeof(struct ata_host_set) +
3290 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3293 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3294 spin_lock_init(&host_set->lock);
3296 host_set->dev = dev;
3297 host_set->n_ports = ent->n_ports;
3298 host_set->irq = ent->irq;
3299 host_set->mmio_base = ent->mmio_base;
3300 host_set->private_data = ent->private_data;
3301 host_set->ops = ent->port_ops;
3303 /* register each port bound to this device */
3304 for (i = 0; i < ent->n_ports; i++) {
3305 struct ata_port *ap;
3306 unsigned long xfer_mode_mask;
3308 ap = ata_host_add(ent, host_set, i);
3312 host_set->ports[i] = ap;
/* combined transfer-mode bitmap, used only for the dmesg banner */
3313 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
3314 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3315 (ap->pio_mask << ATA_SHIFT_PIO);
3317 /* print per-port info to dmesg */
3318 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3319 "bmdma 0x%lX irq %lu\n",
3321 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3322 ata_mode_string(xfer_mode_mask),
3323 ap->ioaddr.cmd_addr,
3324 ap->ioaddr.ctl_addr,
3325 ap->ioaddr.bmdma_addr,
/* clear any latched interrupt state before enabling the irq */
3329 host_set->ops->irq_clear(ap);
3338 /* obtain irq, that is shared between channels */
3339 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3340 DRV_NAME, host_set))
3343 /* perform each probe synchronously */
3344 DPRINTK("probe begin\n");
3345 for (i = 0; i < count; i++) {
3346 struct ata_port *ap;
3349 ap = host_set->ports[i];
3351 DPRINTK("ata%u: probe begin\n", ap->id);
3352 rc = ata_bus_probe(ap);
3353 DPRINTK("ata%u: probe end\n", ap->id);
3356 /* FIXME: do something useful here?
3357 * Current libata behavior will
3358 * tear down everything when
3359 * the module is removed
3360 * or the h/w is unplugged.
3364 rc = scsi_add_host(ap->host, dev);
3366 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3368 /* FIXME: do something useful here */
3369 /* FIXME: handle unconditional calls to
3370 * scsi_scan_host and ata_host_remove, below,
3376 /* probes are done, now scan each port's disk(s) */
3377 DPRINTK("probe begin\n");
3378 for (i = 0; i < count; i++) {
3379 struct ata_port *ap = host_set->ports[i];
3381 scsi_scan_host(ap->host);
/* stash the host_set so ata_pci_remove_one() can find it later */
3384 dev_set_drvdata(dev, host_set);
3386 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3387 return ent->n_ports; /* success */
/* error path: unwind the ports registered so far */
3390 for (i = 0; i < count; i++) {
3391 ata_host_remove(host_set->ports[i], 1);
3392 scsi_host_put(host_set->ports[i]->host);
3395 VPRINTK("EXIT, returning 0\n");
3400 * ata_scsi_release - SCSI layer callback hook for host unload
3401 * @host: libata host to be unloaded
3403 * Performs all duties necessary to shut down a libata port...
3404 * Kill port kthread, disable port, and release resources.
3407 * Inherited from SCSI layer.
3413 int ata_scsi_release(struct Scsi_Host *host)
/* the ata_port is embedded in the Scsi_Host's hostdata */
3415 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3419 ap->ops->port_disable(ap);
/* do_unregister=0: the SCSI layer is already tearing us down */
3420 ata_host_remove(ap, 0);
3427 * ata_std_ports - initialize ioaddr with standard port offsets.
3428 * @ioaddr: IO address structure to be initialized
/* Fills in the taskfile register addresses at their standard offsets
 * from cmd_addr, per the ATA spec register layout. */
3430 void ata_std_ports(struct ata_ioports *ioaddr)
3432 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3433 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3434 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3435 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3436 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3437 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3438 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3439 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3440 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3441 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
/*
 * ata_probe_ent_alloc - allocate and initialize @n zeroed probe
 * entries, copying per-port parameters (sht, flags, transfer-mode
 * masks, ops) from the caller-supplied port-info array @port.
 * Returns NULL (after logging) on allocation failure.
 */
3444 static struct ata_probe_ent *
3445 ata_probe_ent_alloc(int n, struct device *dev, struct ata_port_info **port)
3447 struct ata_probe_ent *probe_ent;
3450 probe_ent = kmalloc(sizeof(*probe_ent) * n, GFP_KERNEL);
3452 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
3453 kobject_name(&(dev->kobj)));
3457 memset(probe_ent, 0, sizeof(*probe_ent) * n);
3459 for (i = 0; i < n; i++) {
3460 INIT_LIST_HEAD(&probe_ent[i].node);
3461 probe_ent[i].dev = dev;
3463 probe_ent[i].sht = port[i]->sht;
3464 probe_ent[i].host_flags = port[i]->host_flags;
3465 probe_ent[i].pio_mask = port[i]->pio_mask;
3466 probe_ent[i].mwdma_mask = port[i]->mwdma_mask;
3467 probe_ent[i].udma_mask = port[i]->udma_mask;
3468 probe_ent[i].port_ops = port[i]->port_ops;
/*
 * ata_pci_init_native_mode - build a single probe entry describing a
 * PCI IDE controller in native mode: two ports whose taskfile, control
 * and BMDMA addresses come from PCI BARs 0-4, sharing one IRQ.
 * Returns NULL if the underlying allocation fails.
 */
3476 struct ata_probe_ent *
3477 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
3479 struct ata_probe_ent *probe_ent =
3480 ata_probe_ent_alloc(1, pci_dev_to_dev(pdev), port);
3484 probe_ent->n_ports = 2;
3485 probe_ent->irq = pdev->irq;
3486 probe_ent->irq_flags = SA_SHIRQ;
/* primary channel: BAR0 = command block, BAR1 = control block */
3488 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
3489 probe_ent->port[0].altstatus_addr =
3490 probe_ent->port[0].ctl_addr =
3491 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
3492 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
/* secondary channel: BAR2/BAR3, BMDMA registers 8 bytes further in */
3494 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
3495 probe_ent->port[1].altstatus_addr =
3496 probe_ent->port[1].ctl_addr =
3497 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
3498 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
3500 ata_std_ports(&probe_ent->port[0]);
3501 ata_std_ports(&probe_ent->port[1]);
/*
 * ata_pci_init_legacy_mode - build two probe entries describing a PCI
 * IDE controller in legacy mode: fixed ISA-compatible addresses
 * (0x1f0/IRQ14 primary, 0x170/IRQ15 secondary), with BMDMA registers
 * still taken from PCI BAR4.  Returns NULL on allocation failure.
 */
3506 struct ata_probe_ent *
3507 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port)
3509 struct ata_probe_ent *probe_ent =
3510 ata_probe_ent_alloc(2, pci_dev_to_dev(pdev), port)
3514 probe_ent[0].n_ports = 1;
3515 probe_ent[0].irq = 14;
3517 probe_ent[0].hard_port_no = 0;
3518 probe_ent[0].legacy_mode = 1;
3520 probe_ent[1].n_ports = 1;
3521 probe_ent[1].irq = 15;
3523 probe_ent[1].hard_port_no = 1;
3524 probe_ent[1].legacy_mode = 1;
/* primary channel at the traditional ISA addresses */
3526 probe_ent[0].port[0].cmd_addr = 0x1f0;
3527 probe_ent[0].port[0].altstatus_addr =
3528 probe_ent[0].port[0].ctl_addr = 0x3f6;
3529 probe_ent[0].port[0].bmdma_addr = pci_resource_start(pdev, 4);
/* secondary channel; BMDMA block is 8 bytes past the primary's */
3531 probe_ent[1].port[0].cmd_addr = 0x170;
3532 probe_ent[1].port[0].altstatus_addr =
3533 probe_ent[1].port[0].ctl_addr = 0x376;
3534 probe_ent[1].port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
3536 ata_std_ports(&probe_ent[0].port[0]);
3537 ata_std_ports(&probe_ent[1].port[0]);
3543 * ata_pci_init_one - Initialize/register PCI IDE host controller
3544 * @pdev: Controller to be initialized
3545 * @port_info: Information from low-level host driver
3546 * @n_ports: Number of ports attached to host controller
3549 * Inherited from PCI layer (may sleep).
3555 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
3556 unsigned int n_ports)
3558 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
3559 struct ata_port_info *port[2];
3561 unsigned int legacy_mode = 0;
3566 port[0] = port_info[0];
3568 port[1] = port_info[1];
/* Decide legacy vs. native mode from the programming-interface byte:
 * bits 0 and 2 set mean both channels are in native mode. */
3572 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0) {
3573 /* TODO: support transitioning to native mode? */
3574 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
3575 mask = (1 << 2) | (1 << 0);
3576 if ((tmp8 & mask) != mask)
3577 legacy_mode = (1 << 3);
/* native-mode controllers are registered as a single 2-port host */
3581 if ((!legacy_mode) && (n_ports > 1)) {
3582 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
3586 rc = pci_enable_device(pdev);
3590 rc = pci_request_regions(pdev, DRV_NAME);
/* In legacy mode the fixed ISA port ranges must be reserved too.
 * If the conflicting owner is already "libata", treat it as ours. */
3595 if (!request_region(0x1f0, 8, "libata")) {
3596 struct resource *conflict, res;
3598 res.end = 0x1f0 + 8 - 1;
3599 conflict = ____request_resource(&ioport_resource, &res);
3600 if (!strcmp(conflict->name, "libata"))
3601 legacy_mode |= (1 << 0);
3603 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
3605 legacy_mode |= (1 << 0);
3607 if (!request_region(0x170, 8, "libata")) {
3608 struct resource *conflict, res;
3610 res.end = 0x170 + 8 - 1;
3611 conflict = ____request_resource(&ioport_resource, &res);
3612 if (!strcmp(conflict->name, "libata"))
3613 legacy_mode |= (1 << 1);
3615 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
3617 legacy_mode |= (1 << 1);
3620 /* we have legacy mode, but all ports are unavailable */
3621 if (legacy_mode == (1 << 3)) {
3623 goto err_out_regions;
3626 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3628 goto err_out_regions;
3629 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3631 goto err_out_regions;
/* build the probe entries; legacy mode yields one per channel */
3634 probe_ent = ata_pci_init_legacy_mode(pdev, port);
3636 probe_ent2 = &probe_ent[1];
3638 probe_ent = ata_pci_init_native_mode(pdev, port);
3641 goto err_out_regions;
3644 pci_set_master(pdev);
3646 /* FIXME: check ata_device_add return */
3648 if (legacy_mode & (1 << 0))
3649 ata_device_add(probe_ent);
3650 if (legacy_mode & (1 << 1))
3651 ata_device_add(probe_ent2);
/* native mode: single probe entry covers both ports */
3653 ata_device_add(probe_ent);
/* error unwind: release only the legacy regions we claimed */
3660 if (legacy_mode & (1 << 0))
3661 release_region(0x1f0, 8);
3662 if (legacy_mode & (1 << 1))
3663 release_region(0x170, 8);
3664 pci_release_regions(pdev);
3666 pci_disable_device(pdev);
3671 * ata_pci_remove_one - PCI layer callback for device removal
3672 * @pdev: PCI device that was removed
3674 * PCI layer indicates to libata via this hook that
3675 * hot-unplug or module unload event has occurred.
3676 * Handle this by unregistering all objects associated
3677 * with this PCI device. Free those objects. Then finally
3678 * release PCI resources and disable device.
3681 * Inherited from PCI layer (may sleep).
3684 void ata_pci_remove_one (struct pci_dev *pdev)
3686 struct device *dev = pci_dev_to_dev(pdev);
/* host_set was stashed in drvdata by ata_device_add() */
3687 struct ata_host_set *host_set = dev_get_drvdata(dev);
3688 struct ata_port *ap;
/* 1: detach every port from the SCSI mid-layer */
3691 for (i = 0; i < host_set->n_ports; i++) {
3692 ap = host_set->ports[i];
3694 scsi_remove_host(ap->host);
/* 2: no more interrupts can arrive; release irq and host resources */
3697 free_irq(host_set->irq, host_set);
3698 if (host_set->ops->host_stop)
3699 host_set->ops->host_stop(host_set);
3700 if (host_set->mmio_base)
3701 iounmap(host_set->mmio_base);
/* 3: release each port and drop its Scsi_Host reference */
3703 for (i = 0; i < host_set->n_ports; i++) {
3704 ap = host_set->ports[i];
3706 ata_scsi_release(ap->host);
3707 scsi_host_put(ap->host);
3710 pci_release_regions(pdev);
/* 4: give back any legacy ISA port ranges claimed at init time */
3712 for (i = 0; i < host_set->n_ports; i++) {
3713 struct ata_ioports *ioaddr;
3715 ap = host_set->ports[i];
3716 ioaddr = &ap->ioaddr;
3718 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
3719 if (ioaddr->cmd_addr == 0x1f0)
3720 release_region(0x1f0, 8);
3721 else if (ioaddr->cmd_addr == 0x170)
3722 release_region(0x170, 8);
3727 pci_disable_device(pdev);
3728 dev_set_drvdata(dev, NULL);
3731 /* move to PCI subsystem */
/*
 * pci_test_config_bits - read a 1-, 2- or 4-byte PCI config register
 * (per bits->width), mask it with bits->mask, and compare against
 * bits->val.  Returns 1 on match, 0 otherwise.  (The per-width
 * temp variables and mask/assign lines are elided in this view.)
 */
3732 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
3734 unsigned long tmp = 0;
3736 switch (bits->width) {
3739 pci_read_config_byte(pdev, bits->reg, &tmp8);
3745 pci_read_config_word(pdev, bits->reg, &tmp16);
3751 pci_read_config_dword(pdev, bits->reg, &tmp32);
3762 return (tmp == bits->val) ? 1 : 0;
3764 #endif /* CONFIG_PCI */
/*
 * ata_init - module init: create the single-threaded "ata" workqueue
 * used for the PIO and ATAPI-packet work items, then announce the
 * driver version.
 */
3776 static int __init ata_init(void)
3778 ata_wq = create_workqueue("ata");
3782 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
/*
 * ata_exit - module exit: tear down the workqueue created by ata_init.
 */
3786 static void __exit ata_exit(void)
3788 destroy_workqueue(ata_wq);
3791 module_init(ata_init);
3792 module_exit(ata_exit);
3795 * libata is essentially a library of internal helper functions for
3796 * low-level ATA host controller drivers. As such, the API/ABI is
3797 * likely to change as new drivers are added and updated.
3798 * Do not depend on ABI/API stability.
/* core registration / command-path helpers */
3801 EXPORT_SYMBOL_GPL(ata_std_bios_param);
3802 EXPORT_SYMBOL_GPL(ata_std_ports);
3803 EXPORT_SYMBOL_GPL(ata_device_add);
3804 EXPORT_SYMBOL_GPL(ata_sg_init);
3805 EXPORT_SYMBOL_GPL(ata_sg_init_one);
3806 EXPORT_SYMBOL_GPL(ata_qc_complete);
3807 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
3808 EXPORT_SYMBOL_GPL(ata_eng_timeout);
/* taskfile / register access helpers */
3809 EXPORT_SYMBOL_GPL(ata_tf_load);
3810 EXPORT_SYMBOL_GPL(ata_tf_read);
3811 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
3812 EXPORT_SYMBOL_GPL(ata_std_dev_select);
3813 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
3814 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
3815 EXPORT_SYMBOL_GPL(ata_check_status);
3816 EXPORT_SYMBOL_GPL(ata_exec_command);
3817 EXPORT_SYMBOL_GPL(ata_port_start);
3818 EXPORT_SYMBOL_GPL(ata_port_stop);
/* interrupt and BMDMA helpers */
3819 EXPORT_SYMBOL_GPL(ata_interrupt);
3820 EXPORT_SYMBOL_GPL(ata_qc_prep);
3821 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
3822 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3823 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
/* probing / reset helpers */
3824 EXPORT_SYMBOL_GPL(ata_port_probe);
3825 EXPORT_SYMBOL_GPL(sata_phy_reset);
3826 EXPORT_SYMBOL_GPL(__sata_phy_reset);
3827 EXPORT_SYMBOL_GPL(ata_bus_reset);
3828 EXPORT_SYMBOL_GPL(ata_port_disable);
/* SCSI mid-layer glue */
3829 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
3830 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
3831 EXPORT_SYMBOL_GPL(ata_scsi_error);
3832 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
3833 EXPORT_SYMBOL_GPL(ata_scsi_release);
3834 EXPORT_SYMBOL_GPL(ata_host_intr);
3835 EXPORT_SYMBOL_GPL(ata_dev_classify);
3836 EXPORT_SYMBOL_GPL(ata_dev_id_string);
3837 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
/* PCI helpers (compiled under CONFIG_PCI — opening #ifdef not visible
 * here; NOTE(review): confirm against the full file) */
3840 EXPORT_SYMBOL_GPL(pci_test_config_bits);
3841 EXPORT_SYMBOL_GPL(ata_pci_init_legacy_mode);
3842 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
3843 EXPORT_SYMBOL_GPL(ata_pci_init_one);
3844 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
3845 #endif /* CONFIG_PCI */