#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/workqueue.h>
#include <scsi/scsi.h>
#include "scsi.h"
+#include "scsi_priv.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
u8 *xfer_mode_out,
unsigned int *xfer_shift_out);
static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
+static void __ata_qc_complete(struct ata_queued_cmd *qc);
static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
/**
* ata_tf_load - send taskfile registers to host controller
}
/**
- * ata_check_status - Read device status reg & clear interrupt
+ * ata_check_status_pio - Read device status reg & clear interrupt
* @ap: port where the device is
*
* Reads ATA taskfile status register for currently-selected device
return ata_check_status_pio(ap);
}
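+/**
+ * ata_altstatus - Read device alternate status register
+ * @ap: port where the device is
+ *
+ * Reads the alternate status register of the currently-selected
+ * device, preferring a port-specific ->check_altstatus hook when
+ * one is provided and falling back to MMIO or PIO access.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */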
+u8 ata_altstatus(struct ata_port *ap)
+{
+ if (ap->ops->check_altstatus)
+ return ap->ops->check_altstatus(ap);
+
+ if (ap->flags & ATA_FLAG_MMIO)
+ return readb((void __iomem *)ap->ioaddr.altstatus_addr);
+ return inb(ap->ioaddr.altstatus_addr);
+}
+
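+/**
+ * ata_chk_err - Read device error register
+ * @ap: port where the device is
+ *
+ * Reads the error register of the currently-selected device,
+ * preferring a port-specific ->check_err hook when one is
+ * provided and falling back to MMIO or PIO access.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */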
+u8 ata_chk_err(struct ata_port *ap)
+{
+ if (ap->ops->check_err)
+ return ap->ops->check_err(ap);
+
+ if (ap->flags & ATA_FLAG_MMIO)
+ return readb((void __iomem *) ap->ioaddr.error_addr);
+ return inb(ap->ioaddr.error_addr);
+}
+
/**
* ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
* @tf: Taskfile to convert
/**
* ata_udma_string - convert UDMA bit offset to string
- * @udma_mask: mask of bits supported; only highest bit counts.
+ * @mask: mask of bits supported; only highest bit counts.
*
* Determine string which represents the highest speed
- * (highest bit in @udma_mask).
+ * (highest bit in @mask).
/**
* ata_dev_id_string - Convert IDENTIFY DEVICE page into string
- * @dev: Device whose IDENTIFY DEVICE results we will examine
+ * @id: IDENTIFY DEVICE results we will examine
* @s: string into which data is output
* @ofs: offset into identify device page
* @len: length of string to return. must be an even number.
* caller.
*/
-void ata_dev_id_string(struct ata_device *dev, unsigned char *s,
+void ata_dev_id_string(u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len)
{
unsigned int c;
while (len > 0) {
- c = dev->id[ofs] >> 8;
+ c = id[ofs] >> 8;
*s = c;
s++;
- c = dev->id[ofs] & 0xff;
+ c = id[ofs] & 0xff;
*s = c;
s++;
BUG_ON(qc == NULL);
ata_sg_init_one(qc, dev->id, sizeof(dev->id));
- qc->pci_dma_dir = PCI_DMA_FROMDEVICE;
+ qc->dma_dir = DMA_FROM_DEVICE;
qc->tf.protocol = ATA_PROT_PIO;
qc->nsect = 1;
*/
/* we require LBA and DMA support (bits 8 & 9 of word 49) */
- if (!ata_id_has_dma(dev) || !ata_id_has_lba(dev)) {
+ if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
goto err_out_nosup;
}
/* ATA-specific feature tests */
if (dev->class == ATA_DEV_ATA) {
- if (!ata_id_is_ata(dev)) /* sanity check */
+ if (!ata_id_is_ata(dev->id)) /* sanity check */
goto err_out_nosup;
tmp = dev->id[ATA_ID_MAJOR_VER];
goto err_out_nosup;
}
- if (ata_id_has_lba48(dev)) {
+ if (ata_id_has_lba48(dev->id)) {
dev->flags |= ATA_DFLAG_LBA48;
- dev->n_sectors = ata_id_u64(dev, 100);
+ dev->n_sectors = ata_id_u64(dev->id, 100);
} else {
- dev->n_sectors = ata_id_u32(dev, 60);
+ dev->n_sectors = ata_id_u32(dev->id, 60);
}
ap->host->max_cmd_len = 16;
/* ATAPI-specific feature tests */
else {
- if (ata_id_is_ata(dev)) /* sanity check */
+ if (ata_id_is_ata(dev->id)) /* sanity check */
goto err_out_nosup;
rc = atapi_cdb_len(dev->id);
printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
ap->id, device);
err_out:
- ata_irq_on(ap); /* re-enable interrupts */
dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
DPRINTK("EXIT, err\n");
}
ata_dev_try_classify(ap, 1);
/* re-enable interrupts */
- ata_irq_on(ap);
+ if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
+ ata_irq_on(ap);
/* is double-select really necessary? */
if (ap->device[1].class != ATA_DEV_NONE)
DPRINTK("EXIT\n");
}
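+/* print a warning that DMA is being disabled for a blacklisted device */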
+static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
+{
+ printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
+ ap->id, dev->devno);
+}
+
+static const char *ata_dma_blacklist[] = {
+ "WDC AC11000H",
+ "WDC AC22100H",
+ "WDC AC32500H",
+ "WDC AC33100H",
+ "WDC AC31600H",
+ "WDC AC32100H",
+ "WDC AC23200L",
+ "Compaq CRD-8241B",
+ "CRD-8400B",
+ "CRD-8480B",
+ "CRD-8482B",
+ "CRD-84",
+ "SanDisk SDP3B",
+ "SanDisk SDP3B-64",
+ "SANYO CD-ROM CRD",
+ "HITACHI CDR-8",
+ "HITACHI CDR-8335",
+ "HITACHI CDR-8435",
+ "Toshiba CD-ROM XM-6202B",
+ "CD-532E-A",
+ "E-IDE CD-ROM CR-840",
+ "CD-ROM Drive/F5A",
+ "WPI CDD-820",
+ "SAMSUNG CD-ROM SC-148C",
+ "SAMSUNG CD-ROM SC",
+ "SanDisk SDP3B-64",
+ "SAMSUNG CD-ROM SN-124",
+ "ATAPI CD-ROM DRIVE 40X MAXIMUM",
+ "_NEC DV5800A",
+};
+
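+/*
+ * ata_dma_blacklisted - check a device against ata_dma_blacklist[]
+ *
+ * Extracts the IDENTIFY DEVICE model string, trims trailing blanks,
+ * and compares it against the blacklist table. Returns 1 if the
+ * device is blacklisted, 0 otherwise.
+ */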
+static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
+{
+ unsigned char model_num[40];
+ char *s;
+ unsigned int len;
+ int i;
+
+ ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
+ sizeof(model_num));
+ s = (char *)&model_num[0];
+ len = strnlen(s, sizeof(model_num));
+
+ /* ATAPI specifies that empty space is blank-filled; remove blanks */
+ while ((len > 0) && (s[len - 1] == ' ')) {
+ len--;
+ s[len] = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
+ if (!strncmp(ata_dma_blacklist[i], s, len))
+ return 1;
+
+ return 0;
+}
+
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
{
struct ata_device *master, *slave;
if (shift == ATA_SHIFT_UDMA) {
mask = ap->udma_mask;
- if (ata_dev_present(master))
+ if (ata_dev_present(master)) {
mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
- if (ata_dev_present(slave))
+ if (ata_dma_blacklisted(ap, master)) {
+ mask = 0;
+ ata_pr_blacklisted(ap, master);
+ }
+ }
+ if (ata_dev_present(slave)) {
mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
+ if (ata_dma_blacklisted(ap, slave)) {
+ mask = 0;
+ ata_pr_blacklisted(ap, slave);
+ }
+ }
}
else if (shift == ATA_SHIFT_MWDMA) {
mask = ap->mwdma_mask;
- if (ata_dev_present(master))
+ if (ata_dev_present(master)) {
mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
- if (ata_dev_present(slave))
+ if (ata_dma_blacklisted(ap, master)) {
+ mask = 0;
+ ata_pr_blacklisted(ap, master);
+ }
+ }
+ if (ata_dev_present(slave)) {
mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
+ if (ata_dma_blacklisted(ap, slave)) {
+ mask = 0;
+ ata_pr_blacklisted(ap, slave);
+ }
+ }
}
else if (shift == ATA_SHIFT_PIO) {
mask = ap->pio_mask;
}
/**
- * ata_choose_xfer_mode -
- * @ap:
+ * ata_choose_xfer_mode - attempt to find best transfer mode
+ * @ap: Port for which an xfer mode will be selected
+ * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
+ * @xfer_shift_out: (output) bit shift that selects this mode
*
* LOCKING:
*
{
struct ata_port *ap = qc->ap;
struct scatterlist *sg = qc->sg;
- int dir = qc->pci_dma_dir;
+ int dir = qc->dma_dir;
assert(qc->flags & ATA_QCFLAG_DMAMAP);
assert(sg != NULL);
DPRINTK("unmapping %u sg elements\n", qc->n_elem);
if (qc->flags & ATA_QCFLAG_SG)
- pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
+ dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
else
- pci_unmap_single(ap->host_set->pdev, sg_dma_address(&sg[0]),
+ dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
sg_dma_len(&sg[0]), dir);
qc->flags &= ~ATA_QCFLAG_DMAMAP;
if (idx)
ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
+/**
+ * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
+ * @qc: Metadata associated with taskfile to check
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ *
+ * RETURNS: 0 when ATAPI DMA can be used
+ * nonzero otherwise
+ */
+int ata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ int rc = 0; /* Assume ATAPI DMA is OK by default */
+ if (ap->ops->check_atapi_dma)
+ rc = ap->ops->check_atapi_dma(qc);
+
+ return rc;
+}
/**
* ata_qc_prep - Prepare taskfile for submission
* @qc: Metadata associated with taskfile to be prepared
sg->page = virt_to_page(buf);
sg->offset = (unsigned long) buf & ~PAGE_MASK;
sg_dma_len(sg) = buflen;
-
- WARN_ON(buflen > PAGE_SIZE);
}
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- int dir = qc->pci_dma_dir;
+ int dir = qc->dma_dir;
struct scatterlist *sg = qc->sg;
dma_addr_t dma_address;
- dma_address = pci_map_single(ap->host_set->pdev, qc->buf_virt,
+ dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
sg_dma_len(sg), dir);
- if (pci_dma_mapping_error(dma_address))
+ if (dma_mapping_error(dma_address))
return -1;
sg_dma_address(sg) = dma_address;
VPRINTK("ENTER, ata%u\n", ap->id);
assert(qc->flags & ATA_QCFLAG_SG);
- dir = qc->pci_dma_dir;
- n_elem = pci_map_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
+ dir = qc->dma_dir;
+ n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
if (n_elem < 1)
return -1;
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
struct page *page;
+ unsigned int offset;
unsigned char *buf;
if (qc->cursect == (qc->nsect - 1))
ap->pio_task_state = PIO_ST_LAST;
page = sg[qc->cursg].page;
- buf = kmap(page) +
- sg[qc->cursg].offset + (qc->cursg_ofs * ATA_SECT_SIZE);
+ offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
+
+ /* get the current page and offset */
+ page = nth_page(page, (offset >> PAGE_SHIFT));
+ offset %= PAGE_SIZE;
+
+ buf = kmap(page) + offset;
qc->cursect++;
qc->cursg_ofs++;
kunmap(page);
}
-static void atapi_pio_sector(struct ata_queued_cmd *qc)
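+/*
+ * Transfer up to @bytes of ATAPI PIO data for @qc, walking the
+ * scatterlist page by page so that kmap() never spans a page
+ * boundary, and advancing cursg/cursg_ofs as data is consumed.
+ */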
+static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+{
+ int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+ struct scatterlist *sg = qc->sg;
+ struct ata_port *ap = qc->ap;
+ struct page *page;
+ unsigned char *buf;
+ unsigned int offset, count;
+
+ if (qc->curbytes == qc->nbytes - bytes)
+ ap->pio_task_state = PIO_ST_LAST;
+
+next_sg:
+ sg = &qc->sg[qc->cursg];
+
+next_page:
+ page = sg->page;
+ offset = sg->offset + qc->cursg_ofs;
+
+ /* get the current page and offset */
+ page = nth_page(page, (offset >> PAGE_SHIFT));
+ offset %= PAGE_SIZE;
+
+ count = min(sg_dma_len(sg) - qc->cursg_ofs, bytes);
+
+ /* don't cross page boundaries */
+ count = min(count, (unsigned int)PAGE_SIZE - offset);
+
+ buf = kmap(page) + offset;
+
+ bytes -= count;
+ qc->curbytes += count;
+ qc->cursg_ofs += count;
+
+ if (qc->cursg_ofs == sg_dma_len(sg)) {
+ qc->cursg++;
+ qc->cursg_ofs = 0;
+ }
+
+ DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+ /* do the actual data transfer */
+ ata_data_xfer(ap, buf, count, do_write);
+
+ kunmap(page);
+
+ if (bytes) {
+ if (qc->cursg_ofs < sg_dma_len(sg))
+ goto next_page;
+ goto next_sg;
+ }
+}
+
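+/*
+ * Check the interrupt reason and byte count supplied by the device,
+ * verify that the transfer direction matches the taskfile, then hand
+ * the actual data transfer off to __atapi_pio_bytes().
+ */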
+static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *dev = qc->dev;
- unsigned int i, ireason, bc_lo, bc_hi, bytes;
+ unsigned int ireason, bc_lo, bc_hi, bytes;
int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
ap->ops->tf_read(ap, &qc->tf);
if (do_write != i_write)
goto err_out;
- /* make sure byte count is multiple of sector size; not
- * required by standard (warning! warning!), but IDE driver
- * does this to simplify things a bit. We are lazy, and
- * follow suit.
- */
- if (bytes & (ATA_SECT_SIZE - 1))
- goto err_out;
-
- for (i = 0; i < (bytes >> 9); i++)
- ata_pio_sector(qc);
+ __atapi_pio_bytes(qc, bytes);
return;
}
}
- /* handle BSY=0, DRQ=0 as error */
- if ((status & ATA_DRQ) == 0) {
- ap->pio_task_state = PIO_ST_ERR;
- return;
- }
-
qc = ata_qc_from_tag(ap, ap->active_tag);
assert(qc != NULL);
- if (is_atapi_taskfile(&qc->tf))
- atapi_pio_sector(qc);
- else
+ if (is_atapi_taskfile(&qc->tf)) {
+ /* no more data to transfer or unsupported ATAPI command */
+ if ((status & ATA_DRQ) == 0) {
+ ap->pio_task_state = PIO_ST_IDLE;
+
+ ata_irq_on(ap);
+
+ ata_qc_complete(qc, status);
+ return;
+ }
+
+ atapi_pio_bytes(qc);
+ } else {
+ /* handle BSY=0, DRQ=0 as error */
+ if ((status & ATA_DRQ) == 0) {
+ ap->pio_task_state = PIO_ST_ERR;
+ return;
+ }
+
ata_pio_sector(qc);
+ }
}
static void ata_pio_error(struct ata_port *ap)
unsigned long timeout = 0;
switch (ap->pio_task_state) {
+ case PIO_ST_IDLE:
+ return;
+
case PIO_ST:
ata_pio_block(ap);
break;
case PIO_ST_TMOUT:
case PIO_ST_ERR:
ata_pio_error(ap);
- break;
+ return;
}
- if ((ap->pio_task_state != PIO_ST_IDLE) &&
- (ap->pio_task_state != PIO_ST_TMOUT) &&
- (ap->pio_task_state != PIO_ST_ERR)) {
- if (timeout)
- queue_delayed_work(ata_wq, &ap->pio_task,
- timeout);
- else
- queue_work(ata_wq, &ap->pio_task);
- }
+ if (timeout)
+ queue_delayed_work(ata_wq, &ap->pio_task,
+ timeout);
+ else
+ queue_work(ata_wq, &ap->pio_task);
+}
+
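+/*
+ * Synchronously issue an ATAPI REQUEST SENSE for @cmd on @dev,
+ * reading the sense data directly into cmd->sense_buffer. If the
+ * command cannot be issued, the port is disabled.
+ */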
+static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_cmnd *cmd)
+{
+ DECLARE_COMPLETION(wait);
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ int rc;
+
+ DPRINTK("ATAPI request sense\n");
+
+ qc = ata_qc_new_init(ap, dev);
+ BUG_ON(qc == NULL);
+
+ /* FIXME: is this needed? */
+ memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+ ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
+ qc->dma_dir = DMA_FROM_DEVICE;
+
+ memset(&qc->cdb, 0, ap->cdb_len);
+ qc->cdb[0] = REQUEST_SENSE;
+ qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+ qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ qc->tf.command = ATA_CMD_PACKET;
+
+ qc->tf.protocol = ATA_PROT_ATAPI;
+ qc->tf.lbam = (8 * 1024) & 0xff;
+ qc->tf.lbah = (8 * 1024) >> 8;
+ qc->nbytes = SCSI_SENSE_BUFFERSIZE;
+
+ qc->waiting = &wait;
+ qc->complete_fn = ata_qc_complete_noop;
+
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ rc = ata_qc_issue(qc);
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+ if (rc)
+ ata_port_disable(ap);
+ else
+ wait_for_completion(&wait);
+
+ DPRINTK("EXIT\n");
}
/**
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_device *dev = qc->dev;
u8 host_stat = 0, drv_stat;
DPRINTK("ENTER\n");
+ /* FIXME: doesn't this conflict with timeout handling? */
+ if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
+ struct scsi_cmnd *cmd = qc->scsicmd;
+
+ if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
+
+ /* finish completing original command */
+ __ata_qc_complete(qc);
+
+ atapi_request_sense(ap, dev, cmd);
+
+ cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
+ scsi_finish_command(cmd);
+
+ goto out;
+ }
+ }
+
/* hack alert! We cannot use the supplied completion
* function from inside the ->eh_strategy_handler() thread.
* libata is the only user of ->eh_strategy_handler() in
case ATA_PROT_DMA:
case ATA_PROT_ATAPI_DMA:
- host_stat = ata_bmdma_status(ap);
+ host_stat = ap->ops->bmdma_status(ap);
/* before we do anything else, clear DMA-Start bit */
- ata_bmdma_stop(ap);
+ ap->ops->bmdma_stop(ap);
/* fall through */
drv_stat = ata_chk_status(ap);
/* ack bmdma irq events */
- ata_bmdma_ack_irq(ap);
+ ap->ops->irq_clear(ap);
printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
ap->id, qc->tf.command, drv_stat, host_stat);
ata_qc_complete(qc, drv_stat);
break;
}
-
+out:
DPRINTK("EXIT\n");
}
qc->dev = dev;
qc->cursect = qc->cursg = qc->cursg_ofs = 0;
qc->nsect = 0;
+ qc->nbytes = qc->curbytes = 0;
ata_tf_init(ap, &qc->tf, dev->devno);
return 0;
}
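+/*
+ * Common completion tail: release the command's tag, clear
+ * ap->active_tag when it matches, and wake up any waiter.
+ */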
+static void __ata_qc_complete(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ unsigned int tag, do_clear = 0;
+
+ qc->flags = 0;
+ tag = qc->tag;
+ if (likely(ata_tag_valid(tag))) {
+ if (tag == ap->active_tag)
+ ap->active_tag = ATA_TAG_POISON;
+ qc->tag = ATA_TAG_POISON;
+ do_clear = 1;
+ }
+
+ if (qc->waiting) {
+ struct completion *waiting = qc->waiting;
+ qc->waiting = NULL;
+ complete(waiting);
+ }
+
+ if (likely(do_clear))
+ clear_bit(tag, &ap->qactive);
+}
+
+/**
+ * ata_qc_free - free unused ata_queued_cmd
+ * @qc: Command to free
+ *
+ * Frees an unused ata_queued_cmd object
+ * in case something prevents it from being used.
+ *
+ * LOCKING:
+ *
+ */
+void ata_qc_free(struct ata_queued_cmd *qc)
+{
+ assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
+ assert(qc->waiting == NULL); /* nothing should be waiting */
+
+ __ata_qc_complete(qc);
+}
+
/**
* ata_qc_complete - Complete an active ATA command
* @qc: Command to complete
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
- struct ata_port *ap = qc->ap;
- unsigned int tag, do_clear = 0;
int rc;
assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
if (rc != 0)
return;
- qc->flags = 0;
- tag = qc->tag;
- if (likely(ata_tag_valid(tag))) {
- if (tag == ap->active_tag)
- ap->active_tag = ATA_TAG_POISON;
- qc->tag = ATA_TAG_POISON;
- do_clear = 1;
- }
+ __ata_qc_complete(qc);
- if (qc->waiting) {
- struct completion *waiting = qc->waiting;
- qc->waiting = NULL;
- complete(waiting);
- }
+ VPRINTK("EXIT\n");
+}
- if (likely(do_clear))
- clear_bit(tag, &ap->qactive);
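+/*
+ * Decide whether a command needs its buffers DMA-mapped: always for
+ * the DMA protocols, and for PIO/ATAPI protocols only when the port
+ * uses DMA to perform PIO-protocol transfers (ATA_FLAG_PIO_DMA).
+ */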
+static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
- VPRINTK("EXIT\n");
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ case ATA_PROT_ATAPI_DMA:
+ return 1;
+
+ case ATA_PROT_ATAPI:
+ case ATA_PROT_PIO:
+ case ATA_PROT_PIO_MULT:
+ if (ap->flags & ATA_FLAG_PIO_DMA)
+ return 1;
+
+ /* fall through */
+
+ default:
+ return 0;
+ }
+
+ /* never reached */
}
/**
{
struct ata_port *ap = qc->ap;
- if (qc->flags & ATA_QCFLAG_SG) {
- if (ata_sg_setup(qc))
- goto err_out;
- } else if (qc->flags & ATA_QCFLAG_SINGLE) {
- if (ata_sg_setup_one(qc))
- goto err_out;
+ if (ata_should_dma_map(qc)) {
+ if (qc->flags & ATA_QCFLAG_SG) {
+ if (ata_sg_setup(qc))
+ goto err_out;
+ } else if (qc->flags & ATA_QCFLAG_SINGLE) {
+ if (ata_sg_setup_one(qc))
+ goto err_out;
+ }
+ } else {
+ qc->flags &= ~ATA_QCFLAG_DMAMAP;
}
ap->ops->qc_prep(qc);
void ata_bmdma_irq_clear(struct ata_port *ap)
{
- ata_bmdma_ack_irq(ap);
+ if (ap->flags & ATA_FLAG_MMIO) {
+ void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
+ writeb(readb(mmio), mmio);
+ } else {
+ unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
+ outb(inb(addr), addr);
+ }
+}
+
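+/**
+ * ata_bmdma_status - Read BMDMA status register
+ * @ap: port where the device is
+ *
+ * Reads and returns the bus-master DMA status register,
+ * via MMIO or PIO depending on ATA_FLAG_MMIO.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */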
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+ u8 host_stat;
+ if (ap->flags & ATA_FLAG_MMIO) {
+ void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+ host_stat = readb(mmio + ATA_DMA_STATUS);
+ } else
+ host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ return host_stat;
+}
+
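+/**
+ * ata_bmdma_stop - Stop a BMDMA transaction
+ * @ap: port where the device is
+ *
+ * Clears the start/stop bit in the bus-master DMA command register,
+ * then reads the alternate status register once to provide the
+ * one-PIO-cycle wait required for the HDMA1:0 transition.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */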
+void ata_bmdma_stop(struct ata_port *ap)
+{
+ if (ap->flags & ATA_FLAG_MMIO) {
+ void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+ /* clear start/stop bit */
+ writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+ mmio + ATA_DMA_CMD);
+ } else {
+ /* clear start/stop bit */
+ outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
+ ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+ }
+
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_altstatus(ap); /* dummy read */
}
/**
case ATA_PROT_ATAPI_DMA:
case ATA_PROT_ATAPI:
/* check status of DMA engine */
- host_stat = ata_bmdma_status(ap);
+ host_stat = ap->ops->bmdma_status(ap);
VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
/* if it's not our irq... */
goto idle_irq;
/* before we do anything else, clear DMA-Start bit */
- ata_bmdma_stop(ap);
+ ap->ops->bmdma_stop(ap);
/* fall through */
ap->id, qc->tf.protocol, status);
/* ack bmdma irq events */
- ata_bmdma_ack_irq(ap);
+ ap->ops->irq_clear(ap);
/* complete taskfile transaction */
ata_qc_complete(qc, status);
int ata_port_start (struct ata_port *ap)
{
- struct pci_dev *pdev = ap->host_set->pdev;
+ struct device *dev = ap->host_set->dev;
- ap->prd = pci_alloc_consistent(pdev, ATA_PRD_TBL_SZ, &ap->prd_dma);
+ ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
if (!ap->prd)
return -ENOMEM;
void ata_port_stop (struct ata_port *ap)
{
- struct pci_dev *pdev = ap->host_set->pdev;
+ struct device *dev = ap->host_set->dev;
- pci_free_consistent(pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+ dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
}
/**
host->max_channel = 1;
host->unique_id = ata_unique_id++;
host->max_cmd_len = 12;
- scsi_set_device(host, &ent->pdev->dev);
+ scsi_set_device(host, ent->dev);
scsi_assign_lock(host, &host_set->lock);
ap->flags = ATA_FLAG_PORT_DISABLED;
ap->ctl = ATA_DEVCTL_OBS;
ap->host_set = host_set;
ap->port_no = port_no;
+ ap->hard_port_no =
+ ent->legacy_mode ? ent->hard_port_no : port_no;
ap->pio_mask = ent->pio_mask;
ap->mwdma_mask = ent->mwdma_mask;
ap->udma_mask = ent->udma_mask;
int ata_device_add(struct ata_probe_ent *ent)
{
unsigned int count = 0, i;
- struct pci_dev *pdev = ent->pdev;
+ struct device *dev = ent->dev;
struct ata_host_set *host_set;
DPRINTK("ENTER\n");
memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
spin_lock_init(&host_set->lock);
- host_set->pdev = pdev;
+ host_set->dev = dev;
host_set->n_ports = ent->n_ports;
host_set->irq = ent->irq;
host_set->mmio_base = ent->mmio_base;
*/
}
- rc = scsi_add_host(ap->host, &pdev->dev);
+ rc = scsi_add_host(ap->host, dev);
if (rc) {
printk(KERN_ERR "ata%u: scsi_add_host failed\n",
ap->id);
scsi_scan_host(ap->host);
}
- pci_set_drvdata(pdev, host_set);
+ dev_set_drvdata(dev, host_set);
VPRINTK("EXIT, returning %u\n", ent->n_ports);
return ent->n_ports; /* success */
}
static struct ata_probe_ent *
-ata_probe_ent_alloc(int n, struct pci_dev *pdev, struct ata_port_info **port)
+ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
{
struct ata_probe_ent *probe_ent;
- int i;
- probe_ent = kmalloc(sizeof(*probe_ent) * n, GFP_KERNEL);
+ probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
if (!probe_ent) {
printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
- pci_name(pdev));
+ kobject_name(&(dev->kobj)));
return NULL;
}
- memset(probe_ent, 0, sizeof(*probe_ent) * n);
+ memset(probe_ent, 0, sizeof(*probe_ent));
- for (i = 0; i < n; i++) {
- INIT_LIST_HEAD(&probe_ent[i].node);
- probe_ent[i].pdev = pdev;
+ INIT_LIST_HEAD(&probe_ent->node);
+ probe_ent->dev = dev;
- probe_ent[i].sht = port[i]->sht;
- probe_ent[i].host_flags = port[i]->host_flags;
- probe_ent[i].pio_mask = port[i]->pio_mask;
- probe_ent[i].mwdma_mask = port[i]->mwdma_mask;
- probe_ent[i].udma_mask = port[i]->udma_mask;
- probe_ent[i].port_ops = port[i]->port_ops;
-
- }
+ probe_ent->sht = port->sht;
+ probe_ent->host_flags = port->host_flags;
+ probe_ent->pio_mask = port->pio_mask;
+ probe_ent->mwdma_mask = port->mwdma_mask;
+ probe_ent->udma_mask = port->udma_mask;
+ probe_ent->port_ops = port->port_ops;
return probe_ent;
}
+#ifdef CONFIG_PCI
struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
{
- struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(1, pdev, port);
+ struct ata_probe_ent *probe_ent =
+ ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
if (!probe_ent)
return NULL;
return probe_ent;
}
-struct ata_probe_ent *
-ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port)
+static struct ata_probe_ent *
+ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
+ struct ata_probe_ent **ppe2)
{
- struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(2, pdev, port);
+ struct ata_probe_ent *probe_ent, *probe_ent2;
+
+ probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
if (!probe_ent)
return NULL;
+ probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
+ if (!probe_ent2) {
+ kfree(probe_ent);
+ return NULL;
+ }
+
+ probe_ent->n_ports = 1;
+ probe_ent->irq = 14;
- probe_ent[0].n_ports = 1;
- probe_ent[0].irq = 14;
+ probe_ent->hard_port_no = 0;
+ probe_ent->legacy_mode = 1;
- probe_ent[1].n_ports = 1;
- probe_ent[1].irq = 15;
+ probe_ent2->n_ports = 1;
+ probe_ent2->irq = 15;
- probe_ent[0].port[0].cmd_addr = 0x1f0;
- probe_ent[0].port[0].altstatus_addr =
- probe_ent[0].port[0].ctl_addr = 0x3f6;
- probe_ent[0].port[0].bmdma_addr = pci_resource_start(pdev, 4);
+ probe_ent2->hard_port_no = 1;
+ probe_ent2->legacy_mode = 1;
+
+ probe_ent->port[0].cmd_addr = 0x1f0;
+ probe_ent->port[0].altstatus_addr =
+ probe_ent->port[0].ctl_addr = 0x3f6;
+ probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
- probe_ent[1].port[0].cmd_addr = 0x170;
- probe_ent[1].port[0].altstatus_addr =
- probe_ent[1].port[0].ctl_addr = 0x376;
- probe_ent[1].port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
+ probe_ent2->port[0].cmd_addr = 0x170;
+ probe_ent2->port[0].altstatus_addr =
+ probe_ent2->port[0].ctl_addr = 0x376;
+ probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
- ata_std_ports(&probe_ent[0].port[0]);
- ata_std_ports(&probe_ent[1].port[0]);
+ ata_std_ports(&probe_ent->port[0]);
+ ata_std_ports(&probe_ent2->port[0]);
+ *ppe2 = probe_ent2;
return probe_ent;
}
struct ata_port_info *port[2];
u8 tmp8, mask;
unsigned int legacy_mode = 0;
+ int disable_dev_on_err = 1;
int rc;
DPRINTK("ENTER\n");
else
port[1] = port[0];
- if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0) {
+ if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
+ && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
/* TODO: support transitioning to native mode? */
pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
mask = (1 << 2) | (1 << 0);
return rc;
rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
+ if (rc) {
+ disable_dev_on_err = 0;
goto err_out;
+ }
if (legacy_mode) {
if (!request_region(0x1f0, 8, "libata")) {
conflict = ____request_resource(&ioport_resource, &res);
if (!strcmp(conflict->name, "libata"))
legacy_mode |= (1 << 0);
- else
+ else {
+ disable_dev_on_err = 0;
printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
+ }
} else
legacy_mode |= (1 << 0);
conflict = ____request_resource(&ioport_resource, &res);
if (!strcmp(conflict->name, "libata"))
legacy_mode |= (1 << 1);
- else
+ else {
+ disable_dev_on_err = 0;
printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
+ }
} else
legacy_mode |= (1 << 1);
}
goto err_out_regions;
if (legacy_mode) {
- probe_ent = ata_pci_init_legacy_mode(pdev, port);
- if (probe_ent)
- probe_ent2 = &probe_ent[1];
+ probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
} else
probe_ent = ata_pci_init_native_mode(pdev, port);
if (!probe_ent) {
ata_device_add(probe_ent);
if (legacy_mode & (1 << 1))
ata_device_add(probe_ent2);
- } else {
+ } else
ata_device_add(probe_ent);
- }
+
kfree(probe_ent);
+ kfree(probe_ent2);
return 0;
release_region(0x170, 8);
pci_release_regions(pdev);
err_out:
- pci_disable_device(pdev);
+ if (disable_dev_on_err)
+ pci_disable_device(pdev);
return rc;
}
void ata_pci_remove_one (struct pci_dev *pdev)
{
- struct ata_host_set *host_set = pci_get_drvdata(pdev);
+ struct device *dev = pci_dev_to_dev(pdev);
+ struct ata_host_set *host_set = dev_get_drvdata(dev);
struct ata_port *ap;
unsigned int i;
kfree(host_set);
pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
+ dev_set_drvdata(dev, NULL);
}
/* move to PCI subsystem */
return (tmp == bits->val) ? 1 : 0;
}
+#endif /* CONFIG_PCI */
/**
* Do not depend on ABI/API stability.
*/
-EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
-EXPORT_SYMBOL_GPL(ata_pci_init_legacy_mode);
-EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_check_status);
+EXPORT_SYMBOL_GPL(ata_altstatus);
+EXPORT_SYMBOL_GPL(ata_chk_err);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
-EXPORT_SYMBOL_GPL(ata_pci_init_one);
-EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
+EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
+EXPORT_SYMBOL_GPL(ata_pci_init_one);
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+#endif /* CONFIG_PCI */