unsigned long tmout_pat,
unsigned long tmout);
static void __ata_dev_select (struct ata_port *ap, unsigned int device);
-static void ata_dma_complete(struct ata_queued_cmd *qc, u8 host_stat);
static void ata_host_set_pio(struct ata_port *ap);
static void ata_host_set_udma(struct ata_port *ap);
static void ata_dev_set_pio(struct ata_port *ap, unsigned int device);
static void ata_dev_set_udma(struct ata_port *ap, unsigned int device);
static void ata_set_mode(struct ata_port *ap);
-static int ata_qc_issue_prot(struct ata_queued_cmd *qc);
static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
-static const char * thr_state_name[] = {
- "THR_UNKNOWN",
- "THR_PORT_RESET",
- "THR_AWAIT_DEATH",
- "THR_PROBE_FAILED",
- "THR_IDLE",
- "THR_PROBE_SUCCESS",
- "THR_PROBE_START",
-};
-
-/**
- * ata_thr_state_name - convert thread state enum to string
- * @thr_state: thread state to be converted to string
- *
- * Converts the specified thread state id to a constant C string.
- *
- * LOCKING:
- * None.
- *
- * RETURNS:
- * The THR_xxx-prefixed string naming the specified thread
- * state id, or the string "<invalid THR_xxx state>".
- */
-
-static const char *ata_thr_state_name(unsigned int thr_state)
-{
- if (thr_state < ARRAY_SIZE(thr_state_name))
- return thr_state_name[thr_state];
- return "<invalid THR_xxx state>";
-}
-
/**
* ata_tf_load_pio - send taskfile registers to host controller
* @ap: Port to which output is sent
}
/**
- * ata_port_reset -
- * @ap:
+ * ata_bus_probe - Reset and probe ATA bus
+ * @ap: Bus to probe
*
* LOCKING:
+ *
+ * RETURNS:
+ * Zero on success, non-zero on error.
*/
-static void ata_port_reset(struct ata_port *ap)
+static int ata_bus_probe(struct ata_port *ap)
{
unsigned int i, found = 0;
if (ap->flags & ATA_FLAG_PORT_DISABLED)
goto err_out_disable;
- ap->thr_state = THR_PROBE_SUCCESS;
-
- return;
+ return 0;
err_out_disable:
ap->ops->port_disable(ap);
err_out:
- ap->thr_state = THR_PROBE_FAILED;
+ return -1;
}
/**
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scsi_cmnd *cmd = qc->scsicmd;
struct scatterlist *sg = qc->sg;
- int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+ int dir = qc->pci_dma_dir;
- assert(dir == SCSI_DATA_READ || dir == SCSI_DATA_WRITE);
- assert(qc->flags & ATA_QCFLAG_SG);
+ assert(qc->flags & ATA_QCFLAG_DMAMAP);
assert(sg != NULL);
- if (!cmd->use_sg)
+ if (qc->flags & ATA_QCFLAG_SINGLE)
assert(qc->n_elem == 1);
DPRINTK("unmapping %u sg elements\n", qc->n_elem);
- if (cmd->use_sg)
+ if (qc->flags & ATA_QCFLAG_SG)
pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
else
pci_unmap_single(ap->host_set->pdev, sg_dma_address(&sg[0]),
sg_dma_len(&sg[0]), dir);
- qc->flags &= ~ATA_QCFLAG_SG;
+ qc->flags &= ~ATA_QCFLAG_DMAMAP;
qc->sg = NULL;
}
/**
- * ata_fill_sg -
- * @qc:
+ * ata_fill_sg - Fill PCI IDE PRD table
+ * @qc: Metadata associated with taskfile to be transferred
*
* LOCKING:
*
*/
-void ata_fill_sg(struct ata_queued_cmd *qc)
+static void ata_fill_sg(struct ata_queued_cmd *qc)
{
struct scatterlist *sg = qc->sg;
struct ata_port *ap = qc->ap;
ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
+/**
+ * ata_qc_prep - Prepare taskfile for submission
+ * @qc: Metadata associated with taskfile to be prepared
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host_set lock)
+ */
+void ata_qc_prep(struct ata_queued_cmd *qc)
+{
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ return;
+
+ ata_fill_sg(qc);
+}
+
+void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
+{
+ struct scatterlist *sg;
+
+ qc->flags |= ATA_QCFLAG_SINGLE;
+
+ memset(&qc->sgent, 0, sizeof(qc->sgent));
+ qc->sg = &qc->sgent;
+ qc->n_elem = 1;
+ qc->buf_virt = buf;
+
+ sg = qc->sg;
+ sg->page = virt_to_page(buf);
+ sg->offset = (unsigned long) buf & ~PAGE_MASK;
+ sg_dma_len(sg) = buflen;
+
+ WARN_ON(buflen > PAGE_SIZE);
+}
+
+void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
+ unsigned int n_elem)
+{
+ qc->flags |= ATA_QCFLAG_SG;
+ qc->sg = sg;
+ qc->n_elem = n_elem;
+}
+
/**
* ata_sg_setup_one -
* @qc:
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scsi_cmnd *cmd = qc->scsicmd;
- int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
+ int dir = qc->pci_dma_dir;
struct scatterlist *sg = qc->sg;
- unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG);
dma_addr_t dma_address;
- assert(sg == &qc->sgent);
- assert(qc->n_elem == 1);
-
- sg->page = virt_to_page(cmd->request_buffer);
- sg->offset = (unsigned long) cmd->request_buffer & ~PAGE_MASK;
- sg_dma_len(sg) = cmd->request_bufflen;
-
- if (!have_sg)
- return 0;
-
- dma_address = pci_map_single(ap->host_set->pdev, cmd->request_buffer,
- cmd->request_bufflen, dir);
+ dma_address = pci_map_single(ap->host_set->pdev, qc->buf_virt,
+ sg_dma_len(sg), dir);
if (pci_dma_mapping_error(dma_address))
return -1;
sg_dma_address(sg) = dma_address;
- DPRINTK("mapped buffer of %d bytes for %s\n", cmd->request_bufflen,
+ DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
return 0;
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scsi_cmnd *cmd = qc->scsicmd;
- struct scatterlist *sg;
- int n_elem;
- unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG);
-
- VPRINTK("ENTER, ata%u, use_sg %d\n", ap->id, cmd->use_sg);
- assert(cmd->use_sg > 0);
-
- sg = (struct scatterlist *)cmd->request_buffer;
- if (have_sg) {
- int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
- n_elem = pci_map_sg(ap->host_set->pdev, sg, cmd->use_sg, dir);
- if (n_elem < 1)
- return -1;
- DPRINTK("%d sg elements mapped\n", n_elem);
- } else {
- n_elem = cmd->use_sg;
- }
+ struct scatterlist *sg = qc->sg;
+ int n_elem, dir;
+
+ VPRINTK("ENTER, ata%u\n", ap->id);
+ assert(qc->flags & ATA_QCFLAG_SG);
+
+ dir = qc->pci_dma_dir;
+ n_elem = pci_map_sg(ap->host_set->pdev, sg, qc->n_elem, dir);
+ if (n_elem < 1)
+ return -1;
+
+ DPRINTK("%d sg elements mapped\n", n_elem);
+
qc->n_elem = n_elem;
return 0;
{
struct ata_queued_cmd *qc;
struct scatterlist *sg;
- struct scsi_cmnd *cmd;
+ struct page *page;
unsigned char *buf;
u8 status;
qc = ata_qc_from_tag(ap, ap->active_tag);
assert(qc != NULL);
- cmd = qc->scsicmd;
sg = qc->sg;
if (qc->cursect == (qc->nsect - 1))
ap->pio_task_state = PIO_ST_LAST;
- buf = kmap(sg[qc->cursg].page) +
+ page = sg[qc->cursg].page;
+ buf = kmap(page) +
sg[qc->cursg].offset + (qc->cursg_ofs * ATA_SECT_SIZE);
qc->cursect++;
qc->cursg_ofs++;
- if (cmd->use_sg)
+ if (qc->flags & ATA_QCFLAG_SG)
if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) {
qc->cursg++;
qc->cursg_ofs = 0;
else
insl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS);
- kunmap(sg[qc->cursg].page);
+ kunmap(page);
}
static void ata_pio_task(void *_data)
}
/**
- * ata_eng_timeout - Handle timeout of queued command
- * @ap: Port on which timed-out command is active
+ * ata_qc_timeout - Handle timeout of queued command
+ * @qc: Command that timed out
*
* Some part of the kernel (currently, only the SCSI layer)
* has noticed that the active command on port @ap has not
* transaction completed successfully.
*
* LOCKING:
- * Inherited from SCSI layer (none, can sleep)
*/
-void ata_eng_timeout(struct ata_port *ap)
+static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
- u8 host_stat, drv_stat;
- struct ata_queued_cmd *qc;
+ struct ata_port *ap = qc->ap;
+ u8 host_stat = 0, drv_stat;
DPRINTK("ENTER\n");
- qc = ata_qc_from_tag(ap, ap->active_tag);
- if (!qc) {
- printk(KERN_ERR "ata%u: BUG: timeout without command\n",
- ap->id);
- goto out;
- }
-
/* hack alert! We cannot use the supplied completion
* function from inside the ->eh_strategy_handler() thread.
* libata is the only user of ->eh_strategy_handler() in
qc->scsidone = scsi_finish_command;
switch (qc->tf.protocol) {
+
case ATA_PROT_DMA:
- if (ap->flags & ATA_FLAG_MMIO) {
- void *mmio = (void *) ap->ioaddr.bmdma_addr;
- host_stat = readb(mmio + ATA_DMA_STATUS);
- } else
- host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ case ATA_PROT_ATAPI_DMA:
+ host_stat = ata_bmdma_status(ap);
- printk(KERN_ERR "ata%u: DMA timeout, stat 0x%x\n",
- ap->id, host_stat);
+ /* before we do anything else, clear DMA-Start bit */
+ ata_bmdma_stop(ap);
- ata_dma_complete(qc, host_stat);
- break;
+ /* fall through */
case ATA_PROT_NODATA:
- drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+ default:
+ ata_altstatus(ap);
+ drv_stat = ata_chk_status(ap);
+
+ /* ack bmdma irq events */
+ ata_bmdma_ack_irq(ap);
- printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n",
- ap->id, qc->tf.command, drv_stat);
+ printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
+ ap->id, qc->tf.command, drv_stat, host_stat);
+ /* complete taskfile transaction */
ata_qc_complete(qc, drv_stat);
break;
+ }
- default:
- drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+ DPRINTK("EXIT\n");
+}
- printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n",
- ap->id, qc->tf.command, drv_stat);
+/**
+ * ata_eng_timeout - Handle timeout of queued command
+ * @ap: Port on which timed-out command is active
+ *
+ * Some part of the kernel (currently, only the SCSI layer)
+ * has noticed that the active command on port @ap has not
+ * completed after a specified length of time. Handle this
+ * condition by disabling DMA (if necessary) and completing
+ * transactions, with error if necessary.
+ *
+ * This also handles the case of the "lost interrupt", where
+ * for some reason (possibly hardware bug, possibly driver bug)
+ * an interrupt was not delivered to the driver, even though the
+ * transaction completed successfully.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ */
- ata_qc_complete(qc, drv_stat);
- break;
+void ata_eng_timeout(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+
+ DPRINTK("ENTER\n");
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (!qc) {
+ printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+ ap->id);
+ goto out;
}
+ ata_qc_timeout(qc);
+
out:
DPRINTK("EXIT\n");
}
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
struct ata_port *ap = qc->ap;
- struct scsi_cmnd *cmd = qc->scsicmd;
unsigned int tag, do_clear = 0;
+ int rc;
assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
assert(qc->flags & ATA_QCFLAG_ACTIVE);
- if (likely(qc->flags & ATA_QCFLAG_SG))
+ if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
ata_sg_clean(qc);
- if (cmd) {
- if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) {
- if (is_atapi_taskfile(&qc->tf))
- cmd->result = SAM_STAT_CHECK_CONDITION;
- else
- ata_to_sense_error(qc);
- } else {
- cmd->result = SAM_STAT_GOOD;
- }
+ /* call completion callback */
+ rc = qc->complete_fn(qc, drv_stat);
- qc->scsidone(cmd);
- }
+ /* if callback indicates not to complete command (non-zero),
+ * return immediately
+ */
+ if (rc != 0)
+ return;
- qc->flags &= ~ATA_QCFLAG_ACTIVE;
+ qc->flags = 0;
tag = qc->tag;
if (likely(ata_tag_valid(tag))) {
if (tag == ap->active_tag)
int ata_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scsi_cmnd *cmd = qc->scsicmd;
if (qc->flags & ATA_QCFLAG_SG) {
- /* set up SG table */
- if (cmd->use_sg) {
- if (ata_sg_setup(qc))
- goto err_out;
- } else {
- if (ata_sg_setup_one(qc))
- goto err_out;
- }
-
- ap->ops->fill_sg(qc);
+ if (ata_sg_setup(qc))
+ goto err_out;
+ } else if (qc->flags & ATA_QCFLAG_SINGLE) {
+ if (ata_sg_setup_one(qc))
+ goto err_out;
}
+ ap->ops->qc_prep(qc);
+
qc->ap->active_tag = qc->tag;
qc->flags |= ATA_QCFLAG_ACTIVE;
- return ata_qc_issue_prot(qc);
+ return ap->ops->qc_issue(qc);
err_out:
return -1;
* Zero on success, negative on error.
*/
-static int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
- u8 host_stat, dmactl;
+ u8 dmactl;
void *mmio = (void *) ap->ioaddr.bmdma_addr;
/* load PRD table addr. */
dmactl |= ATA_DMA_WR;
writeb(dmactl, mmio + ATA_DMA_CMD);
- /* clear interrupt, error bits */
- host_stat = readb(mmio + ATA_DMA_STATUS);
- writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, mmio + ATA_DMA_STATUS);
-
/* issue r/w command */
ap->ops->exec_command(ap, &qc->tf);
}
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
- u8 host_stat, dmactl;
+ u8 dmactl;
/* load PRD table addr. */
outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
dmactl |= ATA_DMA_WR;
outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
- /* clear interrupt, error bits */
- host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
- outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
- ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
-
/* issue r/w command */
ap->ops->exec_command(ap, &qc->tf);
}
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
-/**
- * ata_dma_complete - Complete an active ATA BMDMA command
- * @qc: Command to complete
- * @host_stat: BMDMA status register contents
- *
- * LOCKING:
- */
-
-static void ata_dma_complete(struct ata_queued_cmd *qc, u8 host_stat)
+void ata_bmdma_irq_clear(struct ata_port *ap)
{
- struct ata_port *ap = qc->ap;
- VPRINTK("ENTER\n");
-
- if (ap->flags & ATA_FLAG_MMIO) {
- void *mmio = (void *) ap->ioaddr.bmdma_addr;
-
- /* clear start/stop bit */
- writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
- mmio + ATA_DMA_CMD);
-
- /* ack intr, err bits */
- writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
- mmio + ATA_DMA_STATUS);
- } else {
- /* clear start/stop bit */
- outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
- ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
-
- /* ack intr, err bits */
- outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
- ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
- }
-
-
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_altstatus(ap); /* dummy read */
-
- DPRINTK("host %u, host_stat==0x%X, drv_stat==0x%X\n",
- ap->id, (u32) host_stat, (u32) ata_chk_status(ap));
-
- /* get drive status; clear intr; complete txn */
- ata_qc_complete(qc, ata_wait_idle(ap));
+ ata_bmdma_ack_irq(ap);
}
/**
struct ata_queued_cmd *qc)
{
u8 status, host_stat;
- unsigned int handled = 0;
switch (qc->tf.protocol) {
- /* BMDMA completion */
case ATA_PROT_DMA:
case ATA_PROT_ATAPI_DMA:
- if (ap->flags & ATA_FLAG_MMIO) {
- void *mmio = (void *) ap->ioaddr.bmdma_addr;
- host_stat = readb(mmio + ATA_DMA_STATUS);
- } else
- host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+ case ATA_PROT_ATAPI:
+ /* check status of DMA engine */
+ host_stat = ata_bmdma_status(ap);
VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat);
- if (!(host_stat & ATA_DMA_INTR)) {
- ap->stats.idle_irq++;
- break;
- }
+ /* if it's not our irq... */
+ if (!(host_stat & ATA_DMA_INTR))
+ goto idle_irq;
- ata_dma_complete(qc, host_stat);
- handled = 1;
- break;
+ /* before we do anything else, clear DMA-Start bit */
+ ata_bmdma_stop(ap);
+
+ /* fall through */
- /* command completion, but no data xfer */
- /* FIXME: a shared interrupt _will_ cause a non-data command
- * to be completed prematurely, with an error.
- *
- * This doesn't matter right now, since we aren't sending
- * non-data commands down this pipe except in development
- * situations.
- */
- case ATA_PROT_ATAPI:
case ATA_PROT_NODATA:
- status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
- DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
+ /* check altstatus */
+ status = ata_altstatus(ap);
+ if (status & ATA_BUSY)
+ goto idle_irq;
+
+ /* check main status, clearing INTRQ */
+ status = ata_chk_status(ap);
+ if (unlikely(status & ATA_BUSY))
+ goto idle_irq;
+ DPRINTK("BUS_NODATA (dev_stat 0x%X)\n", status);
+
+ /* ack bmdma irq events */
+ ata_bmdma_ack_irq(ap);
+
+ /* complete taskfile transaction */
ata_qc_complete(qc, status);
- handled = 1;
break;
default:
- ap->stats.idle_irq++;
+ goto idle_irq;
+ }
+
+ return 1; /* irq handled */
+
+idle_irq:
+ ap->stats.idle_irq++;
#ifdef ATA_IRQ_TRAP
- if ((ap->stats.idle_irq % 1000) == 0) {
- handled = 1;
- ata_irq_ack(ap, 0); /* debug trap */
- printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
- }
-#endif
- break;
+ if ((ap->stats.idle_irq % 1000) == 0) {
+ ata_irq_ack(ap, 0); /* debug trap */
+ printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
+ return 1; /* debug trap counts as handled */
}
-
- return handled;
+#endif
+ return 0; /* irq not handled */
}
/**
qc = ata_qc_from_tag(ap, ap->active_tag);
if (qc && (!(qc->tf.ctl & ATA_NIEN)))
- handled += ata_host_intr(ap, qc);
+ handled |= ata_host_intr(ap, qc);
}
}
return IRQ_RETVAL(handled);
}
-/**
- * ata_thread_iter -
- * @ap:
- *
- * LOCKING:
- *
- * RETURNS:
- *
- */
-
-static unsigned long ata_thread_iter(struct ata_port *ap)
-{
- long timeout = 0;
-
- DPRINTK("ata%u: thr_state %s\n",
- ap->id, ata_thr_state_name(ap->thr_state));
-
- switch (ap->thr_state) {
- case THR_UNKNOWN:
- ap->thr_state = THR_PORT_RESET;
- break;
-
- case THR_PROBE_START:
- ap->thr_state = THR_PORT_RESET;
- break;
-
- case THR_PORT_RESET:
- ata_port_reset(ap);
- break;
-
- case THR_PROBE_SUCCESS:
- up(&ap->probe_sem);
- ap->thr_state = THR_IDLE;
- break;
-
- case THR_PROBE_FAILED:
- up(&ap->probe_sem);
- ap->thr_state = THR_AWAIT_DEATH;
- break;
-
- case THR_AWAIT_DEATH:
- case THR_IDLE:
- timeout = -1;
- break;
-
- default:
- printk(KERN_DEBUG "ata%u: unknown thr state %s\n",
- ap->id, ata_thr_state_name(ap->thr_state));
- break;
- }
-
- DPRINTK("ata%u: new thr_state %s, returning %ld\n",
- ap->id, ata_thr_state_name(ap->thr_state), timeout);
- return timeout;
-}
-
/**
* atapi_packet_task - Write CDB bytes to hardware
* @_data: Port to which ATAPI device is attached.
pci_free_consistent(pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
}
-static void ata_probe_task(void *_data)
-{
- struct ata_port *ap = _data;
- long timeout;
-
- timeout = ata_thread_iter(ap);
- if (timeout < 0)
- return;
-
- if (timeout > 0)
- queue_delayed_work(ata_wq, &ap->probe_task, timeout);
- else
- queue_work(ata_wq, &ap->probe_task);
-}
-
/**
* ata_host_remove - Unregister SCSI host structure with upper layers
* @ap: Port to unregister
ap->udma_mask = ent->udma_mask;
ap->flags |= ent->host_flags;
ap->ops = ent->port_ops;
- ap->thr_state = THR_PROBE_START;
ap->cbl = ATA_CBL_NONE;
ap->device[0].flags = ATA_DFLAG_MASTER;
ap->active_tag = ATA_TAG_POISON;
INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
INIT_WORK(&ap->pio_task, ata_pio_task, ap);
- INIT_WORK(&ap->probe_task, ata_probe_task, ap);
for (i = 0; i < ATA_MAX_DEVICES; i++)
ap->device[i].devno = i;
- init_MUTEX_LOCKED(&ap->probe_sem);
-
#ifdef ATA_IRQ_TRAP
ap->stats.unhandled_irq = 1;
ap->stats.idle_irq = 1;
host_set->irq = ent->irq;
host_set->mmio_base = ent->mmio_base;
host_set->private_data = ent->private_data;
+ host_set->ops = ent->port_ops;
/* register each port bound to this device */
for (i = 0; i < ent->n_ports; i++) {
ap->ioaddr.bmdma_addr,
ent->irq);
+ ata_chk_status(ap);
+ host_set->ops->irq_clear(ap);
count++;
}
ap = host_set->ports[i];
DPRINTK("ata%u: probe begin\n", ap->id);
- queue_work(ata_wq, &ap->probe_task); /* start probe */
-
- DPRINTK("ata%u: probe-wait begin\n", ap->id);
- down(&ap->probe_sem); /* wait for end */
+ rc = ata_bus_probe(ap);
+ DPRINTK("ata%u: probe end\n", ap->id);
- DPRINTK("ata%u: probe-wait end\n", ap->id);
+ if (rc) {
+ /* FIXME: do something useful here?
+ * Current libata behavior will
+ * tear down everything when
+ * the module is removed
+ * or the h/w is unplugged.
+ */
+ }
rc = scsi_add_host(ap->host, &pdev->dev);
if (rc) {
}
free_irq(host_set->irq, host_set);
+ if (host_set->ops->host_stop)
+ host_set->ops->host_stop(host_set);
if (host_set->mmio_base)
iounmap(host_set->mmio_base);
- if (host_set->ports[0]->ops->host_stop)
- host_set->ports[0]->ops->host_stop(host_set);
for (i = 0; i < host_set->n_ports; i++) {
ap = host_set->ports[i];
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
+EXPORT_SYMBOL_GPL(ata_sg_init);
+EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
+EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load_pio);
EXPORT_SYMBOL_GPL(ata_tf_load_mmio);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
-EXPORT_SYMBOL_GPL(ata_fill_sg);
+EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup_pio);
EXPORT_SYMBOL_GPL(ata_bmdma_start_pio);
EXPORT_SYMBOL_GPL(ata_bmdma_setup_mmio);
EXPORT_SYMBOL_GPL(ata_bmdma_start_mmio);
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);