#include <asm/io.h>
#define DRV_NAME "sata_mv"
-#define DRV_VERSION "0.7"
+#define DRV_VERSION "0.5"
enum {
/* BARs are enumerated in pci_resource_start() terms */
MV_PCI_REG_BASE = 0,
MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
- MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
- MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
- MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
- MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
- MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
-
MV_SATAHC0_REG_BASE = 0x20000,
MV_FLASH_CTL = 0x1046c,
MV_GPIO_PORT_CTL = 0x104f0,
MV_HP_ERRATA_50XXB2 = (1 << 2),
MV_HP_ERRATA_60X1B2 = (1 << 3),
MV_HP_ERRATA_60X1C0 = (1 << 4),
- MV_HP_ERRATA_XX42A0 = (1 << 5),
- MV_HP_50XX = (1 << 6),
- MV_HP_GEN_IIE = (1 << 7),
+ MV_HP_50XX = (1 << 5),
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0),
#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
-#define IS_GEN_I(hpriv) IS_50XX(hpriv)
-#define IS_GEN_II(hpriv) IS_60XX(hpriv)
-#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
enum {
/* Our DMA boundary is determined by an ePRD being unable to handle
chip_5080,
chip_604x,
chip_608x,
- chip_6042,
- chip_7042,
};
/* Command ReQuest Block: 32B */
u16 ata_cmd[11];
};
-struct mv_crqb_iie {
- u32 addr;
- u32 addr_hi;
- u32 flags;
- u32 len;
- u32 ata_cmd[4];
-};
-
/* Command ResPonse Block: 8B */
struct mv_crpb {
u16 id;
dma_addr_t crpb_dma;
struct mv_sg *sg_tbl;
dma_addr_t sg_tbl_dma;
+
+ unsigned req_producer; /* copy of req_in_ptr */
+ unsigned rsp_consumer; /* copy of rsp_out_ptr */
u32 pp_flags;
};
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
-static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
+static int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
+ .eh_strategy_handler = ata_scsi_error,
.can_queue = MV_USE_Q_DEPTH,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = MV_MAX_SG_CT / 2,
+ .max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.host_stop = mv_host_stop,
};
-static const struct ata_port_operations mv_iie_ops = {
- .port_disable = ata_port_disable,
-
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .phy_reset = mv_phy_reset,
-
- .qc_prep = mv_qc_prep_iie,
- .qc_issue = mv_qc_issue,
-
- .eng_timeout = mv_eng_timeout,
-
- .irq_handler = mv_interrupt,
- .irq_clear = mv_irq_clear,
-
- .scr_read = mv_scr_read,
- .scr_write = mv_scr_write,
-
- .port_start = mv_port_start,
- .port_stop = mv_port_stop,
- .host_stop = mv_host_stop,
-};
-
static const struct ata_port_info mv_port_info[] = {
{ /* chip_504x */
.sht = &mv_sht,
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv6_ops,
},
- { /* chip_6042 */
- .sht = &mv_sht,
- .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
- .pio_mask = 0x1f, /* pio0-4 */
- .udma_mask = 0x7f, /* udma0-6 */
- .port_ops = &mv_iie_ops,
- },
- { /* chip_7042 */
- .sht = &mv_sht,
- .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
- MV_FLAG_DUAL_HC),
- .pio_mask = 0x1f, /* pio0-4 */
- .udma_mask = 0x7f, /* udma0-6 */
- .port_ops = &mv_iie_ops,
- },
};
static const struct pci_device_id mv_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
- {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
* @base: port base address
* @pp: port private data
*
- * Verify the local cache of the eDMA state is accurate with a
- * WARN_ON.
+ * Verify the local cache of the eDMA state is accurate with an
+ * assert.
*
* LOCKING:
* Inherited from caller.
writelfl(EDMA_EN, base + EDMA_CMD_OFS);
pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
}
- WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
+ assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
}
/**
* mv_stop_dma - Disable eDMA engine
* @ap: ATA channel to manipulate
*
- * Verify the local cache of the eDMA state is accurate with a
- * WARN_ON.
+ * Verify the local cache of the eDMA state is accurate with an
+ * assert.
*
* LOCKING:
* Inherited from caller.
writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
} else {
- WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
+ assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
/* now properly wait for the eDMA to stop */
mv_dump_mem(mmio_base+0xf00, 0x4);
mv_dump_mem(mmio_base+0x1d00, 0x6c);
for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
- hc_base = mv_hc_base(mmio_base, hc);
+ hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
DPRINTK("HC regs (HC %i):\n", hc);
mv_dump_mem(hc_base, 0x1c);
}
dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
}
-static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
-{
- u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
-
- /* set up non-NCQ EDMA configuration */
- cfg &= ~0x1f; /* clear queue depth */
- cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */
- cfg &= ~(1 << 9); /* disable equeue */
-
- if (IS_GEN_I(hpriv))
- cfg |= (1 << 8); /* enab config burst size mask */
-
- else if (IS_GEN_II(hpriv))
- cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
-
- else if (IS_GEN_IIE(hpriv)) {
- cfg |= (1 << 23); /* dis RX PM port mask */
- cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
- cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
- cfg |= (1 << 18); /* enab early completion */
- cfg |= (1 << 17); /* enab host q cache */
- cfg |= (1 << 22); /* enab cutthrough */
- }
-
- writelfl(cfg, port_mmio + EDMA_CFG_OFS);
-}
-
/**
* mv_port_start - Port specific init/start routine.
* @ap: ATA channel to manipulate
static int mv_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
- struct mv_host_priv *hpriv = ap->host_set->private_data;
struct mv_port_priv *pp;
void __iomem *port_mmio = mv_ap_base(ap);
void *mem;
pp->sg_tbl = mem;
pp->sg_tbl_dma = mem_dma;
- mv_edma_cfg(hpriv, port_mmio);
+ writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT |
+ EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS);
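/* The (x >> 16) >> 16 idiom used below extracts the high 32 bits of a DMA
 * address: dma_addr_t may itself be only 32 bits wide, and shifting a
 * 32-bit value by 32 is undefined in C, so two 16-bit shifts are used.
 */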
writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
- if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
- writelfl(pp->crqb_dma & 0xffffffff,
- port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
- else
- writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+ writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+ writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
-
- if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
- writelfl(pp->crpb_dma & 0xffffffff,
- port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
- else
- writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-
writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
+ pp->req_producer = pp->rsp_consumer = 0;
+
/* Don't turn on EDMA here...do it before DMA commands only. Else
* we'll be unable to send non-data, PIO, etc due to restricted access
* to shadow regs.
pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
- pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
+ pp->sg_tbl[i].flags_size = cpu_to_le32(len);
sg_len -= len;
addr += len;
}
}
-static inline unsigned mv_inc_q_index(unsigned index)
+static inline unsigned mv_inc_q_index(unsigned *index)
{
- return (index + 1) & MV_MAX_Q_DEPTH_MASK;
+ *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
+ return *index;
}
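/* The request and response queues are circular: MV_MAX_Q_DEPTH is a power
 * of two, so masking with MV_MAX_Q_DEPTH_MASK wraps the incremented index
 * (for example, with a queue depth of 32 the mask is 0x1f and index 31
 * advances back to 0).
 */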
static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
- u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
+ *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
(last ? CRQB_CMD_LAST : 0);
- *cmdw = cpu_to_le16(tmp);
}
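/* Each CRQB ata_cmd[] word packed above describes one shadow-register
 * write for the EDMA engine: the register value in the low bits, the
 * register address at CRQB_CMD_ADDR_SHIFT, and the CS plus optional
 * LAST control flags.
 */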
/**
u16 *cw;
struct ata_taskfile *tf;
u16 flags = 0;
- unsigned in_index;
- if (ATA_PROT_DMA != qc->tf.protocol)
+ if (ATA_PROT_DMA != qc->tf.protocol) {
return;
+ }
+
+ /* the req producer index should be the same as we remember it */
+ assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
+ EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+ pp->req_producer);
/* Fill in command request block
*/
- if (!(qc->tf.flags & ATA_TFLAG_WRITE))
+ if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
flags |= CRQB_FLAG_READ;
- WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
+ }
+ assert(MV_MAX_Q_DEPTH > qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
- /* get current queue index from hardware */
- in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
- >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
-
- pp->crqb[in_index].sg_addr =
+ pp->crqb[pp->req_producer].sg_addr =
cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
- pp->crqb[in_index].sg_addr_hi =
+ pp->crqb[pp->req_producer].sg_addr_hi =
cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
- pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
+ pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);
- cw = &pp->crqb[in_index].ata_cmd[0];
+ cw = &pp->crqb[pp->req_producer].ata_cmd[0];
tf = &qc->tf;
/* Sadly, the CRQB cannot accommodate all registers--there are
mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
- mv_fill_sg(qc);
-}
-
-/**
- * mv_qc_prep_iie - Host specific command preparation.
- * @qc: queued command to prepare
- *
- * This routine simply redirects to the general purpose routine
- * if command is not DMA. Else, it handles prep of the CRQB
- * (command request block), does some sanity checking, and calls
- * the SG load routine.
- *
- * LOCKING:
- * Inherited from caller.
- */
-static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct mv_port_priv *pp = ap->private_data;
- struct mv_crqb_iie *crqb;
- struct ata_taskfile *tf;
- unsigned in_index;
- u32 flags = 0;
-
- if (ATA_PROT_DMA != qc->tf.protocol)
- return;
-
- /* Fill in Gen IIE command request block
- */
- if (!(qc->tf.flags & ATA_TFLAG_WRITE))
- flags |= CRQB_FLAG_READ;
-
- WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
- flags |= qc->tag << CRQB_TAG_SHIFT;
-
- /* get current queue index from hardware */
- in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
- >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
-
- crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
- crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
- crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
- crqb->flags = cpu_to_le32(flags);
-
- tf = &qc->tf;
- crqb->ata_cmd[0] = cpu_to_le32(
- (tf->command << 16) |
- (tf->feature << 24)
- );
- crqb->ata_cmd[1] = cpu_to_le32(
- (tf->lbal << 0) |
- (tf->lbam << 8) |
- (tf->lbah << 16) |
- (tf->device << 24)
- );
- crqb->ata_cmd[2] = cpu_to_le32(
- (tf->hob_lbal << 0) |
- (tf->hob_lbam << 8) |
- (tf->hob_lbah << 16) |
- (tf->hob_feature << 24)
- );
- crqb->ata_cmd[3] = cpu_to_le32(
- (tf->nsect << 0) |
- (tf->hob_nsect << 8)
- );
-
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) {
return;
+ }
mv_fill_sg(qc);
}
* LOCKING:
* Inherited from caller.
*/
-static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
+static int mv_qc_issue(struct ata_queued_cmd *qc)
{
void __iomem *port_mmio = mv_ap_base(qc->ap);
struct mv_port_priv *pp = qc->ap->private_data;
- unsigned in_index;
u32 in_ptr;
if (ATA_PROT_DMA != qc->tf.protocol) {
return ata_qc_issue_prot(qc);
}
- in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
- in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
+ /* the req producer index should be the same as we remember it */
+ assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+ pp->req_producer);
/* until we do queuing, the queue should be empty at this point */
- WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
- >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+ assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+ ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
+ EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
- in_index = mv_inc_q_index(in_index); /* now incr producer index */
+ mv_inc_q_index(&pp->req_producer); /* now incr producer index */
mv_start_dma(port_mmio, pp);
/* and write the request in pointer to kick the EDMA to life */
in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
- in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
+ in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
return 0;
*
* This routine is for use when the port is in DMA mode, when it
* will be using the CRPB (command response block) method of
- * returning command completion information. We check indices
+ * returning command completion information. We assert indices
* are good, grab status, and bump the response consumer index to
* prove that we're up to date.
*
{
void __iomem *port_mmio = mv_ap_base(ap);
struct mv_port_priv *pp = ap->private_data;
- unsigned out_index;
u32 out_ptr;
u8 ata_status;
- out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
- out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
- ata_status = le16_to_cpu(pp->crpb[out_index].flags)
- >> CRPB_FLAG_STATUS_SHIFT;
+ /* the response consumer index should be the same as we remember it */
+ assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+ pp->rsp_consumer);
+
+ ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT;
/* increment our consumer index... */
- out_index = mv_inc_q_index(out_index);
+ pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);
/* and, until we do NCQ, there should only be 1 CRPB waiting */
- WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
- >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
+ assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
+ EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
+ pp->rsp_consumer);
/* write out our inc'd consumer index so EDMA knows we're caught up */
out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
- out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
+ out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
/* Return ATA status register for completed CRPB */
/**
* mv_err_intr - Handle error interrupts on the port
* @ap: ATA channel to manipulate
- * @reset_allowed: bool: 0 == don't trigger from reset here
*
* In most cases, just clear the interrupt and move on. However,
* some cases require an eDMA reset, which is done right before
* LOCKING:
* Inherited from caller.
*/
-static void mv_err_intr(struct ata_port *ap, int reset_allowed)
+static void mv_err_intr(struct ata_port *ap)
{
void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, serr = 0;
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
/* check for fatal here and recover if needed */
- if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
+ if (EDMA_ERR_FATAL & edma_err_cause) {
mv_stop_and_reset(ap);
+ }
}
/**
{
void __iomem *mmio = host_set->mmio_base;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
+ struct ata_port *ap;
struct ata_queued_cmd *qc;
u32 hc_irq_cause;
int shift, port, port0, hard_port, handled;
for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
u8 ata_status = 0;
- struct ata_port *ap = host_set->ports[port];
- struct mv_port_priv *pp = ap->private_data;
-
- hard_port = mv_hardport_from_port(port); /* range 0..3 */
+ ap = host_set->ports[port];
+ hard_port = port & MV_PORT_MASK; /* range 0-3 */
handled = 0; /* ensure ata_status is set if handled++ */
- /* Note that DEV_IRQ might happen spuriously during EDMA,
- * and should be ignored in such cases.
- * The cause of this is still under investigation.
- */
- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
- /* EDMA: check for response queue interrupt */
- if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
- ata_status = mv_get_crpb_status(ap);
- handled = 1;
- }
- } else {
- /* PIO: check for device (drive) interrupt */
- if ((DEV_IRQ << hard_port) & hc_irq_cause) {
- ata_status = readb((void __iomem *)
+ if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
+ /* new CRPB on the queue; just one at a time until NCQ
+ */
+ ata_status = mv_get_crpb_status(ap);
+ handled++;
+ } else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
+ /* received ATA IRQ; read the status reg to clear INTRQ
+ */
+ ata_status = readb((void __iomem *)
ap->ioaddr.status_addr);
- handled = 1;
- /* ignore spurious intr if drive still BUSY */
- if (ata_status & ATA_BUSY) {
- ata_status = 0;
- handled = 0;
- }
- }
+ handled++;
}
- if (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))
+ if (ap &&
+ (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))
continue;
err_mask = ac_err_mask(ata_status);
shift++; /* skip bit 8 in the HC Main IRQ reg */
}
if ((PORT0_ERR << shift) & relevant) {
- mv_err_intr(ap, 1);
+ mv_err_intr(ap);
err_mask |= AC_ERR_OTHER;
- handled = 1;
+ handled++;
}
- if (handled) {
+ if (handled && ap) {
qc = ata_qc_from_tag(ap, ap->active_tag);
- if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
+ if (NULL != qc) {
VPRINTK("port %u IRQ found for qc, "
"ata_status 0x%x\n", port,ata_status);
/* mark qc status appropriately */
struct ata_host_set *host_set = dev_instance;
unsigned int hc, handled = 0, n_hcs;
void __iomem *mmio = host_set->mmio_base;
- struct mv_host_priv *hpriv;
u32 irq_stat;
irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
handled++;
}
}
-
- hpriv = host_set->private_data;
- if (IS_60XX(hpriv)) {
- /* deal with the interrupt coalescing bits */
- if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
- writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
- writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
- writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
- }
- }
-
if (PCI_ERR & irq_stat) {
printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
readl(mmio + PCI_IRQ_CAUSE_OFS));
m2 |= hpriv->signal[port].pre;
m2 &= ~(1 << 16);
- /* according to mvSata 3.6.1, some IIE values are fixed */
- if (IS_GEN_IIE(hpriv)) {
- m2 &= ~0xC30FF01F;
- m2 |= 0x0000900F;
- }
-
writel(m2, port_mmio + PHY_MODE2);
}
if (IS_60XX(hpriv)) {
u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
- ifctl |= (1 << 7); /* enable gen2i speed */
- ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
+ ifctl |= (1 << 12) | (1 << 7);
writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
}
ap->host_set->mmio_base, ap, qc, qc->scsicmd,
&qc->scsicmd->cmnd);
- spin_lock_irqsave(&ap->host_set->lock, flags);
- mv_err_intr(ap, 0);
+ mv_err_intr(ap);
mv_stop_and_reset(ap);
- spin_unlock_irqrestore(&ap->host_set->lock, flags);
- WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
- if (qc->flags & ATA_QCFLAG_ACTIVE) {
- qc->err_mask |= AC_ERR_TIMEOUT;
- ata_eh_qc_complete(qc);
+ if (!qc) {
+ printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+ ap->id);
+ } else {
+ /* hack alert! We cannot use the supplied completion
+ * function from inside the ->eh_strategy_handler() thread.
+ * libata is the only user of ->eh_strategy_handler() in
+ * any kernel, so the default scsi_done() assumes it is
+ * not being called from the SCSI EH.
+ */
+ spin_lock_irqsave(&ap->host_set->lock, flags);
+ qc->scsidone = scsi_finish_command;
+ qc->err_mask |= AC_ERR_OTHER;
+ ata_qc_complete(qc);
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
}
}
}
break;
- case chip_7042:
- case chip_6042:
- hpriv->ops = &mv6xxx_ops;
-
- hp_flags |= MV_HP_GEN_IIE;
-
- switch (rev_id) {
- case 0x0:
- hp_flags |= MV_HP_ERRATA_XX42A0;
- break;
- case 0x1:
- hp_flags |= MV_HP_ERRATA_60X1C0;
- break;
- default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying 60X1C0 workarounds to unknown rev\n");
- hp_flags |= MV_HP_ERRATA_60X1C0;
- break;
- }
- break;
-
default:
printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
return 1;
void __iomem *port_mmio = mv_port_base(mmio, port);
u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
- ifctl |= (1 << 7); /* enable gen2i speed */
- ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
+ ifctl |= (1 << 12);
writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
}
if (rc) {
return rc;
}
- pci_set_master(pdev);
rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {