X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fsata_mv.c;fp=drivers%2Fscsi%2Fsata_mv.c;h=b00af0884e1ef7cd87916a15c196c85413883562;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=39ace4c6eef07db816f2e30019806d9f2a6d0740;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index 39ace4c6e..b00af0884 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c @@ -37,7 +37,7 @@ #include #define DRV_NAME "sata_mv" -#define DRV_VERSION "0.7" +#define DRV_VERSION "0.5" enum { /* BAR's are enumerated in terms of pci_resource_start() terms */ @@ -50,12 +50,6 @@ enum { MV_PCI_REG_BASE = 0, MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ - MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), - MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), - MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), - MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), - MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), - MV_SATAHC0_REG_BASE = 0x20000, MV_FLASH_CTL = 0x1046c, MV_GPIO_PORT_CTL = 0x104f0, @@ -93,7 +87,7 @@ enum { MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | - ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING), + ATA_FLAG_NO_ATAPI), MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE, CRQB_FLAG_READ = (1 << 0), @@ -234,9 +228,7 @@ enum { MV_HP_ERRATA_50XXB2 = (1 << 2), MV_HP_ERRATA_60X1B2 = (1 << 3), MV_HP_ERRATA_60X1C0 = (1 << 4), - MV_HP_ERRATA_XX42A0 = (1 << 5), - MV_HP_50XX = (1 << 6), - MV_HP_GEN_IIE = (1 << 7), + MV_HP_50XX = (1 << 5), /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), @@ -245,9 +237,6 @@ enum { #define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX) #define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0) -#define IS_GEN_I(hpriv) IS_50XX(hpriv) -#define IS_GEN_II(hpriv) IS_60XX(hpriv) -#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) enum { /* Our DMA boundary is determined by an ePRD being unable to handle @@ -266,39 +255,29 @@ enum chip_type { chip_5080, chip_604x, chip_608x, - chip_6042, - chip_7042, }; /* Command ReQuest Block: 32B */ struct mv_crqb { - __le32 sg_addr; - __le32 sg_addr_hi; - __le16 ctrl_flags; - __le16 ata_cmd[11]; -}; - -struct mv_crqb_iie { - __le32 addr; - __le32 addr_hi; - __le32 flags; - __le32 len; - __le32 ata_cmd[4]; + u32 sg_addr; + u32 sg_addr_hi; + u16 ctrl_flags; + u16 ata_cmd[11]; }; /* Command ResPonse Block: 8B */ struct mv_crpb { - __le16 id; - __le16 flags; - __le32 tmstmp; + u16 id; + u16 flags; + u32 tmstmp; }; /* EDMA Physical Region Descriptor (ePRD); A.K.A. 
SG */ struct mv_sg { - __le32 addr; - __le32 flags_size; - __le32 addr_hi; - __le32 reserved; + u32 addr; + u32 flags_size; + u32 addr_hi; + u32 reserved; }; struct mv_port_priv { @@ -308,6 +287,9 @@ struct mv_port_priv { dma_addr_t crpb_dma; struct mv_sg *sg_tbl; dma_addr_t sg_tbl_dma; + + unsigned req_producer; /* cp of req_in_ptr */ + unsigned rsp_consumer; /* cp of rsp_out_ptr */ u32 pp_flags; }; @@ -346,8 +328,7 @@ static void mv_host_stop(struct ata_host_set *host_set); static int mv_port_start(struct ata_port *ap); static void mv_port_stop(struct ata_port *ap); static void mv_qc_prep(struct ata_queued_cmd *qc); -static void mv_qc_prep_iie(struct ata_queued_cmd *qc); -static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); +static int mv_qc_issue(struct ata_queued_cmd *qc); static irqreturn_t mv_interrupt(int irq, void *dev_instance, struct pt_regs *regs); static void mv_eng_timeout(struct ata_port *ap); @@ -381,16 +362,17 @@ static struct scsi_host_template mv_sht = { .name = DRV_NAME, .ioctl = ata_scsi_ioctl, .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, .can_queue = MV_USE_Q_DEPTH, .this_id = ATA_SHT_THIS_ID, .sg_tablesize = MV_MAX_SG_CT / 2, + .max_sectors = ATA_MAX_SECTORS, .cmd_per_lun = ATA_SHT_CMD_PER_LUN, .emulated = ATA_SHT_EMULATED, .use_clustering = ATA_SHT_USE_CLUSTERING, .proc_name = DRV_NAME, .dma_boundary = MV_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, - .slave_destroy = ata_scsi_slave_destroy, .bios_param = ata_std_bios_param, }; @@ -407,7 +389,6 @@ static const struct ata_port_operations mv5_ops = { .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, - .data_xfer = ata_mmio_data_xfer, .eng_timeout = mv_eng_timeout, @@ -435,35 +416,6 @@ static const struct ata_port_operations mv6_ops = { .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, - .data_xfer = ata_mmio_data_xfer, - - .eng_timeout = mv_eng_timeout, - - .irq_handler = mv_interrupt, - .irq_clear = mv_irq_clear, - - .scr_read = mv_scr_read, - .scr_write = mv_scr_write, - - .port_start = mv_port_start, - .port_stop = mv_port_stop, - .host_stop = mv_host_stop, -}; - -static const struct ata_port_operations mv_iie_ops = { - .port_disable = ata_port_disable, - - .tf_load = ata_tf_load, - .tf_read = ata_tf_read, - .check_status = ata_check_status, - .exec_command = ata_exec_command, - .dev_select = ata_std_dev_select, - - .phy_reset = mv_phy_reset, - - .qc_prep = mv_qc_prep_iie, - .qc_issue = mv_qc_issue, - .data_xfer = ata_mmio_data_xfer, .eng_timeout = mv_eng_timeout, @@ -515,21 +467,6 @@ static const struct ata_port_info mv_port_info[] = { .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &mv6_ops, }, - { /* chip_6042 */ - .sht = &mv_sht, - .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &mv_iie_ops, - }, - { /* chip_7042 */ - .sht = &mv_sht, - .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | - MV_FLAG_DUAL_HC), - .pio_mask = 0x1f, /* pio0-4 */ - .udma_mask = 0x7f, /* udma0-6 */ - .port_ops = &mv_iie_ops, - }, }; static const struct pci_device_id mv_pci_tbl[] = { @@ -540,7 +477,6 @@ static const struct pci_device_id mv_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, - {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042}, {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, @@ -636,8 +572,8 @@ static void mv_irq_clear(struct ata_port 
*ap) * @base: port base address * @pp: port private data * - * Verify the local cache of the eDMA state is accurate with a - * WARN_ON. + * Verify the local cache of the eDMA state is accurate with an + * assert. * * LOCKING: * Inherited from caller. @@ -648,15 +584,15 @@ static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) writelfl(EDMA_EN, base + EDMA_CMD_OFS); pp->pp_flags |= MV_PP_FLAG_EDMA_EN; } - WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS))); + assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); } /** * mv_stop_dma - Disable eDMA engine * @ap: ATA channel to manipulate * - * Verify the local cache of the eDMA state is accurate with a - * WARN_ON. + * Verify the local cache of the eDMA state is accurate with an + * assert. * * LOCKING: * Inherited from caller. @@ -674,7 +610,7 @@ static void mv_stop_dma(struct ata_port *ap) writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; } else { - WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); + assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); } /* now properly wait for the eDMA to stop */ @@ -687,7 +623,7 @@ static void mv_stop_dma(struct ata_port *ap) } if (EDMA_EN & reg) { - ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n"); + printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); /* FIXME: Consider doing a reset here to recover */ } } @@ -754,7 +690,7 @@ static void mv_dump_all_regs(void __iomem *mmio_base, int port, mv_dump_mem(mmio_base+0xf00, 0x4); mv_dump_mem(mmio_base+0x1d00, 0x6c); for (hc = start_hc; hc < start_hc + num_hcs; hc++) { - hc_base = mv_hc_base(mmio_base, hc); + hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT); DPRINTK("HC regs (HC %i):\n", hc); mv_dump_mem(hc_base, 0x1c); } @@ -837,33 +773,6 @@ static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev) dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); } -static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio) -{ - u32 cfg = readl(port_mmio + EDMA_CFG_OFS); - - /* set up non-NCQ EDMA configuration */ - cfg &= ~0x1f; /* clear queue depth */ - cfg &= ~EDMA_CFG_NCQ; /* clear NCQ mode */ - cfg &= ~(1 << 9); /* disable equeue */ - - if (IS_GEN_I(hpriv)) - cfg |= (1 << 8); /* enab config burst size mask */ - - else if (IS_GEN_II(hpriv)) - cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; - - else if (IS_GEN_IIE(hpriv)) { - cfg |= (1 << 23); /* dis RX PM port mask */ - cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */ - cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */ - cfg |= (1 << 18); /* enab early completion */ - cfg |= (1 << 17); /* enab host q cache */ - cfg |= (1 << 22); /* enab cutthrough */ - } - - writelfl(cfg, port_mmio + EDMA_CFG_OFS); -} - /** * mv_port_start - Port specific init/start routine. 
* @ap: ATA channel to manipulate @@ -877,7 +786,6 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio) static int mv_port_start(struct ata_port *ap) { struct device *dev = ap->host_set->dev; - struct mv_host_priv *hpriv = ap->host_set->private_data; struct mv_port_priv *pp; void __iomem *port_mmio = mv_ap_base(ap); void *mem; @@ -921,29 +829,22 @@ static int mv_port_start(struct ata_port *ap) pp->sg_tbl = mem; pp->sg_tbl_dma = mem_dma; - mv_edma_cfg(hpriv, port_mmio); + writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | + EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS); writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); - if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) - writelfl(pp->crqb_dma & 0xffffffff, - port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); - else - writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); + writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); + writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); - - if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) - writelfl(pp->crpb_dma & 0xffffffff, - port_mmio + EDMA_RSP_Q_IN_PTR_OFS); - else - writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); - writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); + pp->req_producer = pp->rsp_consumer = 0; + /* Don't turn on EDMA here...do it before DMA commands only. Else * we'll be unable to send non-data, PIO, etc due to restricted access * to shadow regs. @@ -1014,7 +915,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff); pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); - pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff); + pp->sg_tbl[i].flags_size = cpu_to_le32(len); sg_len -= len; addr += len; @@ -1027,16 +928,16 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) } } -static inline unsigned mv_inc_q_index(unsigned index) +static inline unsigned mv_inc_q_index(unsigned *index) { - return (index + 1) & MV_MAX_Q_DEPTH_MASK; + *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; + return *index; } -static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) +static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) { - u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | + *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | (last ? 
CRQB_CMD_LAST : 0); - *cmdw = cpu_to_le16(tmp); } /** @@ -1055,32 +956,34 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; - __le16 *cw; + u16 *cw; struct ata_taskfile *tf; u16 flags = 0; - unsigned in_index; - if (ATA_PROT_DMA != qc->tf.protocol) + if (ATA_PROT_DMA != qc->tf.protocol) { return; + } + + /* the req producer index should be the same as we remember it */ + assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> + EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + pp->req_producer); /* Fill in command request block */ - if (!(qc->tf.flags & ATA_TFLAG_WRITE)) + if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { flags |= CRQB_FLAG_READ; - WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); + } + assert(MV_MAX_Q_DEPTH > qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; - /* get current queue index from hardware */ - in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; - - pp->crqb[in_index].sg_addr = + pp->crqb[pp->req_producer].sg_addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); - pp->crqb[in_index].sg_addr_hi = + pp->crqb[pp->req_producer].sg_addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); - pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); + pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); - cw = &pp->crqb[in_index].ata_cmd[0]; + cw = &pp->crqb[pp->req_producer].ata_cmd[0]; tf = &qc->tf; /* Sadly, the CRQB cannot accomodate all registers--there are @@ -1126,76 +1029,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ - if (!(qc->flags & ATA_QCFLAG_DMAMAP)) - return; - mv_fill_sg(qc); -} - -/** - * mv_qc_prep_iie - Host specific command preparation. - * @qc: queued command to prepare - * - * This routine simply redirects to the general purpose routine - * if command is not DMA. Else, it handles prep of the CRQB - * (command request block), does some sanity checking, and calls - * the SG load routine. - * - * LOCKING: - * Inherited from caller. 
- */ -static void mv_qc_prep_iie(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct mv_port_priv *pp = ap->private_data; - struct mv_crqb_iie *crqb; - struct ata_taskfile *tf; - unsigned in_index; - u32 flags = 0; - - if (ATA_PROT_DMA != qc->tf.protocol) - return; - - /* Fill in Gen IIE command request block - */ - if (!(qc->tf.flags & ATA_TFLAG_WRITE)) - flags |= CRQB_FLAG_READ; - - WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); - flags |= qc->tag << CRQB_TAG_SHIFT; - - /* get current queue index from hardware */ - in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; - - crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; - crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); - crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); - crqb->flags = cpu_to_le32(flags); - - tf = &qc->tf; - crqb->ata_cmd[0] = cpu_to_le32( - (tf->command << 16) | - (tf->feature << 24) - ); - crqb->ata_cmd[1] = cpu_to_le32( - (tf->lbal << 0) | - (tf->lbam << 8) | - (tf->lbah << 16) | - (tf->device << 24) - ); - crqb->ata_cmd[2] = cpu_to_le32( - (tf->hob_lbal << 0) | - (tf->hob_lbam << 8) | - (tf->hob_lbah << 16) | - (tf->hob_feature << 24) - ); - crqb->ata_cmd[3] = cpu_to_le32( - (tf->nsect << 0) | - (tf->hob_nsect << 8) - ); - - if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { return; + } mv_fill_sg(qc); } @@ -1211,11 +1047,10 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. */ -static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) +static int mv_qc_issue(struct ata_queued_cmd *qc) { void __iomem *port_mmio = mv_ap_base(qc->ap); struct mv_port_priv *pp = qc->ap->private_data; - unsigned in_index; u32 in_ptr; if (ATA_PROT_DMA != qc->tf.protocol) { @@ -1227,20 +1062,23 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) return ata_qc_issue_prot(qc); } - in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); - in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); + /* the req producer index should be the same as we remember it */ + assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + pp->req_producer); /* until we do queuing, the queue should be empty at this point */ - WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) - >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); + assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> + EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); - in_index = mv_inc_q_index(in_index); /* now incr producer index */ + mv_inc_q_index(&pp->req_producer); /* now incr producer index */ mv_start_dma(port_mmio, pp); /* and write the request in pointer to kick the EDMA to life */ in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; - in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; + in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); return 0; @@ -1252,7 +1090,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) * * This routine is for use when the port is in DMA mode, when it * will be using the CRPB (command response block) method of - * returning command completion information. We check indices + * returning command completion information. We assert indices * are good, grab status, and bump the response consumer index to * prove that we're up to date. 
* @@ -1263,26 +1101,28 @@ static u8 mv_get_crpb_status(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; - unsigned out_index; u32 out_ptr; u8 ata_status; - out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); - out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; + out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); - ata_status = le16_to_cpu(pp->crpb[out_index].flags) - >> CRPB_FLAG_STATUS_SHIFT; + /* the response consumer index should be the same as we remember it */ + assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + pp->rsp_consumer); + + ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT; /* increment our consumer index... */ - out_index = mv_inc_q_index(out_index); + pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); /* and, until we do NCQ, there should only be 1 CRPB waiting */ - WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) - >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); + assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> + EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + pp->rsp_consumer); /* write out our inc'd consumer index so EDMA knows we're caught up */ out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; - out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; + out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); /* Return ATA status register for completed CRPB */ @@ -1292,7 +1132,6 @@ static u8 mv_get_crpb_status(struct ata_port *ap) /** * mv_err_intr - Handle error interrupts on the port * @ap: ATA channel to manipulate - * @reset_allowed: bool: 0 == don't trigger from reset here * * In most cases, just clear the interrupt and move on. However, * some cases require an eDMA reset, which is done right before @@ -1303,7 +1142,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) * LOCKING: * Inherited from caller. 
*/ -static void mv_err_intr(struct ata_port *ap, int reset_allowed) +static void mv_err_intr(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); u32 edma_err_cause, serr = 0; @@ -1311,8 +1150,8 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed) edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); if (EDMA_ERR_SERR & edma_err_cause) { - sata_scr_read(ap, SCR_ERROR, &serr); - sata_scr_write_flush(ap, SCR_ERROR, serr); + serr = scr_read(ap, SCR_ERROR); + scr_write_flush(ap, SCR_ERROR, serr); } if (EDMA_ERR_SELF_DIS & edma_err_cause) { struct mv_port_priv *pp = ap->private_data; @@ -1325,8 +1164,9 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed) writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); /* check for fatal here and recover if needed */ - if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause)) + if (EDMA_ERR_FATAL & edma_err_cause) { mv_stop_and_reset(ap); + } } /** @@ -1350,6 +1190,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, { void __iomem *mmio = host_set->mmio_base; void __iomem *hc_mmio = mv_hc_base(mmio, hc); + struct ata_port *ap; struct ata_queued_cmd *qc; u32 hc_irq_cause; int shift, port, port0, hard_port, handled; @@ -1372,37 +1213,25 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { u8 ata_status = 0; - struct ata_port *ap = host_set->ports[port]; - struct mv_port_priv *pp = ap->private_data; - - hard_port = mv_hardport_from_port(port); /* range 0..3 */ + ap = host_set->ports[port]; + hard_port = port & MV_PORT_MASK; /* range 0-3 */ handled = 0; /* ensure ata_status is set if handled++ */ - /* Note that DEV_IRQ might happen spuriously during EDMA, - * and should be ignored in such cases. - * The cause of this is still under investigation. 
- */ - if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { - /* EDMA: check for response queue interrupt */ - if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { - ata_status = mv_get_crpb_status(ap); - handled = 1; - } - } else { - /* PIO: check for device (drive) interrupt */ - if ((DEV_IRQ << hard_port) & hc_irq_cause) { - ata_status = readb((void __iomem *) + if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { + /* new CRPB on the queue; just one at a time until NCQ + */ + ata_status = mv_get_crpb_status(ap); + handled++; + } else if ((DEV_IRQ << hard_port) & hc_irq_cause) { + /* received ATA IRQ; read the status reg to clear INTRQ + */ + ata_status = readb((void __iomem *) ap->ioaddr.status_addr); - handled = 1; - /* ignore spurious intr if drive still BUSY */ - if (ata_status & ATA_BUSY) { - ata_status = 0; - handled = 0; - } - } + handled++; } - if (ap && (ap->flags & ATA_FLAG_DISABLED)) + if (ap && + (ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) continue; err_mask = ac_err_mask(ata_status); @@ -1412,18 +1241,18 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, shift++; /* skip bit 8 in the HC Main IRQ reg */ } if ((PORT0_ERR << shift) & relevant) { - mv_err_intr(ap, 1); + mv_err_intr(ap); err_mask |= AC_ERR_OTHER; - handled = 1; + handled++; } - if (handled) { + if (handled && ap) { qc = ata_qc_from_tag(ap, ap->active_tag); - if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) { + if (NULL != qc) { VPRINTK("port %u IRQ found for qc, " "ata_status 0x%x\n", port,ata_status); /* mark qc status appropriately */ - if (!(qc->tf.flags & ATA_TFLAG_POLLING)) { + if (!(qc->tf.ctl & ATA_NIEN)) { qc->err_mask |= err_mask; ata_qc_complete(qc); } @@ -1454,7 +1283,6 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, struct ata_host_set *host_set = dev_instance; unsigned int hc, handled = 0, n_hcs; void __iomem *mmio = host_set->mmio_base; - struct mv_host_priv *hpriv; u32 irq_stat; irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); @@ -1476,17 +1304,6 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, handled++; } } - - hpriv = host_set->private_data; - if (IS_60XX(hpriv)) { - /* deal with the interrupt coalescing bits */ - if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { - writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO); - writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI); - writelfl(0, mmio + MV_IRQ_COAL_CAUSE); - } - } - if (PCI_ERR & irq_stat) { printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", readl(mmio + PCI_IRQ_CAUSE_OFS)); @@ -1867,12 +1684,6 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, m2 |= hpriv->signal[port].pre; m2 &= ~(1 << 16); - /* according to mvSata 3.6.1, some IIE values are fixed */ - if (IS_GEN_IIE(hpriv)) { - m2 &= ~0xC30FF01F; - m2 |= 0x0000900F; - } - writel(m2, port_mmio + PHY_MODE2); } @@ -1885,8 +1696,7 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, if (IS_60XX(hpriv)) { u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); - ifctl |= (1 << 7); /* enable gen2i speed */ - ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ + ifctl |= (1 << 12) | (1 << 7); writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); } @@ -1953,16 +1763,16 @@ static void __mv_phy_reset(struct ata_port *ap, int can_sleep) /* Issue COMRESET via SControl */ comreset_retry: - sata_scr_write_flush(ap, SCR_CONTROL, 0x301); + scr_write_flush(ap, SCR_CONTROL, 0x301); __msleep(1, can_sleep); - sata_scr_write_flush(ap, SCR_CONTROL, 0x300); + scr_write_flush(ap, SCR_CONTROL, 0x300); 
__msleep(20, can_sleep); timeout = jiffies + msecs_to_jiffies(200); do { - sata_scr_read(ap, SCR_STATUS, &sstatus); - if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0)) + sstatus = scr_read(ap, SCR_STATUS) & 0x3; + if ((sstatus == 3) || (sstatus == 0)) break; __msleep(1, can_sleep); @@ -1978,12 +1788,11 @@ comreset_retry: "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); - if (ata_port_online(ap)) { + if (sata_dev_present(ap)) { ata_port_probe(ap); } else { - sata_scr_read(ap, SCR_STATUS, &sstatus); - ata_port_printk(ap, KERN_INFO, - "no device found (phy stat %08x)\n", sstatus); + printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", + ap->id, scr_read(ap, SCR_STATUS)); ata_port_disable(ap); return; } @@ -2010,7 +1819,7 @@ comreset_retry: tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr); dev->class = ata_dev_classify(&tf); - if (!ata_dev_enabled(dev)) { + if (!ata_dev_present(dev)) { VPRINTK("Port disabled post-sig: No device present.\n"); ata_port_disable(ap); } @@ -2042,7 +1851,7 @@ static void mv_eng_timeout(struct ata_port *ap) struct ata_queued_cmd *qc; unsigned long flags; - ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n"); + printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); DPRINTK("All regs @ start of eng_timeout\n"); mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, to_pci_dev(ap->host_set->dev)); @@ -2052,15 +1861,24 @@ static void mv_eng_timeout(struct ata_port *ap) ap->host_set->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd); - spin_lock_irqsave(&ap->host_set->lock, flags); - mv_err_intr(ap, 0); + mv_err_intr(ap); mv_stop_and_reset(ap); - spin_unlock_irqrestore(&ap->host_set->lock, flags); - WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); - if (qc->flags & ATA_QCFLAG_ACTIVE) { - qc->err_mask |= AC_ERR_TIMEOUT; - ata_eh_qc_complete(qc); + if (!qc) { + printk(KERN_ERR "ata%u: BUG: timeout without command\n", + ap->id); + } else { + /* hack alert! We cannot use the supplied completion + * function from inside the ->eh_strategy_handler() thread. + * libata is the only user of ->eh_strategy_handler() in + * any kernel, so the default scsi_done() assumes it is + * not being called from the SCSI EH. 
+ */ + spin_lock_irqsave(&ap->host_set->lock, flags); + qc->scsidone = scsi_finish_command; + qc->err_mask |= AC_ERR_OTHER; + ata_qc_complete(qc); + spin_unlock_irqrestore(&ap->host_set->lock, flags); } } @@ -2180,27 +1998,6 @@ static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv, } break; - case chip_7042: - case chip_6042: - hpriv->ops = &mv6xxx_ops; - - hp_flags |= MV_HP_GEN_IIE; - - switch (rev_id) { - case 0x0: - hp_flags |= MV_HP_ERRATA_XX42A0; - break; - case 0x1: - hp_flags |= MV_HP_ERRATA_60X1C0; - break; - default: - dev_printk(KERN_WARNING, &pdev->dev, - "Applying 60X1C0 workarounds to unknown rev\n"); - hp_flags |= MV_HP_ERRATA_60X1C0; - break; - } - break; - default: printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx); return 1; @@ -2255,8 +2052,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent, void __iomem *port_mmio = mv_port_base(mmio, port); u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); - ifctl |= (1 << 7); /* enable gen2i speed */ - ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ + ifctl |= (1 << 12); writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); } @@ -2357,7 +2153,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) { return rc; } - pci_set_master(pdev); rc = pci_request_regions(pdev, DRV_NAME); if (rc) { @@ -2395,7 +2190,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) probe_ent->port_ops = mv_port_info[board_idx].port_ops; probe_ent->irq = pdev->irq; - probe_ent->irq_flags = IRQF_SHARED; + probe_ent->irq_flags = SA_SHIRQ; probe_ent->mmio_base = mmio_base; probe_ent->private_data = hpriv;
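
A few patterns recur throughout the hunks above; the short, self-contained C
sketches below spell them out. Each is an illustration under stated
assumptions, not an excerpt from the driver.

The request/response queues are rings indexed with a masked increment, as in
the mv_inc_q_index() hunk (the "+" side passes the index by pointer). The
wrap-around only works because the queue depth is a power of two; the diff
references MV_MAX_Q_DEPTH and MV_MAX_Q_DEPTH_MASK without showing their
values, so a depth of 32 is assumed here:

    #include <assert.h>
    #include <stdio.h>

    #define MV_MAX_Q_DEPTH      32                   /* assumed; power of two */
    #define MV_MAX_Q_DEPTH_MASK (MV_MAX_Q_DEPTH - 1)

    /* Advance a ring index, wrapping at the queue depth. */
    static unsigned mv_inc_q_index(unsigned *index)
    {
            *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
            return *index;
    }

    int main(void)
    {
            unsigned producer = MV_MAX_Q_DEPTH - 1;  /* last slot in the ring */

            mv_inc_q_index(&producer);
            assert(producer == 0);                   /* masked increment wraps */
            printf("wrapped to slot %u\n", producer);
            return 0;
    }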
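
The structures shared with the EDMA engine (mv_crqb, mv_crpb, mv_sg) are
consumed by the hardware in little-endian byte order. The "-" side of those
hunks annotates the fields as __le16/__le32 and converts with cpu_to_le16(),
which is what keeps the driver correct on big-endian hosts. A sketch of the
command-word packing follows; the CRQB_CMD_* values mirror the driver's enum
block, which this diff does not show, so treat them as assumptions:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Assumed values, following the driver's enum block (not in this diff). */
    #define CRQB_CMD_ADDR_SHIFT 8
    #define CRQB_CMD_CS         (0x2 << 11)
    #define CRQB_CMD_LAST       (1 << 15)

    /* Userspace stand-in for the kernel's cpu_to_le16(): returns a value
     * whose in-memory byte order is little-endian on any host. */
    static uint16_t cpu_to_le16(uint16_t v)
    {
            uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
            uint16_t out;

            memcpy(&out, b, sizeof(out));
            return out;
    }

    /* Pack one shadow-register write into a CRQB command word. */
    static void mv_crqb_pack_cmd(uint16_t *cmdw, uint8_t data, uint8_t addr,
                                 unsigned last)
    {
            uint16_t tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
                           (last ? CRQB_CMD_LAST : 0);

            *cmdw = cpu_to_le16(tmp);
    }

    int main(void)
    {
            uint16_t w;
            unsigned char *b = (unsigned char *)&w;

            mv_crqb_pack_cmd(&w, 0xec, 0x07, 1);  /* hypothetical data/addr */
            printf("bytes as the EDMA reads them: %02x %02x\n", b[0], b[1]);
            return 0;
    }

On a little-endian host the conversion is a no-op; on a big-endian host it
swaps the bytes so the stored order stays the same.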
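
Several hunks write a 64-bit DMA base address with a split shift, e.g.
writel((pp->crqb_dma >> 16) >> 16, ...), rather than shifting by 32 in one
step. Shifting a value by its full width is undefined in C, and dma_addr_t is
only 32 bits wide on many configurations; the double 16-bit shift is always
well-defined and simply yields 0 there. A minimal demonstration, assuming a
32-bit dma_addr_t:

    #include <inttypes.h>
    #include <stdio.h>

    typedef uint32_t dma_addr_t;    /* 32-bit configuration; 64-bit elsewhere */

    int main(void)
    {
            dma_addr_t addr = 0x12345678u;

            /* addr >> 32 would be undefined behavior when dma_addr_t is only
             * 32 bits wide; two 16-bit shifts are always well-defined and
             * simply yield 0 for the high half here. */
            uint32_t lo = addr & 0xffffffffu;
            uint32_t hi = (addr >> 16) >> 16;

            printf("hi=0x%08" PRIx32 " lo=0x%08" PRIx32 "\n", hi, lo);
            return 0;
    }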
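
The interrupt path decomposes a global port number into a host-controller
index and a "hard port" within that controller, via port & MV_PORT_MASK
(the hunk's "range 0-3" comment) and port >> MV_PORT_HC_SHIFT. With four
ports per HC that comment implies a mask of 3 and a shift of 2; those values
are assumed below:

    #include <stdio.h>

    #define MV_PORT_HC_SHIFT 2   /* assumed: four ports per host controller */
    #define MV_PORT_MASK     3   /* consistent with the "range 0-3" comment */

    int main(void)
    {
            int port;

            /* Split a global port number into its host controller (HC) index
             * and its hard port within that HC, as the interrupt path does. */
            for (port = 0; port < 8; port++)
                    printf("port %d -> hc %d, hard port %d\n",
                           port, port >> MV_PORT_HC_SHIFT, port & MV_PORT_MASK);
            return 0;
    }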
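
Finally, register writes that must take effect immediately go through
writelfl() rather than plain writel(). Elsewhere in the full source (not
shown in this diff) that helper is a writel() followed by a read-back of the
same register, which flushes the posted PCI write before the driver proceeds.
A userspace analogue of the pattern, with a volatile pointer standing in for
the MMIO mapping:

    #include <stdint.h>

    /* Userspace analogue of writelfl(): write a register, then read it back
     * so a posted PCI write is flushed before execution continues.  A
     * volatile pointer stands in for the kernel's MMIO mapping. */
    static void writelfl(volatile uint32_t *addr, uint32_t data)
    {
            *addr = data;
            (void)*addr;         /* read-back completes the posted write */
    }

    int main(void)
    {
            static volatile uint32_t fake_reg;   /* stand-in MMIO register */

            writelfl(&fake_reg, 0x301);
            return fake_reg == 0x301 ? 0 : 1;
    }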