X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fsym53c8xx_2%2Fsym_glue.c;h=9c83b4d39a2687aa3a65bcebed3cc42fdb295f96;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=5b07c6ec3eccbc2845216d1fa1c1dce63039739d;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 5b07c6ec3..9c83b4d39 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -134,65 +134,17 @@ static void sym2_setup_params(void) } } -/* - * We used to try to deal with 64-bit BARs here, but don't any more. - * There are many parts of this driver which would need to be modified - * to handle a 64-bit base address, including scripts. I'm uncomfortable - * with making those changes when I have no way of testing it, so I'm - * just going to disable it. - * - * Note that some machines (eg HP rx8620 and Superdome) have bus addresses - * below 4GB and physical addresses above 4GB. These will continue to work. - */ -static int __devinit -pci_get_base_address(struct pci_dev *pdev, int index, unsigned long *basep) -{ - u32 tmp; - unsigned long base; -#define PCI_BAR_OFFSET(index) (PCI_BASE_ADDRESS_0 + (index<<2)) - - pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp); - base = tmp; - if ((tmp & 0x7) == PCI_BASE_ADDRESS_MEM_TYPE_64) { - pci_read_config_dword(pdev, PCI_BAR_OFFSET(index++), &tmp); - if (tmp > 0) - dev_err(&pdev->dev, - "BAR %d is 64-bit, disabling\n", index - 1); - base = 0; - } - - if ((base & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { - base &= PCI_BASE_ADDRESS_IO_MASK; - } else { - base &= PCI_BASE_ADDRESS_MEM_MASK; - } - - *basep = base; - return index; -#undef PCI_BAR_OFFSET -} - static struct scsi_transport_template *sym2_transport_template = NULL; -/* - * Used by the eh thread to wait for command completion. - * It is allocated on the eh thread stack. - */ -struct sym_eh_wait { - struct completion done; - struct timer_list timer; - void (*old_done)(struct scsi_cmnd *); - int to_do; - int timed_out; -}; - /* * Driver private area in the SCSI command structure. 
*/ struct sym_ucmd { /* Override the SCSI pointer structure */ - dma_addr_t data_mapping; - u_char data_mapped; - struct sym_eh_wait *eh_wait; + dma_addr_t data_mapping; + unsigned char data_mapped; + unsigned char to_do; /* For error handling */ + void (*old_done)(struct scsi_cmnd *); /* For error handling */ + struct completion *eh_done; /* For error handling */ }; #define SYM_UCMD_PTR(cmd) ((struct sym_ucmd *)(&(cmd)->SCp)) @@ -389,13 +341,20 @@ static int sym_scatter_no_sglist(struct sym_hcb *np, struct sym_ccb *cp, struct { struct sym_tblmove *data = &cp->phys.data[SYM_CONF_MAX_SG-1]; int segment; + unsigned int len = cmd->request_bufflen; - cp->data_len = cmd->request_bufflen; - - if (cmd->request_bufflen) { + if (len) { dma_addr_t baddr = map_scsi_single_data(np, cmd); if (baddr) { - sym_build_sge(np, data, baddr, cmd->request_bufflen); + if (len & 1) { + struct sym_tcb *tp = &np->target[cp->target]; + if (tp->head.wval & EWS) { + len++; + cp->odd_byte_adjustment++; + } + } + cp->data_len = len; + sym_build_sge(np, data, baddr, len); segment = 1; } else { segment = -2; @@ -418,6 +377,7 @@ static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd segment = sym_scatter_no_sglist(np, cp, cmd); else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) { struct scatterlist *scatter = (struct scatterlist *)cmd->buffer; + struct sym_tcb *tp = &np->target[cp->target]; struct sym_tblmove *data; if (use_sg > SYM_CONF_MAX_SG) { @@ -431,6 +391,11 @@ static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd dma_addr_t baddr = sg_dma_address(&scatter[segment]); unsigned int len = sg_dma_len(&scatter[segment]); + if ((len & 1) && (tp->head.wval & EWS)) { + len++; + cp->odd_byte_adjustment++; + } + sym_build_sge(np, &data[segment], baddr, len); cp->data_len += len; } @@ -456,10 +421,8 @@ static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) * Minimal checkings, so that we will not * go outside our tables. */ - if (sdev->id == np->myaddr || - sdev->id >= SYM_CONF_MAX_TARGET || - sdev->lun >= SYM_CONF_MAX_LUN) { - sym_xpt_done2(np, cmd, CAM_DEV_NOT_THERE); + if (sdev->id == np->myaddr) { + sym_xpt_done2(np, cmd, DID_NO_CONNECT); return 0; } @@ -468,28 +431,6 @@ static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) */ tp = &np->target[sdev->id]; - /* - * Complete the 1st INQUIRY command with error - * condition if the device is flagged NOSCAN - * at BOOT in the NVRAM. This may speed up - * the boot and maintain coherency with BIOS - * device numbering. Clearing the flag allows - * user to rescan skipped devices later. - * We also return error for devices not flagged - * for SCAN LUNS in the NVRAM since some mono-lun - * devices behave badly when asked for some non - * zero LUN. Btw, this is an absolute hack.:-) - */ - if (cmd->cmnd[0] == 0x12 || cmd->cmnd[0] == 0x0) { - if ((tp->usrflags & SYM_SCAN_BOOT_DISABLED) || - ((tp->usrflags & SYM_SCAN_LUNS_DISABLED) && - sdev->lun != 0)) { - tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; - sym_xpt_done2(np, cmd, CAM_DEV_NOT_THERE); - return 0; - } - } - /* * Select tagged/untagged. */ @@ -511,23 +452,10 @@ static int sym_queue_command(struct sym_hcb *np, struct scsi_cmnd *cmd) */ static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) { - u32 cmd_ba; - int cmd_len; - - /* - * CDB is 16 bytes max. 
- */ - if (cmd->cmd_len > sizeof(cp->cdb_buf)) { - sym_set_cam_status(cp->cmd, CAM_REQ_INVALID); - return -1; - } - memcpy(cp->cdb_buf, cmd->cmnd, cmd->cmd_len); - cmd_ba = CCB_BA (cp, cdb_buf[0]); - cmd_len = cmd->cmd_len; - cp->phys.cmd.addr = cpu_to_scr(cmd_ba); - cp->phys.cmd.size = cpu_to_scr(cmd_len); + cp->phys.cmd.addr = CCB_BA(cp, cdb_buf[0]); + cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len); return 0; } @@ -537,9 +465,8 @@ static inline int sym_setup_cdb(struct sym_hcb *np, struct scsi_cmnd *cmd, struc */ int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) { + u32 lastp, goalp; int dir; - struct sym_tcb *tp = &np->target[cp->target]; - struct sym_lcb *lp = sym_lp(tp, cp->lun); /* * Build the CDB. @@ -554,21 +481,50 @@ int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct s if (dir != DMA_NONE) { cp->segments = sym_scatter(np, cp, cmd); if (cp->segments < 0) { - if (cp->segments == -2) - sym_set_cam_status(cmd, CAM_RESRC_UNAVAIL); - else - sym_set_cam_status(cmd, CAM_REQ_TOO_BIG); + sym_set_cam_status(cmd, DID_ERROR); goto out_abort; } + + /* + * No segments means no data. + */ + if (!cp->segments) + dir = DMA_NONE; } else { cp->data_len = 0; cp->segments = 0; } /* - * Set data pointers. + * Set the data pointer. */ - sym_setup_data_pointers(np, cp, dir); + switch (dir) { + case DMA_BIDIRECTIONAL: + printk("%s: got DMA_BIDIRECTIONAL command", sym_name(np)); + sym_set_cam_status(cmd, DID_ERROR); + goto out_abort; + case DMA_TO_DEVICE: + goalp = SCRIPTA_BA(np, data_out2) + 8; + lastp = goalp - 8 - (cp->segments * (2*4)); + break; + case DMA_FROM_DEVICE: + cp->host_flags |= HF_DATA_IN; + goalp = SCRIPTA_BA(np, data_in2) + 8; + lastp = goalp - 8 - (cp->segments * (2*4)); + break; + case DMA_NONE: + default: + lastp = goalp = SCRIPTB_BA(np, no_data); + break; + } + + /* + * Set all pointers values needed by SCRIPTS. + */ + cp->phys.head.lastp = cpu_to_scr(lastp); + cp->phys.head.savep = cpu_to_scr(lastp); + cp->startp = cp->phys.head.savep; + cp->goalp = cpu_to_scr(goalp); /* * When `#ifed 1', the code below makes the driver @@ -589,10 +545,7 @@ int sym_setup_data_and_start(struct sym_hcb *np, struct scsi_cmnd *cmd, struct s /* * activate this job. */ - if (lp) - sym_start_next_ccbs(np, lp, 2); - else - sym_put_start_queue(np, cp); + sym_put_start_queue(np, cp); return 0; out_abort: @@ -747,44 +700,22 @@ static void sym53c8xx_timer(unsigned long npref) * What we will do regarding the involved SCSI command. */ #define SYM_EH_DO_IGNORE 0 -#define SYM_EH_DO_COMPLETE 1 #define SYM_EH_DO_WAIT 2 /* - * Our general completion handler. + * scsi_done() alias when error recovery is in progress. */ -static void __sym_eh_done(struct scsi_cmnd *cmd, int timed_out) +static void sym_eh_done(struct scsi_cmnd *cmd) { - struct sym_eh_wait *ep = SYM_UCMD_PTR(cmd)->eh_wait; - if (!ep) - return; - - /* Try to avoid a race here (not 100% safe) */ - if (!timed_out) { - ep->timed_out = 0; - if (ep->to_do == SYM_EH_DO_WAIT && !del_timer(&ep->timer)) - return; - } + struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); + BUILD_BUG_ON(sizeof(struct scsi_pointer) < sizeof(struct sym_ucmd)); - /* Revert everything */ - SYM_UCMD_PTR(cmd)->eh_wait = NULL; - cmd->scsi_done = ep->old_done; + cmd->scsi_done = ucmd->old_done; - /* Wake up the eh thread if it wants to sleep */ - if (ep->to_do == SYM_EH_DO_WAIT) - complete(&ep->done); + if (ucmd->to_do == SYM_EH_DO_WAIT) + complete(ucmd->eh_done); } -/* - * scsi_done() alias when error recovery is in progress. 
- */ -static void sym_eh_done(struct scsi_cmnd *cmd) { __sym_eh_done(cmd, 0); } - -/* - * Some timeout handler to avoid waiting too long. - */ -static void sym_eh_timeout(u_long p) { __sym_eh_done((struct scsi_cmnd *)p, 1); } - /* * Generic method for our eh processing. * The 'op' argument tells what we have to do. @@ -792,35 +723,31 @@ static void sym_eh_timeout(u_long p) { __sym_eh_done((struct scsi_cmnd *)p, 1); static int sym_eh_handler(int op, char *opname, struct scsi_cmnd *cmd) { struct sym_hcb *np = SYM_SOFTC_PTR(cmd); + struct sym_ucmd *ucmd = SYM_UCMD_PTR(cmd); + struct Scsi_Host *host = cmd->device->host; SYM_QUEHEAD *qp; int to_do = SYM_EH_DO_IGNORE; int sts = -1; - struct sym_eh_wait eh, *ep = &eh; + struct completion eh_done; dev_warn(&cmd->device->sdev_gendev, "%s operation started.\n", opname); + spin_lock_irq(host->host_lock); /* This one is queued in some place -> to wait for completion */ FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) { struct sym_ccb *cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); if (cp->cmd == cmd) { to_do = SYM_EH_DO_WAIT; - goto prepare; + break; } } -prepare: - /* Prepare stuff to either ignore, complete or wait for completion */ - switch(to_do) { - default: - case SYM_EH_DO_IGNORE: - break; - case SYM_EH_DO_WAIT: - init_completion(&ep->done); - /* fall through */ - case SYM_EH_DO_COMPLETE: - ep->old_done = cmd->scsi_done; + if (to_do == SYM_EH_DO_WAIT) { + init_completion(&eh_done); + ucmd->old_done = cmd->scsi_done; + ucmd->eh_done = &eh_done; + wmb(); cmd->scsi_done = sym_eh_done; - SYM_UCMD_PTR(cmd)->eh_wait = ep; } /* Try to proceed the operation we have been asked for */ @@ -847,29 +774,19 @@ prepare: /* On error, restore everything and cross fingers :) */ if (sts) { - SYM_UCMD_PTR(cmd)->eh_wait = NULL; - cmd->scsi_done = ep->old_done; + cmd->scsi_done = ucmd->old_done; to_do = SYM_EH_DO_IGNORE; } - ep->to_do = to_do; - /* Complete the command with locks held as required by the driver */ - if (to_do == SYM_EH_DO_COMPLETE) - sym_xpt_done2(np, cmd, CAM_REQ_ABORTED); + ucmd->to_do = to_do; + spin_unlock_irq(host->host_lock); - /* Wait for completion with locks released, as required by kernel */ if (to_do == SYM_EH_DO_WAIT) { - init_timer(&ep->timer); - ep->timer.expires = jiffies + (5*HZ); - ep->timer.function = sym_eh_timeout; - ep->timer.data = (u_long)cmd; - ep->timed_out = 1; /* Be pessimistic for once :) */ - add_timer(&ep->timer); - spin_unlock_irq(np->s.host->host_lock); - wait_for_completion(&ep->done); - spin_lock_irq(np->s.host->host_lock); - if (ep->timed_out) + if (!wait_for_completion_timeout(&eh_done, 5*HZ)) { + ucmd->to_do = SYM_EH_DO_IGNORE; + wmb(); sts = -2; + } } dev_warn(&cmd->device->sdev_gendev, "%s operation %s.\n", opname, sts==0 ? "complete" :sts==-2 ? "timed-out" : "failed"); @@ -916,15 +833,12 @@ static void sym_tune_dev_queuing(struct sym_tcb *tp, int lun, u_short reqtags) if (reqtags > lp->s.scdev_depth) reqtags = lp->s.scdev_depth; - lp->started_limit = reqtags ? reqtags : 2; - lp->started_max = 1; lp->s.reqtags = reqtags; if (reqtags != oldtags) { - dev_info(&tp->sdev->sdev_target->dev, + dev_info(&tp->starget->dev, "tagged command queuing %s, command queue depth %d.\n", - lp->s.reqtags ? "enabled" : "disabled", - lp->started_limit); + lp->s.reqtags ? 
"enabled" : "disabled", reqtags); } } @@ -981,42 +895,59 @@ static int device_queue_depth(struct sym_hcb *np, int target, int lun) return DEF_DEPTH; } -static int sym53c8xx_slave_alloc(struct scsi_device *device) +static int sym53c8xx_slave_alloc(struct scsi_device *sdev) { - struct sym_hcb *np = sym_get_hcb(device->host); - struct sym_tcb *tp = &np->target[device->id]; - if (!tp->sdev) - tp->sdev = device; + struct sym_hcb *np = sym_get_hcb(sdev->host); + struct sym_tcb *tp = &np->target[sdev->id]; + struct sym_lcb *lp; - return 0; -} + if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) + return -ENXIO; -static void sym53c8xx_slave_destroy(struct scsi_device *device) -{ - struct sym_hcb *np = sym_get_hcb(device->host); - struct sym_tcb *tp = &np->target[device->id]; - if (tp->sdev == device) - tp->sdev = NULL; + tp->starget = sdev->sdev_target; + /* + * Fail the device init if the device is flagged NOSCAN at BOOT in + * the NVRAM. This may speed up boot and maintain coherency with + * BIOS device numbering. Clearing the flag allows the user to + * rescan skipped devices later. We also return an error for + * devices not flagged for SCAN LUNS in the NVRAM since some single + * lun devices behave badly when asked for a non zero LUN. + */ + + if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { + tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; + starget_printk(KERN_INFO, tp->starget, + "Scan at boot disabled in NVRAM\n"); + return -ENXIO; + } + + if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { + if (sdev->lun != 0) + return -ENXIO; + starget_printk(KERN_INFO, tp->starget, + "Multiple LUNs disabled in NVRAM\n"); + } + + lp = sym_alloc_lcb(np, sdev->id, sdev->lun); + if (!lp) + return -ENOMEM; + + spi_min_period(tp->starget) = tp->usr_period; + spi_max_width(tp->starget) = tp->usr_width; + + return 0; } /* * Linux entry point for device queue sizing. */ -static int sym53c8xx_slave_configure(struct scsi_device *device) +static int sym53c8xx_slave_configure(struct scsi_device *sdev) { - struct sym_hcb *np = sym_get_hcb(device->host); - struct sym_tcb *tp = &np->target[device->id]; - struct sym_lcb *lp; + struct sym_hcb *np = sym_get_hcb(sdev->host); + struct sym_tcb *tp = &np->target[sdev->id]; + struct sym_lcb *lp = sym_lp(tp, sdev->lun); int reqtags, depth_to_use; - /* - * Allocate the LCB if not yet. - * If it fail, we may well be in the sh*t. :) - */ - lp = sym_alloc_lcb(np, device->id, device->lun); - if (!lp) - return -ENOMEM; - /* * Get user flags. */ @@ -1028,10 +959,10 @@ static int sym53c8xx_slave_configure(struct scsi_device *device) * Use at least 2. * Donnot use more than our maximum. */ - reqtags = device_queue_depth(np, device->id, device->lun); + reqtags = device_queue_depth(np, sdev->id, sdev->lun); if (reqtags > tp->usrtags) reqtags = tp->usrtags; - if (!device->tagged_supported) + if (!sdev->tagged_supported) reqtags = 0; #if 1 /* Avoid to locally queue commands for no good reasons */ if (reqtags > SYM_CONF_MAX_TAG) @@ -1040,19 +971,30 @@ static int sym53c8xx_slave_configure(struct scsi_device *device) #else depth_to_use = (reqtags ? SYM_CONF_MAX_TAG : 2); #endif - scsi_adjust_queue_depth(device, - (device->tagged_supported ? + scsi_adjust_queue_depth(sdev, + (sdev->tagged_supported ? 
MSG_SIMPLE_TAG : 0), depth_to_use); lp->s.scdev_depth = depth_to_use; - sym_tune_dev_queuing(tp, device->lun, reqtags); + sym_tune_dev_queuing(tp, sdev->lun, reqtags); - if (!spi_initial_dv(device->sdev_target)) - spi_dv_device(device); + if (!spi_initial_dv(sdev->sdev_target)) + spi_dv_device(sdev); return 0; } +static void sym53c8xx_slave_destroy(struct scsi_device *sdev) +{ + struct sym_hcb *np = sym_get_hcb(sdev->host); + struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); + + if (lp->itlq_tbl) + sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); + kfree(lp->cb_tags); + sym_mfree_dma(lp, sizeof(*lp), "LCB"); +} + /* * Linux entry point for info() function */ @@ -1487,7 +1429,7 @@ static int sym_setup_bus_dma_mask(struct sym_hcb *np) { #if SYM_CONF_DMA_ADDRESSING_MODE > 0 #if SYM_CONF_DMA_ADDRESSING_MODE == 1 -#define DMA_DAC_MASK 0x000000ffffffffffULL /* 40-bit */ +#define DMA_DAC_MASK DMA_40BIT_MASK #elif SYM_CONF_DMA_ADDRESSING_MODE == 2 #define DMA_DAC_MASK DMA_64BIT_MASK #endif @@ -1810,15 +1752,25 @@ static int __devinit sym_set_workarounds(struct sym_device *device) static void __devinit sym_init_device(struct pci_dev *pdev, struct sym_device *device) { - int i; + int i = 2; + struct pci_bus_region bus_addr; device->host_id = SYM_SETUP_HOST_ID; device->pdev = pdev; - i = pci_get_base_address(pdev, 1, &device->mmio_base); - pci_get_base_address(pdev, i, &device->ram_base); + pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]); + device->mmio_base = bus_addr.start; + + /* + * If the BAR is 64-bit, resource 2 will be occupied by the + * upper 32 bits + */ + if (!pdev->resource[i].flags) + i++; + pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]); + device->ram_base = bus_addr.start; -#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED +#ifdef CONFIG_SCSI_SYM53C8XX_MMIO if (device->mmio_base) device->s.ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); @@ -1897,6 +1849,7 @@ static int sym_detach(struct sym_hcb *np, struct pci_dev *pdev) */ printk("%s: resetting chip\n", sym_name(np)); OUTB(np, nc_istat, SRST); + INB(np, nc_mbox1); udelay(10); OUTB(np, nc_istat, 0); @@ -1921,7 +1874,8 @@ static struct scsi_host_template sym2_template = { .eh_bus_reset_handler = sym53c8xx_eh_bus_reset_handler, .eh_host_reset_handler = sym53c8xx_eh_host_reset_handler, .this_id = 7, - .use_clustering = DISABLE_CLUSTERING, + .use_clustering = ENABLE_CLUSTERING, + .max_sectors = 0xFFFF, #ifdef SYM_LINUX_PROC_INFO_SUPPORT .proc_info = sym53c8xx_proc_info, .proc_name = NAME53C8XX, @@ -2076,6 +2030,7 @@ static void sym2_set_dt(struct scsi_target *starget, int dt) tp->tgoal.check_nego = 1; } +#if 0 static void sym2_set_iu(struct scsi_target *starget, int iu) { struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); @@ -2101,7 +2056,7 @@ static void sym2_set_qas(struct scsi_target *starget, int qas) tp->tgoal.qas = 0; tp->tgoal.check_nego = 1; } - +#endif static struct spi_function_template sym2_transport_functions = { .set_offset = sym2_set_offset, @@ -2112,10 +2067,12 @@ static struct spi_function_template sym2_transport_functions = { .show_width = 1, .set_dt = sym2_set_dt, .show_dt = 1, +#if 0 .set_iu = sym2_set_iu, .show_iu = 1, .set_qas = sym2_set_qas, .show_qas = 1, +#endif .get_signalling = sym2_get_signalling, };