X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fsym53c8xx_2%2Fsym_hipd.c;h=a671bdc07450ae43363a7cca0ac76af411a73eaa;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=50a176b3888d4c62336135db89eb1c9ef054bb82;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 50a176b38..a671bdc07 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -37,6 +37,10 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include +#include /* for timeouts in units of HZ */ + #include "sym_glue.h" #include "sym_nvram.h" @@ -66,41 +70,24 @@ static void sym_printl_hex(u_char *p, int n) printf (".\n"); } -/* - * Print out the content of a SCSI message. - */ -static int sym_show_msg (u_char * msg) -{ - u_char i; - printf ("%x",*msg); - if (*msg==M_EXTENDED) { - for (i=1;i<8;i++) { - if (i-1>msg[1]) break; - printf ("-%x",msg[i]); - } - return (i+1); - } else if ((*msg & 0xf0) == 0x20) { - printf ("-%x",msg[1]); - return (2); - } - return (1); -} - static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg) { - sym_print_addr(cp->cmd, "%s: ", label); + if (label) + sym_print_addr(cp->cmd, "%s: ", label); + else + sym_print_addr(cp->cmd, ""); - sym_show_msg(msg); - printf(".\n"); + spi_print_msg(msg); + printf("\n"); } static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg) { struct sym_tcb *tp = &np->target[target]; - dev_info(&tp->sdev->sdev_target->dev, "%s: ", label); + dev_info(&tp->starget->dev, "%s: ", label); - sym_show_msg(msg); - printf(".\n"); + spi_print_msg(msg); + printf("\n"); } /* @@ -149,8 +136,10 @@ static char *sym_scsi_bus_mode(int mode) static void sym_chip_reset (struct sym_hcb *np) { OUTB(np, nc_istat, SRST); + INB(np, nc_mbox1); udelay(10); OUTB(np, nc_istat, 0); + INB(np, nc_mbox1); udelay(2000); /* For BUS MODE to settle */ } @@ -216,6 +205,7 @@ int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int) OUTB(np, nc_stest3, TE); OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM)); OUTB(np, nc_scntl1, CRST); + INB(np, nc_mbox1); udelay(200); if (!SYM_SETUP_SCSI_BUS_CHECK) @@ -280,8 +270,10 @@ static void sym_selectclock(struct sym_hcb *np, u_char scntl3) if (!i) printf("%s: the chip cannot lock the frequency\n", sym_name(np)); - } else - udelay((50+10)); + } else { + INB(np, nc_mbox1); + udelay(50+10); + } OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */ OUTB(np, nc_scntl3, scntl3); OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */ @@ -483,7 +475,7 @@ static int sym_getpciclock (struct sym_hcb *np) * calculations more simple. */ #define _5M 5000000 -static u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; +static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M}; /* * Get clock factor and sync divisor for a given @@ -626,29 +618,6 @@ static __inline void sym_init_burst(struct sym_hcb *np, u_char bc) } } - -/* - * Print out the list of targets that have some flag disabled by user. 
- */ -static void sym_print_targets_flag(struct sym_hcb *np, int mask, char *msg) -{ - int cnt; - int i; - - for (cnt = 0, i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) { - if (i == np->myaddr) - continue; - if (np->target[i].usrflags & mask) { - if (!cnt++) - printf("%s: %s disabled for targets", - sym_name(np), msg); - printf(" %d", i); - } - } - if (cnt) - printf(".\n"); -} - /* * Save initial settings of some IO registers. * Assumed to have been set by BIOS. @@ -678,6 +647,37 @@ static void sym_save_initial_setting (struct sym_hcb *np) np->sv_ctest5 = INB(np, nc_ctest5) & 0x24; } +/* + * Set SCSI BUS mode. + * - LVD capable chips (895/895A/896/1010) report the current BUS mode + * through the STEST4 IO register. + * - For previous generation chips (825/825A/875), the user has to tell us + * how to check against HVD, since a 100% safe algorithm is not possible. + */ +static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram) +{ + if (np->scsi_mode) + return; + + np->scsi_mode = SMODE_SE; + if (np->features & (FE_ULTRA2|FE_ULTRA3)) + np->scsi_mode = (np->sv_stest4 & SMODE); + else if (np->features & FE_DIFF) { + if (SYM_SETUP_SCSI_DIFF == 1) { + if (np->sv_scntl3) { + if (np->sv_stest2 & 0x20) + np->scsi_mode = SMODE_HVD; + } else if (nvram->type == SYM_SYMBIOS_NVRAM) { + if (!(INB(np, nc_gpreg) & 0x08)) + np->scsi_mode = SMODE_HVD; + } + } else if (SYM_SETUP_SCSI_DIFF == 2) + np->scsi_mode = SMODE_HVD; + } + if (np->scsi_mode == SMODE_HVD) + np->rv_stest2 |= 0x20; +} + /* * Prepare io register values used by sym_start_up() * according to selected and supported features. @@ -688,10 +688,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru u32 period; int i; - /* - * Wide ? - */ - np->maxwide = (np->features & FE_WIDE)? 1 : 0; + np->maxwide = (np->features & FE_WIDE) ? 1 : 0; /* * Guess the frequency of the chip's clock. @@ -872,6 +869,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru * Get parity checking, host ID and verbose mode from NVRAM */ np->myaddr = 255; + np->scsi_mode = 0; sym_nvram_setup_host(shost, np, nvram); /* @@ -888,33 +886,7 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru */ sym_init_burst(np, burst_max); - /* - * Set SCSI BUS mode. - * - LVD capable chips (895/895A/896/1010) report the - * current BUS mode through the STEST4 IO register. - * - For previous generation chips (825/825A/875), - * user has to tell us how to check against HVD, - * since a 100% safe algorithm is not possible. - */ - np->scsi_mode = SMODE_SE; - if (np->features & (FE_ULTRA2|FE_ULTRA3)) - np->scsi_mode = (np->sv_stest4 & SMODE); - else if (np->features & FE_DIFF) { - if (SYM_SETUP_SCSI_DIFF == 1) { - if (np->sv_scntl3) { - if (np->sv_stest2 & 0x20) - np->scsi_mode = SMODE_HVD; - } - else if (nvram->type == SYM_SYMBIOS_NVRAM) { - if (!(INB(np, nc_gpreg) & 0x08)) - np->scsi_mode = SMODE_HVD; - } - } - else if (SYM_SETUP_SCSI_DIFF == 2) - np->scsi_mode = SMODE_HVD; - } - if (np->scsi_mode == SMODE_HVD) - np->rv_stest2 |= 0x20; + sym_set_bus_mode(np, nvram); /* * Set LED support from SCRIPTS. 
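
Several hunks in this patch add a dummy INB(np, nc_mbox1) immediately before a udelay() or mdelay() (chip reset, SCSI bus reset, clock selection, start-up). The usual reason for such a read-back is that PCI register writes can be posted by the host bridge, so a delay started right after OUTB() may elapse before the write has actually reached the chip; reading any chip register forces the posted write to complete first, and only then does the timed wait begin. The stand-alone sketch below models that ordering in plain C. The mmio_write8()/mmio_read8()/delay_us() helpers, the fake register block and the register offsets are stand-ins invented for illustration, not the driver's real accessors.

#include <stdint.h>
#include <stdio.h>

static volatile uint8_t fake_regs[0x100];	/* stand-in for the chip's MMIO window */

static void mmio_write8(unsigned int reg, uint8_t val) { fake_regs[reg] = val; }
static uint8_t mmio_read8(unsigned int reg) { return fake_regs[reg]; }
static void delay_us(unsigned int us) { (void)us; /* busy-wait or nanosleep() here */ }

#define NC_ISTAT	0x14	/* hypothetical offsets for this sketch */
#define NC_MBOX1	0x35
#define SRST		0x40	/* soft-reset bit in ISTAT */

static void chip_reset_example(void)
{
	mmio_write8(NC_ISTAT, SRST);	/* assert soft reset */
	(void)mmio_read8(NC_MBOX1);	/* flush the posted write ... */
	delay_us(10);			/* ... so the pulse width is really timed */
	mmio_write8(NC_ISTAT, 0);	/* deassert reset */
	(void)mmio_read8(NC_MBOX1);
	delay_us(2000);			/* give the SCSI bus mode time to settle */
}

int main(void)
{
	chip_reset_example();
	printf("reset sequence issued\n");
	return 0;
}
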
@@ -952,8 +924,10 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED); tp->usrtags = SYM_SETUP_MAX_TAG; + tp->usr_width = np->maxwide; + tp->usr_period = 9; - sym_nvram_setup_target(np, i, nvram); + sym_nvram_setup_target(tp, i, nvram); if (!tp->usrtags) tp->usrflags &= ~SYM_TAGS_ENABLED; @@ -996,13 +970,6 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); } - /* - * Let user be aware of targets that have some disable flags set. - */ - sym_print_targets_flag(np, SYM_SCAN_BOOT_DISABLED, "SCAN AT BOOT"); - if (sym_verbose) - sym_print_targets_flag(np, SYM_SCAN_LUNS_DISABLED, - "SCAN FOR LUNS"); return 0; } @@ -1012,8 +979,8 @@ static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, stru * * Has to be called with interrupts disabled. */ -#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED -static int sym_regtest (struct sym_hcb *np) +#ifdef CONFIG_SCSI_SYM53C8XX_MMIO +static int sym_regtest(struct sym_hcb *np) { register volatile u32 data; /* @@ -1031,20 +998,25 @@ static int sym_regtest (struct sym_hcb *np) #endif printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", (unsigned) data); - return (0x10); + return 0x10; } - return (0); + return 0; +} +#else +static inline int sym_regtest(struct sym_hcb *np) +{ + return 0; } #endif -static int sym_snooptest (struct sym_hcb *np) +static int sym_snooptest(struct sym_hcb *np) { - u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat; - int i, err=0; -#ifndef CONFIG_SCSI_SYM53C8XX_IOMAPPED - err |= sym_regtest (np); - if (err) return (err); -#endif + u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat; + int i, err; + + err = sym_regtest(np); + if (err) + return err; restart_test: /* * Enable Master Parity Checking as we intend @@ -1133,7 +1105,7 @@ restart_test: err |= 4; } - return (err); + return err; } /* @@ -1396,7 +1368,6 @@ static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget, goal->iu = 0; goal->dt = 0; goal->qas = 0; - goal->period = 0; goal->offset = 0; return; } @@ -1445,7 +1416,7 @@ static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget, static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr) { struct sym_tcb *tp = &np->target[cp->target]; - struct scsi_target *starget = tp->sdev->sdev_target; + struct scsi_target *starget = tp->starget; struct sym_trans *goal = &tp->tgoal; int msglen = 0; int nego; @@ -1456,7 +1427,8 @@ static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgp * Many devices implement PPR in a buggy way, so only use it if we * really want to. 
*/ - if (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)) { + if (goal->offset && + (goal->iu || goal->dt || goal->qas || (goal->period < 0xa))) { nego = NS_PPR; } else if (spi_width(starget) != goal->width) { nego = NS_WIDE; @@ -1470,29 +1442,18 @@ static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgp switch (nego) { case NS_SYNC: - msgptr[msglen++] = M_EXTENDED; - msgptr[msglen++] = 3; - msgptr[msglen++] = M_X_SYNC_REQ; - msgptr[msglen++] = goal->period; - msgptr[msglen++] = goal->offset; + msglen += spi_populate_sync_msg(msgptr + msglen, goal->period, + goal->offset); break; case NS_WIDE: - msgptr[msglen++] = M_EXTENDED; - msgptr[msglen++] = 2; - msgptr[msglen++] = M_X_WIDE_REQ; - msgptr[msglen++] = goal->width; + msglen += spi_populate_width_msg(msgptr + msglen, goal->width); break; case NS_PPR: - msgptr[msglen++] = M_EXTENDED; - msgptr[msglen++] = 6; - msgptr[msglen++] = M_X_PPR_REQ; - msgptr[msglen++] = goal->period; - msgptr[msglen++] = 0; - msgptr[msglen++] = goal->offset; - msgptr[msglen++] = goal->width; - msgptr[msglen++] = (goal->iu ? PPR_OPT_IU : 0) | + msglen += spi_populate_ppr_msg(msgptr + msglen, goal->period, + goal->offset, goal->width, + (goal->iu ? PPR_OPT_IU : 0) | (goal->dt ? PPR_OPT_DT : 0) | - (goal->qas ? PPR_OPT_QAS : 0); + (goal->qas ? PPR_OPT_QAS : 0)); break; } @@ -1690,7 +1651,7 @@ static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status) if (cam_status) sym_set_cam_status(cmd, cam_status); #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING - if (sym_get_cam_status(cmd) == CAM_REQUEUE_REQ) { + if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) { struct sym_tcb *tp = &np->target[cp->target]; struct sym_lcb *lp = sym_lp(tp, cp->lun); if (lp) { @@ -1791,12 +1752,13 @@ void sym_start_up (struct sym_hcb *np, int reason) /* * Wakeup all pending jobs. */ - sym_flush_busy_queue(np, CAM_SCSI_BUS_RESET); + sym_flush_busy_queue(np, DID_RESET); /* * Init chip. */ OUTB(np, nc_istat, 0x00); /* Remove Reset, abort */ + INB(np, nc_mbox1); udelay(2000); /* The 895 needs time for the bus mode to settle */ OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0); @@ -1905,6 +1867,7 @@ void sym_start_up (struct sym_hcb *np, int reason) if (np->features & (FE_ULTRA2|FE_ULTRA3)) { OUTONW(np, nc_sien, SBMC); if (reason == 0) { + INB(np, nc_mbox1); mdelay(100); INW(np, nc_sist); } @@ -2074,7 +2037,7 @@ static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs static void sym_setwide(struct sym_hcb *np, int target, u_char wide) { struct sym_tcb *tp = &np->target[target]; - struct scsi_target *starget = tp->sdev->sdev_target; + struct scsi_target *starget = tp->starget; if (spi_width(starget) == wide) return; @@ -2102,7 +2065,7 @@ sym_setsync(struct sym_hcb *np, int target, u_char ofs, u_char per, u_char div, u_char fak) { struct sym_tcb *tp = &np->target[target]; - struct scsi_target *starget = tp->sdev->sdev_target; + struct scsi_target *starget = tp->starget; u_char wide = (tp->head.wval & EWS) ? 
BUS_16_BIT : BUS_8_BIT; sym_settrans(np, target, 0, ofs, per, wide, div, fak); @@ -2129,7 +2092,7 @@ sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs, u_char per, u_char wide, u_char div, u_char fak) { struct sym_tcb *tp = &np->target[target]; - struct scsi_target *starget = tp->sdev->sdev_target; + struct scsi_target *starget = tp->starget; sym_settrans(np, target, opts, ofs, per, wide, div, fak); @@ -2944,7 +2907,7 @@ unknown_int: * Dequeue from the START queue all CCBs that match * a given target/lun/task condition (-1 means all), * and move them from the BUSY queue to the COMP queue - * with CAM_REQUEUE_REQ status condition. + * with DID_SOFT_ERROR status condition. * This function is used during error handling/recovery. * It is called with SCRIPTS not running. */ @@ -2974,7 +2937,7 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task if ((target == -1 || cp->target == target) && (lun == -1 || cp->lun == lun) && (task == -1 || cp->tag == task)) { - sym_set_cam_status(cp->cmd, CAM_REQUEUE_REQ); + sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); } @@ -3093,13 +3056,13 @@ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb /* * Message table indirect structure. */ - cp->phys.smsg.addr = cpu_to_scr(CCB_BA(cp, scsi_smsg2)); + cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2); cp->phys.smsg.size = cpu_to_scr(msglen); /* * sense command */ - cp->phys.cmd.addr = cpu_to_scr(CCB_BA(cp, sensecmd)); + cp->phys.cmd.addr = CCB_BA(cp, sensecmd); cp->phys.cmd.size = cpu_to_scr(6); /* @@ -3116,7 +3079,7 @@ static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb * sense data */ memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN); - cp->phys.sense.addr = cpu_to_scr(CCB_BA(cp, sns_bbuf)); + cp->phys.sense.addr = CCB_BA(cp, sns_bbuf); cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN); /* @@ -3198,7 +3161,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); /* Preserve the software timeout condition */ - if (sym_get_cam_status(cmd) != CAM_CMD_TIMEOUT) + if (sym_get_cam_status(cmd) != DID_TIME_OUT) sym_set_cam_status(cmd, cam_status); ++i; #if 0 @@ -3366,7 +3329,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num) * Make sure at least our IO to abort has been dequeued. */ #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING - assert(i && sym_get_cam_status(cp->cmd) == CAM_REQUEUE_REQ); + assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR); #else sym_remque(&cp->link_ccbq); sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq); @@ -3375,9 +3338,9 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num) * Keep track in cam status of the reason of the abort. */ if (cp->to_abort == 2) - sym_set_cam_status(cp->cmd, CAM_CMD_TIMEOUT); + sym_set_cam_status(cp->cmd, DID_TIME_OUT); else - sym_set_cam_status(cp->cmd, CAM_REQ_ABORTED); + sym_set_cam_status(cp->cmd, DID_ABORT); /* * Complete with error everything that we have dequeued. @@ -3491,7 +3454,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num) * conditions not due to timeout. 
*/ if (cp->to_abort == 2) - sym_set_cam_status(cp->cmd, CAM_CMD_TIMEOUT); + sym_set_cam_status(cp->cmd, DID_TIME_OUT); cp->to_abort = 0; /* We donnot expect to fail here */ break; @@ -3502,7 +3465,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num) case SIR_ABORT_SENT: target = INB(np, nc_sdid) & 0xf; tp = &np->target[target]; - starget = tp->sdev->sdev_target; + starget = tp->starget; /* ** If we didn't abort anything, leave here. @@ -3551,7 +3514,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num) */ i = (INL(np, nc_scratcha) - np->squeue_ba) / 4; sym_dequeue_from_squeue(np, i, target, lun, -1); - sym_clear_tasks(np, CAM_REQ_ABORTED, target, lun, task); + sym_clear_tasks(np, DID_ABORT, target, lun, task); sym_flush_comp_queue(np, 0); /* @@ -3566,7 +3529,7 @@ static void sym_sir_task_recovery(struct sym_hcb *np, int num) * Print to the log the message we intend to send. */ if (num == SIR_TARGET_SELECTED) { - dev_info(&tp->sdev->sdev_target->dev, "control msgout:"); + dev_info(&tp->starget->dev, "control msgout:"); sym_printl_hex(np->abrt_msg, np->abrt_tbl.size); np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size); } @@ -3626,7 +3589,7 @@ static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int if (pm) { dp_scr = scr_to_cpu(pm->ret); - dp_ofs -= scr_to_cpu(pm->sg.size); + dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff; } /* @@ -3643,7 +3606,7 @@ static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the * end of the data. */ - tmp = scr_to_cpu(sym_goalp(cp)); + tmp = scr_to_cpu(cp->goalp); dp_sg = SYM_CONF_MAX_SG; if (dp_scr != tmp) dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4); @@ -3750,7 +3713,7 @@ static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb * And our alchemy:) allows to easily calculate the data * script address we want to return for the next data phase. */ - dp_ret = cpu_to_scr(sym_goalp(cp)); + dp_ret = cpu_to_scr(cp->goalp); dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4); /* @@ -3846,7 +3809,7 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) * If all data has been transferred, * there is no residual. */ - if (cp->phys.head.lastp == sym_goalp(cp)) + if (cp->phys.head.lastp == cp->goalp) return resid; /* @@ -3877,6 +3840,8 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) resid += (tmp & 0xffffff); } + resid -= cp->odd_byte_adjustment; + /* * Hopefully, the result is not too wrong. */ @@ -3984,11 +3949,7 @@ sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) /* * It was a request. Prepare an answer message. */ - np->msgout[0] = M_EXTENDED; - np->msgout[1] = 3; - np->msgout[2] = M_X_SYNC_REQ; - np->msgout[3] = per; - np->msgout[4] = ofs; + spi_populate_sync_msg(np->msgout, per, ofs); if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, target, "sync msgout", np->msgout); @@ -4114,14 +4075,7 @@ sym_ppr_nego_check(struct sym_hcb *np, int req, int target) /* * It was a request. Prepare an answer message. */ - np->msgout[0] = M_EXTENDED; - np->msgout[1] = 6; - np->msgout[2] = M_X_PPR_REQ; - np->msgout[3] = per; - np->msgout[4] = 0; - np->msgout[5] = ofs; - np->msgout[6] = wide; - np->msgout[7] = opts; + spi_populate_ppr_msg(np->msgout, per, ofs, wide, opts); if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, target, "ppr msgout", np->msgout); @@ -4233,10 +4187,7 @@ sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp) /* * It was a request. 
Prepare an answer message. */ - np->msgout[0] = M_EXTENDED; - np->msgout[1] = 2; - np->msgout[2] = M_X_WIDE_REQ; - np->msgout[3] = wide; + spi_populate_width_msg(np->msgout, wide); np->msgin [0] = M_NOOP; @@ -4281,11 +4232,8 @@ static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb * a single SCSI command (Suggested by Justin Gibbs). */ if (tp->tgoal.offset) { - np->msgout[0] = M_EXTENDED; - np->msgout[1] = 3; - np->msgout[2] = M_X_SYNC_REQ; - np->msgout[3] = tp->tgoal.period; - np->msgout[4] = tp->tgoal.offset; + spi_populate_sync_msg(np->msgout, tp->tgoal.period, + tp->tgoal.offset); if (DEBUG_FLAGS & DEBUG_NEGO) { sym_print_nego_msg(np, cp->target, @@ -4544,7 +4492,7 @@ static void sym_int_sir (struct sym_hcb *np) switch (np->msgin [2]) { case M_X_MODIFY_DP: if (DEBUG_FLAGS & DEBUG_POINTER) - sym_print_msg(cp,"modify DP",np->msgin); + sym_print_msg(cp, NULL, np->msgin); tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + (np->msgin[5]<<8) + (np->msgin[6]); sym_modify_dp(np, tp, cp, tmp); @@ -4571,7 +4519,7 @@ static void sym_int_sir (struct sym_hcb *np) */ case M_IGN_RESIDUE: if (DEBUG_FLAGS & DEBUG_POINTER) - sym_print_msg(cp,"ign wide residue", np->msgin); + sym_print_msg(cp, NULL, np->msgin); if (cp->host_flags & HF_SENSE) OUTL_DSP(np, SCRIPTA_BA(np, clrack)); else @@ -4651,30 +4599,7 @@ struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char t goto out; cp = sym_que_entry(qp, struct sym_ccb, link_ccbq); -#ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING - /* - * If the LCB is not yet available and the LUN - * has been probed ok, try to allocate the LCB. - */ - if (!lp && sym_is_bit(tp->lun_map, ln)) { - lp = sym_alloc_lcb(np, tn, ln); - if (!lp) - goto out_free; - } -#endif - - /* - * If the LCB is not available here, then the - * logical unit is not yet discovered. For those - * ones only accept 1 SCSI IO per logical unit, - * since we cannot allow disconnections. - */ - if (!lp) { - if (!sym_is_bit(tp->busy0_map, ln)) - sym_set_bit(tp->busy0_map, ln); - else - goto out_free; - } else { + { /* * If we have been asked for a tagged command. */ @@ -4683,7 +4608,8 @@ struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char t * Debugging purpose. */ #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING - assert(lp->busy_itl == 0); + if (lp->busy_itl != 0) + goto out_free; #endif /* * Allocate resources for tags if not yet. @@ -4728,7 +4654,8 @@ struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char t * Debugging purpose. */ #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING - assert(lp->busy_itl == 0 && lp->busy_itlq == 0); + if (lp->busy_itl != 0 || lp->busy_itlq != 0) + goto out_free; #endif /* * Count this nexus for this LUN. @@ -4758,10 +4685,8 @@ struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char t } #endif - /* - * Remember all informations needed to free this CCB. - */ cp->to_abort = 0; + cp->odd_byte_adjustment = 0; cp->tag = tag; cp->order = tag_order; cp->target = tn; @@ -4829,12 +4754,6 @@ void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp) lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun)); } - /* - * Otherwise, we only accept 1 IO per LUN. - * Clear the bit that keeps track of this IO. 
- */ - else - sym_clr_bit(tp->busy0_map, cp->lun); /* * We donnot queue more than 1 ccb per target @@ -4986,20 +4905,7 @@ static void sym_init_tcb (struct sym_hcb *np, u_char tn) struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) { struct sym_tcb *tp = &np->target[tn]; - struct sym_lcb *lp = sym_lp(tp, ln); - - /* - * Already done, just return. - */ - if (lp) - return lp; - - /* - * Donnot allow LUN control block - * allocation for not probed LUNs. - */ - if (!sym_is_bit(tp->lun_map, ln)) - return NULL; + struct sym_lcb *lp = NULL; /* * Initialize the target control block if not yet. @@ -5071,13 +4977,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) lp->started_max = SYM_CONF_MAX_TASK; lp->started_limit = SYM_CONF_MAX_TASK; #endif - /* - * If we are busy, count the IO. - */ - if (sym_is_bit(tp->busy0_map, ln)) { - lp->busy_itl = 1; - sym_clr_bit(tp->busy0_map, ln); - } + fail: return lp; } @@ -5091,12 +4991,6 @@ static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln) struct sym_lcb *lp = sym_lp(tp, ln); int i; - /* - * If LCB not available, try to allocate it. - */ - if (!lp && !(lp = sym_alloc_lcb(np, tn, ln))) - goto fail; - /* * Allocate the task table and and the tag allocation * circular buffer. We want both or none. @@ -5104,7 +4998,7 @@ static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln) lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); if (!lp->itlq_tbl) goto fail; - lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_KERNEL); + lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_ATOMIC); if (!lp->cb_tags) { sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); lp->itlq_tbl = NULL; @@ -5243,7 +5137,7 @@ int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb * /* * message */ - cp->phys.smsg.addr = cpu_to_scr(CCB_BA(cp, scsi_smsg)); + cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg); cp->phys.smsg.size = cpu_to_scr(msglen); /* @@ -5343,7 +5237,7 @@ int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out) } /* - * Complete execution of a SCSI command with extented + * Complete execution of a SCSI command with extended * error, SCSI status error, or having been auto-sensed. * * The SCRIPTS processor is not running there, so we @@ -5441,7 +5335,7 @@ if (resid) /* * Let's requeue it to device. */ - sym_set_cam_status(cmd, CAM_REQUEUE_REQ); + sym_set_cam_status(cmd, DID_SOFT_ERROR); goto finish; } weirdness: @@ -5470,8 +5364,7 @@ finish: /* * Donnot start more than 1 command after an error. */ - if (lp) - sym_start_next_ccbs(np, lp, 1); + sym_start_next_ccbs(np, lp, 1); #endif } @@ -5509,18 +5402,12 @@ void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp) tp = &np->target[cp->target]; lp = sym_lp(tp, cp->lun); - /* - * Assume device discovered on first success. - */ - if (!lp) - sym_set_bit(tp->lun_map, cp->lun); - /* * If all data have been transferred, given than no * extended error did occur, there is no residual. */ resid = 0; - if (cp->phys.head.lastp != sym_goalp(cp)) + if (cp->phys.head.lastp != cp->goalp) resid = sym_compute_residual(np, cp); /* @@ -5540,15 +5427,6 @@ if (resid) */ sym_set_cam_result_ok(cp, cmd, resid); -#ifdef SYM_OPT_SNIFF_INQUIRY - /* - * On standard INQUIRY response (EVPD and CmDt - * not set), sniff out device capabilities. 
- */ - if (cp->cdb_buf[0] == INQUIRY && !(cp->cdb_buf[1] & 0x3)) - sym_sniff_inquiry(np, cmd, resid); -#endif - #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING /* * If max number of started ccbs had been reduced, @@ -5576,7 +5454,7 @@ if (resid) /* * Requeue a couple of awaiting scsi commands. */ - if (lp && !sym_que_empty(&lp->waiting_ccbq)) + if (!sym_que_empty(&lp->waiting_ccbq)) sym_start_next_ccbs(np, lp, 2); #endif /* @@ -5819,8 +5697,7 @@ void sym_hcb_free(struct sym_hcb *np) SYM_QUEHEAD *qp; struct sym_ccb *cp; struct sym_tcb *tp; - struct sym_lcb *lp; - int target, lun; + int target; if (np->scriptz0) sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0"); @@ -5846,16 +5723,6 @@ void sym_hcb_free(struct sym_hcb *np) for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { tp = &np->target[target]; - for (lun = 0 ; lun < SYM_CONF_MAX_LUN ; lun++) { - lp = sym_lp(tp, lun); - if (!lp) - continue; - if (lp->itlq_tbl) - sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, - "ITLQ_TBL"); - kfree(lp->cb_tags); - sym_mfree_dma(lp, sizeof(*lp), "LCB"); - } #if SYM_CONF_MAX_LUN > 1 kfree(tp->lunmp); #endif