* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-#define SYM_VERSION "2.1.18j"
-#define SYM_DRIVER_NAME "sym-" SYM_VERSION
-
#include "sym_glue.h"
#include "sym_nvram.h"
for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
tcb_p tp = &np->target[i];
- tp->tinfo.user.scsi_version = tp->tinfo.curr.scsi_version= 2;
- tp->tinfo.user.spi_version = tp->tinfo.curr.spi_version = 2;
- tp->tinfo.user.period = np->minsync;
- tp->tinfo.user.offset = np->maxoffs;
- tp->tinfo.user.width = np->maxwide ? BUS_16_BIT : BUS_8_BIT;
tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
tp->usrtags = SYM_SETUP_MAX_TAG;
sym_nvram_setup_target (np, i, nvram);
- /*
- * Some single-ended devices may crash on receiving a
- * PPR negotiation attempt. Only try PPR if we're in
- * LVD mode.
- */
- if (np->features & FE_ULTRA3) {
- tp->tinfo.user.options |= PPR_OPT_DT;
- tp->tinfo.user.period = np->minsync_dt;
- tp->tinfo.user.offset = np->maxoffs_dt;
- tp->tinfo.user.spi_version = 3;
- }
-
if (!tp->usrtags)
tp->usrflags &= ~SYM_TAGS_ENABLED;
}
} else {
script_ofs = dsp;
script_size = 0;
- script_base = 0;
+ script_base = NULL;
script_name = "mem";
}
return chip;
}
- return 0;
+ return NULL;
}
#if SYM_CONF_DMA_ADDRESSING_MODE == 2
}
#endif
+/*
+ * sym_check_goals - sanitize the transfer negotiation goal for @sdev.
+ *
+ * Enforces the SPI rules on the target's tinfo.goal in place before a
+ * negotiation message is built: honours the user-visible wide/sync/DT
+ * settings from the SCSI midlayer, strips DT when the chip lacks
+ * Ultra3 capability, and clamps offset/period to the chip limits
+ * (DT limits when PPR_OPT_DT survives, plain sync limits otherwise).
+ * NOTE(review): caller must pass a non-NULL sdev (callers guard with
+ * "if (likely(sdev))" before invoking this).
+ */
+static void sym_check_goals(struct scsi_device *sdev)
+{
+	struct sym_hcb *np = ((struct host_data *)sdev->host->hostdata)->ncb;
+	struct sym_trans *st = &np->target[sdev->id].tinfo.goal;
+
+	/* here we enforce all the fiddly SPI rules */
+
+	/* Wide transfers disabled by the user: force narrow. */
+	if (!scsi_device_wide(sdev))
+		st->width = 0;
+
+	/* Sync disabled: go fully asynchronous and drop all PPR options. */
+	if (!scsi_device_sync(sdev)) {
+		st->options = 0;
+		st->period = 0;
+		st->offset = 0;
+		return;
+	}
+
+	if (scsi_device_dt(sdev)) {
+		if (scsi_device_dt_only(sdev))
+			st->options |= PPR_OPT_DT;
+
+		/* DT is meaningless without a REQ/ACK offset. */
+		if (st->offset == 0)
+			st->options &= ~PPR_OPT_DT;
+	} else {
+		st->options &= ~PPR_OPT_DT;
+	}
+
+	/* DT clocking only exists on Ultra3-capable chips. */
+	if (!(np->features & FE_ULTRA3))
+		st->options &= ~PPR_OPT_DT;
+
+	if (st->options & PPR_OPT_DT) {
+		/* all DT transfers must be wide */
+		st->width = 1;
+		/* Clamp offset/period to the chip's DT limits. */
+		if (st->offset > np->maxoffs_dt)
+			st->offset = np->maxoffs_dt;
+		if (st->period < np->minsync_dt)
+			st->period = np->minsync_dt;
+		if (st->period > np->maxsync_dt)
+			st->period = np->maxsync_dt;
+	} else {
+		/* Non-DT: clamp to the plain sync limits. */
+		if (st->offset > np->maxoffs)
+			st->offset = np->maxoffs;
+		if (st->period < np->minsync)
+			st->period = np->minsync;
+		if (st->period > np->maxsync)
+			st->period = np->maxsync;
+	}
+}
+
/*
* Prepare the next negotiation message if needed.
*
{
tcb_p tp = &np->target[cp->target];
int msglen = 0;
+ struct scsi_device *sdev = tp->sdev;
+
+ if (likely(sdev))
+ sym_check_goals(sdev);
/*
* Early C1010 chips need a work-around for DT
/*
* negotiate using PPR ?
*/
- if (tp->tinfo.goal.options & PPR_OPT_MASK)
+ if (scsi_device_dt(sdev)) {
nego = NS_PPR;
- /*
- * negotiate wide transfers ?
- */
- else if (tp->tinfo.curr.width != tp->tinfo.goal.width)
- nego = NS_WIDE;
- /*
- * negotiate synchronous transfers?
- */
- else if (tp->tinfo.curr.period != tp->tinfo.goal.period ||
- tp->tinfo.curr.offset != tp->tinfo.goal.offset)
- nego = NS_SYNC;
+ } else {
+ /*
+ * negotiate wide transfers ?
+ */
+ if (tp->tinfo.curr.width != tp->tinfo.goal.width)
+ nego = NS_WIDE;
+ /*
+ * negotiate synchronous transfers?
+ */
+ else if (tp->tinfo.curr.period != tp->tinfo.goal.period ||
+ tp->tinfo.curr.offset != tp->tinfo.goal.offset)
+ nego = NS_SYNC;
+ }
switch (nego) {
case NS_SYNC:
* try to find the interrupted script command,
* and the address at which to continue.
*/
- vdsp = 0;
+ vdsp = NULL;
nxtdsp = 0;
if (dsp > np->scripta_ba &&
dsp <= np->scripta_ba + np->scripta_sz) {
* we are not in race.
*/
i = 0;
- cp = 0;
+ cp = NULL;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->host_status != HS_BUSY &&
* abort for this target.
*/
i = 0;
- cp = 0;
+ cp = NULL;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp->host_status != HS_DISCONNECT)
else if (dp_scr == SCRIPTA_BA (np, pm1_data))
pm = &cp->phys.pm1;
else
- pm = 0;
+ pm = NULL;
if (pm) {
dp_scr = scr_to_cpu(pm->ret);
static int
sym_sync_nego_check(hcb_p np, int req, int target)
{
- tcb_p tp = &np->target[target];
u_char chg, ofs, per, fak, div;
if (DEBUG_FLAGS & DEBUG_NEGO) {
if (ofs) {
if (ofs > np->maxoffs)
{chg = 1; ofs = np->maxoffs;}
- if (req) {
- if (ofs > tp->tinfo.user.offset)
- {chg = 1; ofs = tp->tinfo.user.offset;}
- }
}
if (ofs) {
if (per < np->minsync)
{chg = 1; per = np->minsync;}
- if (req) {
- if (per < tp->tinfo.user.period)
- {chg = 1; per = tp->tinfo.user.period;}
- }
}
/*
}
if (!wide || !(np->features & FE_ULTRA3))
dt &= ~PPR_OPT_DT;
- if (req) {
- if (wide > tp->tinfo.user.width)
- {chg = 1; wide = tp->tinfo.user.width;}
- }
if (!(np->features & FE_U3EN)) /* Broken U3EN bit not supported */
dt &= ~PPR_OPT_DT;
}
else if (ofs > np->maxoffs)
{chg = 1; ofs = np->maxoffs;}
- if (req) {
- if (ofs > tp->tinfo.user.offset)
- {chg = 1; ofs = tp->tinfo.user.offset;}
- }
}
if (ofs) {
}
else if (per < np->minsync)
{chg = 1; per = np->minsync;}
- if (req) {
- if (per < tp->tinfo.user.period)
- {chg = 1; per = tp->tinfo.user.period;}
- }
}
/*
static int
sym_wide_nego_check(hcb_p np, int req, int target)
{
- tcb_p tp = &np->target[target];
u_char chg, wide;
if (DEBUG_FLAGS & DEBUG_NEGO) {
chg = 1;
wide = np->maxwide;
}
- if (req) {
- if (wide > tp->tinfo.user.width)
- {chg = 1; wide = tp->tinfo.user.width;}
- }
if (DEBUG_FLAGS & DEBUG_NEGO) {
PRINT_TARGET(np, target);
* used for negotiation, clear this info in the tcb.
*/
if (cp == tp->nego_cp)
- tp->nego_cp = 0;
+ tp->nego_cp = NULL;
#ifdef SYM_CONF_IARB_SUPPORT
/*
/*
* Make this CCB available.
*/
- cp->cam_ccb = 0;
+ cp->cam_ccb = NULL;
cp->host_status = HS_IDLE;
sym_remque(&cp->link_ccbq);
sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
*/
static ccb_p sym_alloc_ccb(hcb_p np)
{
- ccb_p cp = 0;
+ ccb_p cp = NULL;
int hcode;
/*
* queue to the controller.
*/
if (np->actccbs >= SYM_CONF_MAX_START)
- return 0;
+ return NULL;
/*
* Allocate memory for this CCB.
sym_mfree_dma(cp->sns_bbuf,SYM_SNS_BBUF_LEN,"SNS_BBUF");
sym_mfree_dma(cp, sizeof(*cp), "CCB");
}
- return 0;
+ return NULL;
}
/*
* allocation for not probed LUNs.
*/
if (!sym_is_bit(tp->lun_map, ln))
- return 0;
+ return NULL;
/*
* Initialize the target control block if not yet.
lp->cb_tags = sym_calloc(SYM_CONF_MAX_TASK, "CB_TAGS");
if (!lp->cb_tags) {
sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
- lp->itlq_tbl = 0;
+ lp->itlq_tbl = NULL;
goto fail;
}
/*
* Look up our CCB control block.
*/
- cp = 0;
+ cp = NULL;
FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
ccb_p cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
if (cp2->cam_ccb == ccb) {