1 /* -*- mode: c; c-basic-offset: 8 -*- */
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version.
13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details.
18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 **-----------------------------------------------------------------------------
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
32 * The 700 is the lowliest of the line, it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this).
46 * 1. Better statistics in the proc fs
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
56 * Fixed bad bug affecting tag starvation processing (previously the
57 * driver would hang the system if too many tags starved. Also fixed
58 * bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
78 * More Compatibility changes for 710 (now actually works). Enhanced
79 * support for odd clock speeds which constrain SDTR negotiations.
80 * correct cacheline separation for scsi messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
91 * More endianness/cache coherency changes.
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
115 #define NCR_700_VERSION "2.8"
117 #include <linux/config.h>
118 #include <linux/kernel.h>
119 #include <linux/types.h>
120 #include <linux/string.h>
121 #include <linux/ioport.h>
122 #include <linux/delay.h>
123 #include <linux/spinlock.h>
124 #include <linux/completion.h>
125 #include <linux/sched.h>
126 #include <linux/init.h>
127 #include <linux/proc_fs.h>
128 #include <linux/blkdev.h>
129 #include <linux/module.h>
130 #include <linux/interrupt.h>
132 #include <asm/system.h>
134 #include <asm/pgtable.h>
135 #include <asm/byteorder.h>
137 #include <scsi/scsi.h>
138 #include <scsi/scsi_cmnd.h>
139 #include <scsi/scsi_dbg.h>
140 #include <scsi/scsi_eh.h>
141 #include <scsi/scsi_host.h>
142 #include <scsi/scsi_tcq.h>
143 #include <scsi/scsi_transport.h>
144 #include <scsi/scsi_transport_spi.h>
148 /* NOTE: For 64 bit drivers there are points in the code where we use
149  * a non dereferenceable pointer to point to a structure in dma-able
150  * memory (which is 32 bits) so that we can use all of the structure
151  * operations but take the address at the end.  This macro allows us
152  * to truncate the 64 bit pointer down to 32 bits without the compiler
/* Truncate a (possibly 64-bit) pointer value to its low 32 bits for use
 * as a SCRIPTS-engine DMA address. */
154 #define to32bit(x)	((__u32)((unsigned long)(x)))
/* STATIC expands to plain 'static'; presumably kept as a macro so it can
 * be defined away for debugging symbol visibility — TODO confirm. */
159 #define STATIC static
162 MODULE_AUTHOR("James Bottomley");
163 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
164 MODULE_LICENSE("GPL");
166 /* This is the script */
167 #include "53c700_d.h"
/* Forward declarations for the scsi_host_template entry points and the
 * shared SPI transport template used by all 53c700 hosts. */
170 STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
171 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
172 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
173 STATIC int NCR_700_dev_reset(struct scsi_cmnd * SCpnt);
174 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
175 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
176 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
177 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
178 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
179 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
180 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
182 STATIC struct device_attribute *NCR_700_dev_attrs[];
/* Filled in by the module init code; NCR_700_detect() BUG()s if still NULL. */
184 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
/* Human-readable phase names, indexed by the phase nibble encoded in the
 * script interrupt value (dsps & 0xf00) >> 8. */
186 static char *NCR_700_phase[] = {
189 	"before command phase",
190 	"after command phase",
191 	"after status phase",
192 	"after data in phase",
193 	"after data out phase",
/* Names for the condition codes raised by the SCRIPTS program. */
197 static char *NCR_700_condition[] = {
205 	"REJECT_MSG RECEIVED",
206 	"DISCONNECT_MSG RECEIVED",
/* Messages for A_FATAL script interrupts, indexed by (dsps & 0xfff). */
212 static char *NCR_700_fatal_messages[] = {
213 	"unexpected message after reselection",
214 	"still MSG_OUT after message injection",
215 	"not MSG_IN after selection",
216 	"Illegal message length received",
/* Bit names for the SBCL register, used by sbcl_to_string(). */
219 static char *NCR_700_SBCL_bits[] = {
/* SCSI bus phase decoded from the low three SBCL bits. */
230 static char *NCR_700_SBCL_to_phase[] = {
/* Template for the outgoing SDTR extended message (period/offset follow). */
241 static __u8 NCR_700_SDTR_msg[] = {
242 	0x01,	/* Extended message */
243 	0x03,	/* Extended message Length */
244 	0x01,	/* SDTR Extended message */
249 /* This translates the SDTR message offset and period to a value
250  * which can be loaded into the SXFER_REG.
252  * NOTE: According to SCSI-2, the true transfer period (in ns) is
253  * actually four times this period value */
255 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
256 			       __u8 offset, __u8 period)
	/* Chip limits differ between the 700 and 710 families. */
260 	__u8 min_xferp = (hostdata->chip710
261 			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
262 	__u8 max_offset = (hostdata->chip710
263 			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
	/* Clamp the period to the chip minimum.  Report the value we
	 * actually use (hostdata->min_period), not the SDTR template
	 * byte — the old message printed NCR_700_SDTR_msg[3]*4, which
	 * did not match what the code sets below. */
268 	if(period < hostdata->min_period) {
269 		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, hostdata->min_period*4);
270 		period = hostdata->min_period;
	/* Convert the (period * 4ns) into clock ticks of the sync clock. */
272 	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
273 	if(offset > max_offset) {
274 		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
278 	if(XFERP < min_xferp) {
279 		printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
	/* SXFER layout: offset in the low nibble, XFERP in bits 4-6. */
283 	return (offset & 0x0f) | (XFERP & 0x07)<<4;
/* Compute the SXFER_REG value for a device from its negotiated SPI
 * transport offset and period. */
287 NCR_700_get_SXFER(struct scsi_device *SDp)
289 	struct NCR_700_Host_Parameters *hostdata =
290 		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
292 	return NCR_700_offset_period_to_sxfer(hostdata,
293 					      spi_offset(SDp->sdev_target),
294 					      spi_period(SDp->sdev_target));
/* Allocate and initialise a 53c700 host: DMA memory for the SCRIPTS
 * program and command slots, host template hooks, script patching,
 * chip identification/reset, irq and mid-layer registration.
 * Fixes vs. original: "detatching" typo and stray space after '\n' in
 * the irq-failure message (error-string defects, later fixed upstream). */
298 NCR_700_detect(struct scsi_host_template *tpnt,
299 	       struct NCR_700_Host_Parameters *hostdata, struct device *dev,
300 	       unsigned long irq, u8 scsi_id)
302 	dma_addr_t pScript, pSlots;
305 	struct Scsi_Host *host;
306 	static int banner = 0;
309 	if(tpnt->sdev_attrs == NULL)
310 		tpnt->sdev_attrs = NCR_700_dev_attrs;
	/* One noncoherent DMA block holds script + msg/status buffers + slots. */
312 	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
313 				       &pScript, GFP_KERNEL);
315 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
319 	script = (__u32 *)memory;
320 	hostdata->msgin = memory + MSGIN_OFFSET;
321 	hostdata->msgout = memory + MSGOUT_OFFSET;
322 	hostdata->status = memory + STATUS_OFFSET;
323 	/* all of these offsets are L1_CACHE_BYTES separated.  It is fatal
324 	 * if this isn't sufficient separation to avoid dma flushing issues */
325 	BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
326 	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
329 	pSlots = pScript + SLOTS_OFFSET;
331 	/* Fill in the missing routines from the host template */
332 	tpnt->queuecommand = NCR_700_queuecommand;
333 	tpnt->eh_abort_handler = NCR_700_abort;
334 	tpnt->eh_device_reset_handler = NCR_700_dev_reset;
335 	tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
336 	tpnt->eh_host_reset_handler = NCR_700_host_reset;
337 	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
338 	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
339 	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
340 	tpnt->use_clustering = ENABLE_CLUSTERING;
341 	tpnt->slave_configure = NCR_700_slave_configure;
342 	tpnt->slave_destroy = NCR_700_slave_destroy;
343 	tpnt->change_queue_depth = NCR_700_change_queue_depth;
344 	tpnt->change_queue_type = NCR_700_change_queue_type;
346 	if(tpnt->name == NULL)
347 		tpnt->name = "53c700";
348 	if(tpnt->proc_name == NULL)
349 		tpnt->proc_name = "53c700";
352 	host = scsi_host_alloc(tpnt, 4);
	/* Build the free list of command slots; pSG is the bus address of
	 * each slot's scatter-gather array inside the DMA block. */
355 	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
356 	       * NCR_700_COMMAND_SLOTS_PER_HOST);
357 	for(j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
358 		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
359 						 - (unsigned long)&hostdata->slots[0].SG[0]);
360 		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
362 			hostdata->free_list = &hostdata->slots[j];
364 			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
365 		hostdata->slots[j].state = NCR_700_SLOT_FREE;
	/* Copy the SCRIPT into DMA memory in bus byte order. */
368 	for(j = 0; j < sizeof(SCRIPT)/sizeof(SCRIPT[0]); j++) {
369 		script[j] = bS_to_host(SCRIPT[j]);
372 	/* adjust all labels to be bus physical */
373 	for(j = 0; j < PATCHES; j++) {
374 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
376 	/* now patch up fixed addresses. */
377 	script_patch_32(script, MessageLocation,
378 			pScript + MSGOUT_OFFSET);
379 	script_patch_32(script, StatusAddress,
380 			pScript + STATUS_OFFSET);
381 	script_patch_32(script, ReceiveMsgAddress,
382 			pScript + MSGIN_OFFSET);
384 	hostdata->script = script;
385 	hostdata->pScript = pScript;
386 	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
387 	hostdata->state = NCR_700_HOST_FREE;
388 	hostdata->cmd = NULL;
390 	host->max_lun = NCR_700_MAX_LUNS;
391 	BUG_ON(NCR_700_transport_template == NULL);
392 	host->transportt = NCR_700_transport_template;
393 	host->unique_id = hostdata->base;
394 	host->base = hostdata->base;
395 	hostdata->eh_complete = NULL;
397 	host->this_id = scsi_id;
398 	host->hostdata[0] = (unsigned long)hostdata;
	/* Probe chip identity: CTEST9 readback distinguishes 700 vs 700-66;
	 * the revision nibble lives in CTEST8 (710) or CTEST7 (700). */
400 	NCR_700_writeb(0xff, host, CTEST9_REG);
401 	if(hostdata->chip710)
402 		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
404 		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
405 	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
407 		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
410 	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
411 	       hostdata->chip710 ? "53c710" :
412 	       (hostdata->fast ? "53c700-66" : "53c700"),
413 	       hostdata->rev, hostdata->differential ?
414 	       "(Differential)" : "");
416 	NCR_700_chip_reset(host);
418 	if (request_irq(irq, NCR_700_intr, SA_SHIRQ, dev->bus_id, host)) {
419 		dev_printk(KERN_ERR, dev, "53c700: irq %lu request failed\n",
424 	if (scsi_add_host(host, dev)) {
425 		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
426 		goto out_release_irq;
429 	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
/* Release the DMA block (script + buffers + slots) allocated by
 * NCR_700_detect(). */
443 NCR_700_release(struct Scsi_Host *host)
445 	struct NCR_700_Host_Parameters *hostdata =
446 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
448 	dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
449 			     hostdata->script, hostdata->pScript);
/* Build a SCSI IDENTIFY message byte: bit 6 set when the target may
 * disconnect, low bits carry the LUN. */
454 NCR_700_identify(int can_disconnect, __u8 lun)
456 	return IDENTIFY_BASE |
457 		((can_disconnect) ? 0x40 : 0) |
458 		(lun & NCR_700_LUN_MASK);
462  * Function : static int data_residual (Scsi_Host *host)
464  * Purpose : return residual data count of what's in the chip.  If you
465  *           really want to know what this function is doing, it's almost a
466  *           direct transcription of the algorithm described in the 53c710
467  *           guide, except that the DBC and DFIFO registers are only 6 bits
470  * Inputs : host - SCSI host */
472 NCR_700_data_residual (struct Scsi_Host *host) {
473 	struct NCR_700_Host_Parameters *hostdata =
474 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
475 	int count, synchronous = 0;
	/* DFIFO/DBC count width differs: 7 bits on the 710, 6 on the 700. */
478 	if(hostdata->chip710) {
479 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
480 			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
482 		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
483 			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
	/* Nonzero SXFER offset nibble means a synchronous transfer was active. */
487 		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
489 	/* get the data direction */
490 	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
	/* Add bytes still latched in the SCSI FIFO / input data latch. */
495 		count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
497 		if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
501 		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
502 		if (sstat & SODL_REG_FULL)
504 		if (synchronous && (sstat & SODR_REG_FULL))
509 	printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
514 /* print out the SCSI wires and corresponding phase from the SBCL register
 * NOTE: returns a pointer to a static buffer — not reentrant. */
517 sbcl_to_string(__u8 sbcl)
520 	static char ret[256];
525 			strcat(ret, NCR_700_SBCL_bits[i]);
527 	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
/* Return the index of the lowest set bit in an 8-bit bitmap. */
532 bitmap_to_number(__u8 bitmap)
536 	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
541 /* Pull a slot off the free list */
542 STATIC struct NCR_700_command_slot *
543 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
545 	struct NCR_700_command_slot *slot = hostdata->free_list;
	/* Sanity check: an empty free list should mean every slot is in use. */
549 	if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
550 		printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
554 	if(slot->state != NCR_700_SLOT_FREE)
556 		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
	/* Unlink the slot from the singly-linked free list. */
559 	hostdata->free_list = slot->ITL_forw;
560 	slot->ITL_forw = NULL;
563 	/* NOTE: set the state to busy here, not queued, since this
564 	 * indicates the slot is in use and cannot be run by the IRQ
565 	 * finish routine.  If we cannot queue the command when it
566 	 * is properly build, we then change to NCR_700_SLOT_QUEUED */
567 	slot->state = NCR_700_SLOT_BUSY;
568 	hostdata->command_slot_count++;
/* Return a command slot to the free list, with consistency checks
 * against double-free and corrupted slot state. */
574 free_slot(struct NCR_700_command_slot *slot,
575 	  struct NCR_700_Host_Parameters *hostdata)
577 	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
578 		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
580 	if(slot->state == NCR_700_SLOT_FREE) {
581 		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
584 	slot->resume_offset = 0;
	/* Push the slot back on the head of the free list. */
586 	slot->state = NCR_700_SLOT_FREE;
587 	slot->ITL_forw = hostdata->free_list;
588 	hostdata->free_list = slot;
589 	hostdata->command_slot_count--;
593 /* This routine really does very little.  The command is indexed on
594    the ITL and (if tagged) the ITLQ lists in _queuecommand */
596 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
597 		     struct scsi_cmnd *SCp, __u32 dsp)
599 	/* Its just possible that this gets executed twice */
601 		struct NCR_700_command_slot *slot =
602 			(struct NCR_700_command_slot *)SCp->host_scribble;
		/* Remember where the script must resume after reselection. */
604 		slot->resume_offset = dsp;
	/* The host is now free to start or resume another command. */
606 	hostdata->state = NCR_700_HOST_FREE;
607 	hostdata->cmd = NULL;
/* Undo the DMA mapping of a command's data buffer: scatter-gather list
 * when use_sg was set, otherwise the single-buffer mapping in the slot. */
611 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
612 	      struct NCR_700_command_slot *slot)
614 	if(SCp->sc_data_direction != DMA_NONE &&
615 	   SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
617 			dma_unmap_sg(hostdata->dev, SCp->buffer,
618 				     SCp->use_sg, SCp->sc_data_direction);
620 			dma_unmap_single(hostdata->dev, slot->dma_handle,
621 					 SCp->request_bufflen,
622 					 SCp->sc_data_direction);
/* Complete a command: unmap its DMA buffers, undo any internal
 * REQUEST SENSE fix-up, free the slot and hand the result to the
 * mid layer. */
628 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
629 		  struct scsi_cmnd *SCp, int result)
631 	hostdata->state = NCR_700_HOST_FREE;
632 	hostdata->cmd = NULL;
635 		struct NCR_700_command_slot *slot =
636 			(struct NCR_700_command_slot *)SCp->host_scribble;
638 		NCR_700_unmap(hostdata, SCp, slot);
639 		dma_unmap_single(hostdata->dev, slot->pCmd,
640 				 sizeof(SCp->cmnd), DMA_TO_DEVICE);
		/* cmnd[6] flags a driver-generated REQUEST SENSE (see
		 * process_script_interrupt); cmnd[7] holds the original
		 * command's status byte. */
641 		if(SCp->cmnd[0] == REQUEST_SENSE && SCp->cmnd[6] == NCR_700_INTERNAL_SENSE_MAGIC) {
643 			printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
644 			       SCp, SCp->cmnd[7], result);
645 			scsi_print_sense("53c700", SCp);
648 			/* restore the old result if the request sense was
651 				result = SCp->cmnd[7];
652 			/* now restore the original command */
653 			memcpy((void *) SCp->cmnd, (void *) SCp->data_cmnd,
654 			       sizeof(SCp->data_cmnd));
655 			SCp->request_buffer = SCp->buffer;
656 			SCp->request_bufflen = SCp->bufflen;
657 			SCp->use_sg = SCp->old_use_sg;
658 			SCp->cmd_len = SCp->old_cmd_len;
659 			SCp->sc_data_direction = SCp->sc_old_data_direction;
660 			SCp->underflow = SCp->old_underflow;
663 		free_slot(slot, hostdata);
		/* Debug-only sanity check on the per-device depth counter. */
665 		if(NCR_700_get_depth(SCp->device) == 0 ||
666 			    NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
667 			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
668 			       NCR_700_get_depth(SCp->device));
669 #endif	/* NCR_700_DEBUG */
670 		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
672 		SCp->host_scribble = NULL;
673 		SCp->result = result;
676 		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
/* Pulse the RST line on the SCSI bus by setting then clearing
 * ASSERT_RST in SCNTL1. */
682 NCR_700_internal_bus_reset(struct Scsi_Host *host)
685 	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
687 	NCR_700_writeb(0, host, SCNTL1_REG);
/* Program the chip registers after reset: bus mode, arbitration,
 * parity, interrupt enables, and the sync/async clock dividers chosen
 * from the board's input clock.  Also derives the minimum synchronous
 * period the chip can support at this clock. */
692 NCR_700_chip_setup(struct Scsi_Host *host)
694 	struct NCR_700_Host_Parameters *hostdata =
695 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
696 	__u32 dcntl_extra = 0;
698 	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
	/* The 710 runs here in 700-compatibility mode (COMPAT_700_MODE). */
700 	if(hostdata->chip710) {
701 		__u8 burst_disable = hostdata->burst_disable
703 		dcntl_extra = COMPAT_700_MODE;
705 		NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
706 		NCR_700_writeb(BURST_LENGTH_8  | hostdata->dmode_extra,
707 			       host, DMODE_710_REG);
708 		NCR_700_writeb(burst_disable | (hostdata->differential ?
709 						DIFF : 0), host, CTEST7_REG);
710 		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
711 		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
712 			       | AUTO_ATN, host, SCNTL0_REG);
714 		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
715 			       host, DMODE_700_REG);
716 		NCR_700_writeb(hostdata->differential ?
717 			       DIFF : 0, host, CTEST7_REG);
719 			/* this is for 700-66, does nothing on 700 */
720 			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
721 				       | GENERATE_RECEIVE_PARITY, host,
724 			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
725 				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
	/* Our own ID on the bus, quiesce SBCL, start in async mode. */
729 	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
730 	NCR_700_writeb(0, host, SBCL_REG);
731 	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
733 	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
734 		     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
736 	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
737 	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
	/* Pick sync/async clock dividers by input-clock band (MHz). */
738 	if(hostdata->clock > 75) {
739 		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75Mhz is the maximum this chip can be driven at\n", hostdata->clock);
740 		/* do the best we can, but the async clock will be out
741 		 * of spec: sync divider 2, async divider 3 */
742 		DEBUG(("53c700: sync 2 async 3\n"));
743 		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
744 		NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
745 		hostdata->sync_clock = hostdata->clock/2;
746 	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
747 		/* sync divider 1.5, async divider 3 */
748 		DEBUG(("53c700: sync 1.5 async 3\n"));
749 		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
750 		NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
751 		hostdata->sync_clock = hostdata->clock*2;
752 		hostdata->sync_clock /= 3;
754 	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
755 		/* sync divider 1, async divider 2 */
756 		DEBUG(("53c700: sync 1 async 2\n"));
757 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
758 		NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
759 		hostdata->sync_clock = hostdata->clock;
760 	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
761 		/* sync divider 1, async divider 1.5 */
762 		DEBUG(("53c700: sync 1 async 1.5\n"));
763 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
764 		NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
765 		hostdata->sync_clock = hostdata->clock;
767 		DEBUG(("53c700: sync 1 async 1\n"));
768 		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
769 		NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
770 		/* sync divider 1, async divider 1 */
771 		hostdata->sync_clock = hostdata->clock;
773 	/* Calculate the actual minimum period that can be supported
774 	 * by our synchronous clock speed.  See the 710 manual for
775 	 * exact details of this calculation which is based on a
776 	 * setting of the SXFER register */
777 	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
778 	hostdata->min_period = NCR_700_MIN_PERIOD;
779 	if(min_period > NCR_700_MIN_PERIOD)
780 		hostdata->min_period = min_period;
/* Software-reset the chip (ISTAT on the 710, DCNTL on the 700), then
 * reprogram it via NCR_700_chip_setup(). */
784 NCR_700_chip_reset(struct Scsi_Host *host)
786 	struct NCR_700_Host_Parameters *hostdata =
787 		(struct NCR_700_Host_Parameters *)host->hostdata[0];
788 	if(hostdata->chip710) {
789 		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
792 		NCR_700_writeb(0, host, ISTAT_REG);
794 		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
797 		NCR_700_writeb(0, host, DCNTL_REG);
802 	NCR_700_chip_setup(host);
805 /* The heart of the message processing engine is that the instruction
806  * immediately after the INT is the normal case (and so must be CLEAR
807  * ACK).  If we want to do something else, we call that routine in
808  * scripts and set temp to be the normal case + 8 (skipping the CLEAR
809  * ACK) so that the routine returns correctly to resume its activity
/* Handle an extended SCSI message (msgin[2] selects the type) and
 * return the script offset at which to resume. */
812 process_extended_message(struct Scsi_Host *host,
813 			 struct NCR_700_Host_Parameters *hostdata,
814 			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
816 	__u32 resume_offset = dsp, temp = dsp + 8;
817 	__u8 pun = 0xff, lun = 0xff;
820 		pun = SCp->device->id;
821 		lun = SCp->device->lun;
824 	switch(hostdata->msgin[2]) {
	/* SDTR reply to a negotiation we started: record the agreed
	 * period/offset in the SPI transport and program SXFER. */
826 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
827 			__u8 period = hostdata->msgin[3];
828 			__u8 offset = hostdata->msgin[4];
830 			if(offset == 0 || period == 0) {
835 			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
836 				if(spi_offset(SCp->device->sdev_target) != 0)
837 					printk(KERN_INFO "scsi%d: (%d:%d) Synchronous at offset %d, period %dns\n",
838 					       host->host_no, pun, lun,
841 					printk(KERN_INFO "scsi%d: (%d:%d) Asynchronous\n",
842 					       host->host_no, pun, lun);
843 				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
846 			spi_offset(SCp->device->sdev_target) = offset;
847 			spi_period(SCp->device->sdev_target) = period;
850 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
851 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
853 			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
857 			/* SDTR message out of the blue, reject it */
858 			printk(KERN_WARNING "scsi%d Unexpected SDTR msg\n",
860 			hostdata->msgout[0] = A_REJECT_MSG;
861 			dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
862 			script_patch_16(hostdata->script, MessageCount, 1);
863 			/* SendMsgOut returns, so set up the return
865 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
	/* WDTR (wide) is not supported by these chips: always reject. */
870 		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
871 		       host->host_no, pun, lun);
872 		hostdata->msgout[0] = A_REJECT_MSG;
873 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
874 		script_patch_16(hostdata->script, MessageCount, 1);
875 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
	/* Any other extended message: log it and send REJECT. */
880 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
881 		       host->host_no, pun, lun,
882 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
883 		scsi_print_msg(hostdata->msgin);
886 		hostdata->msgout[0] = A_REJECT_MSG;
887 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
888 		script_patch_16(hostdata->script, MessageCount, 1);
889 		/* SendMsgOut returns, so set up the return
891 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
	/* TEMP holds the script return address (normal case + 8 skips CLEAR ACK). */
893 	NCR_700_writel(temp, host, TEMP_REG);
894 	return resume_offset;
/* Dispatch on the first byte of an incoming SCSI message and return
 * the script offset at which to resume; extended messages are handed
 * to process_extended_message(). */
898 process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
899 		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
901 	/* work out where to return to */
902 	__u32 temp = dsp + 8, resume_offset = dsp;
903 	__u8 pun = 0xff, lun = 0xff;
906 		pun = SCp->device->id;
907 		lun = SCp->device->lun;
911 	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
912 	       NCR_700_phase[(dsps & 0xf00) >> 8]);
913 	scsi_print_msg(hostdata->msgin);
917 	switch(hostdata->msgin[0]) {
920 		resume_offset = process_extended_message(host, hostdata, SCp,
	/* REJECT: either our SDTR attempt or our first tag message was
	 * refused — fall back to async / untagged operation respectively. */
925 		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
926 			/* Rejected our sync negotiation attempt */
927 			spi_period(SCp->device->sdev_target) =
928 				spi_offset(SCp->device->sdev_target) = 0;
929 			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
930 			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
931 		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
932 			/* rejected our first simple tag message */
933 			printk(KERN_WARNING "scsi%d (%d:%d) Rejected first tag queue attempt, turning off tag queueing\n", host->host_no, pun, lun);
934 			/* we're done negotiating */
935 			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
936 			hostdata->tag_negotiated &= ~(1<<SCp->device->id);
937 			SCp->device->tagged_supported = 0;
938 			scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
940 			printk(KERN_WARNING "scsi%d (%d:%d) Unexpected REJECT Message %s\n",
941 			       host->host_no, pun, lun,
942 			       NCR_700_phase[(dsps & 0xf00) >> 8]);
943 			/* however, just ignore it */
947 	case A_PARITY_ERROR_MSG:
948 		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
950 		NCR_700_internal_bus_reset(host);
952 	case A_SIMPLE_TAG_MSG:
953 		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
954 		       pun, lun, hostdata->msgin[1],
955 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
	/* Unknown message: log it and send REJECT with ATN. */
959 		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
960 		       host->host_no, pun, lun,
961 		       NCR_700_phase[(dsps & 0xf00) >> 8]);
963 		scsi_print_msg(hostdata->msgin);
966 		hostdata->msgout[0] = A_REJECT_MSG;
967 		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
968 		script_patch_16(hostdata->script, MessageCount, 1);
969 		/* SendMsgOut returns, so set up the return
971 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
975 	NCR_700_writel(temp, host, TEMP_REG);
976 	/* set us up to receive another message */
977 	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
978 	return resume_offset;
982 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
983 struct Scsi_Host *host,
984 struct NCR_700_Host_Parameters *hostdata)
986 __u32 resume_offset = 0;
987 __u8 pun = 0xff, lun=0xff;
990 pun = SCp->device->id;
991 lun = SCp->device->lun;
994 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
995 DEBUG((" COMMAND COMPLETE, status=%02x\n",
996 hostdata->status[0]));
997 /* OK, if TCQ still under negotiation, we now know it works */
998 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
999 NCR_700_set_tag_neg_state(SCp->device,
1000 NCR_700_FINISHED_TAG_NEGOTIATION);
1002 /* check for contingent allegiance contitions */
1003 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
1004 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
1005 struct NCR_700_command_slot *slot =
1006 (struct NCR_700_command_slot *)SCp->host_scribble;
1007 if(SCp->cmnd[0] == REQUEST_SENSE) {
1008 /* OOPS: bad device, returning another
1009 * contingent allegiance condition */
1010 printk(KERN_ERR "scsi%d (%d:%d) broken device is looping in contingent allegiance: ignoring\n", host->host_no, pun, lun);
1011 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1014 scsi_print_command(SCp);
1015 printk(" cmd %p has status %d, requesting sense\n",
1016 SCp, hostdata->status[0]);
1018 /* we can destroy the command here
1019 * because the contingent allegiance
1020 * condition will cause a retry which
1021 * will re-copy the command from the
1022 * saved data_cmnd. We also unmap any
1023 * data associated with the command
1025 NCR_700_unmap(hostdata, SCp, slot);
1027 SCp->cmnd[0] = REQUEST_SENSE;
1028 SCp->cmnd[1] = (SCp->device->lun & 0x7) << 5;
1031 SCp->cmnd[4] = sizeof(SCp->sense_buffer);
1034 /* Here's a quiet hack: the
1035 * REQUEST_SENSE command is six bytes,
1036 * so store a flag indicating that
1037 * this was an internal sense request
1038 * and the original status at the end
1040 SCp->cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1041 SCp->cmnd[7] = hostdata->status[0];
1043 SCp->sc_data_direction = DMA_FROM_DEVICE;
1044 dma_sync_single_for_device(hostdata->dev, slot->pCmd,
1045 SCp->cmd_len, DMA_TO_DEVICE);
1046 SCp->request_bufflen = sizeof(SCp->sense_buffer);
1047 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1048 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1049 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1050 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1051 slot->SG[1].pAddr = 0;
1052 slot->resume_offset = hostdata->pScript;
1053 dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1054 dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1056 /* queue the command for reissue */
1057 slot->state = NCR_700_SLOT_QUEUED;
1058 hostdata->state = NCR_700_HOST_FREE;
1059 hostdata->cmd = NULL;
1062 // Currently rely on the mid layer evaluation
1063 // of the tag queuing capability
1065 //if(status_byte(hostdata->status[0]) == GOOD &&
1066 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1067 // /* Piggy back the tag queueing support
1068 // * on this command */
1069 // dma_sync_single_for_cpu(hostdata->dev,
1070 // slot->dma_handle,
1071 // SCp->request_bufflen,
1072 // DMA_FROM_DEVICE);
1073 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1074 // printk(KERN_INFO "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", host->host_no, pun, lun);
1075 // hostdata->tag_negotiated |= (1<<SCp->device->id);
1076 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1078 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1079 // hostdata->tag_negotiated &= ~(1<<SCp->device->id);
1082 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1084 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1085 __u8 i = (dsps & 0xf00) >> 8;
1087 printk(KERN_ERR "scsi%d: (%d:%d), UNEXPECTED PHASE %s (%s)\n",
1088 host->host_no, pun, lun,
1090 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1091 printk(KERN_ERR " len = %d, cmd =", SCp->cmd_len);
1092 scsi_print_command(SCp);
1094 NCR_700_internal_bus_reset(host);
1095 } else if((dsps & 0xfffff000) == A_FATAL) {
1096 int i = (dsps & 0xfff);
1098 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1099 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1100 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1101 printk(KERN_ERR " msg begins %02x %02x\n",
1102 hostdata->msgin[0], hostdata->msgin[1]);
1104 NCR_700_internal_bus_reset(host);
1105 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1106 #ifdef NCR_700_DEBUG
1107 __u8 i = (dsps & 0xf00) >> 8;
1109 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1110 host->host_no, pun, lun,
1111 i, NCR_700_phase[i]);
1113 save_for_reselection(hostdata, SCp, dsp);
1115 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1117 struct NCR_700_command_slot *slot;
1118 __u8 reselection_id = hostdata->reselection_id;
1119 struct scsi_device *SDp;
1121 lun = hostdata->msgin[0] & 0x1f;
1123 hostdata->reselection_id = 0xff;
1124 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1125 host->host_no, reselection_id, lun));
1126 /* clear the reselection indicator */
1127 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1128 if(unlikely(SDp == NULL)) {
1129 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1130 host->host_no, reselection_id, lun);
1133 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1134 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1135 if(unlikely(SCp == NULL)) {
1136 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1137 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1141 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1142 DEBUG(("53c700: %d:%d:%d, reselection is tag %d, slot %p(%d)\n",
1143 host->host_no, SDp->id, SDp->lun,
1144 hostdata->msgin[2], slot, slot->tag));
1146 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1147 if(unlikely(SCp == NULL)) {
1148 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for untagged cmd\n",
1149 host->host_no, reselection_id, lun);
1152 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1156 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1157 host->host_no, reselection_id, lun,
1158 hostdata->msgin[0], hostdata->msgin[1],
1159 hostdata->msgin[2]);
1161 if(hostdata->state != NCR_700_HOST_BUSY)
1162 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1164 resume_offset = slot->resume_offset;
1165 hostdata->cmd = slot->cmnd;
1167 /* re-patch for this command */
1168 script_patch_32_abs(hostdata->script, CommandAddress,
1170 script_patch_16(hostdata->script,
1171 CommandCount, slot->cmnd->cmd_len);
1172 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1173 to32bit(&slot->pSG[0].ins));
1175 /* Note: setting SXFER only works if we're
1176 * still in the MESSAGE phase, so it is vital
1177 * that ACK is still asserted when we process
1178 * the reselection message. The resume offset
1179 * should therefore always clear ACK */
1180 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1182 dma_cache_sync(hostdata->msgin,
1183 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1184 dma_cache_sync(hostdata->msgout,
1185 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1186 /* I'm just being paranoid here, the command should
1187 * already have been flushed from the cache */
1188 dma_cache_sync(slot->cmnd->cmnd,
1189 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1194 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1196 /* This section is full of debugging code because I've
1197 * never managed to reach it. I think what happens is
1198 * that, because the 700 runs with selection
1199 * interrupts enabled the whole time that we take a
1200 * selection interrupt before we manage to get to the
1201 * reselected script interrupt */
1203 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1204 struct NCR_700_command_slot *slot;
1206 /* Take out our own ID */
1207 reselection_id &= ~(1<<host->this_id);
1209 /* I've never seen this happen, so keep this as a printk rather
1211 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1212 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1215 /* FIXME: DEBUGGING CODE */
1216 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1219 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1220 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1221 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1224 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1225 SCp = hostdata->slots[i].cmnd;
1229 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1230 /* change slot from busy to queued to redo command */
1231 slot->state = NCR_700_SLOT_QUEUED;
1233 hostdata->cmd = NULL;
1235 if(reselection_id == 0) {
1236 if(hostdata->reselection_id == 0xff) {
1237 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1240 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1242 reselection_id = hostdata->reselection_id;
1246 /* convert to real ID */
1247 reselection_id = bitmap_to_number(reselection_id);
1249 hostdata->reselection_id = reselection_id;
1250 /* just in case we have a stale simple tag message, clear it */
1251 hostdata->msgin[1] = 0;
1252 dma_cache_sync(hostdata->msgin,
1253 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1254 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1255 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1257 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1259 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1260 /* we've just disconnected from the bus, do nothing since
1261 * a return here will re-run the queued command slot
1262 * that may have been interrupted by the initial selection */
1263 DEBUG((" SELECTION COMPLETED\n"));
1264 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1265 resume_offset = process_message(host, hostdata, SCp,
1267 } else if((dsps & 0xfffff000) == 0) {
1268 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1269 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1270 host->host_no, pun, lun, NCR_700_condition[i],
1271 NCR_700_phase[j], dsp - hostdata->pScript);
1273 scsi_print_command(SCp);
1276 for(i = 0; i < SCp->use_sg + 1; i++) {
1277 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1281 NCR_700_internal_bus_reset(host);
1282 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1283 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1284 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1285 resume_offset = dsp;
1287 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1288 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1289 NCR_700_internal_bus_reset(host);
1291 return resume_offset;
1294 /* We run the 53c700 with selection interrupts always enabled. This
1295 * means that the chip may be selected as soon as the bus frees. On a
1296 * busy bus, this can be before the scripts engine finishes its
1297 * processing. Therefore, part of the selection processing has to be
1298 * to find out what the scripts engine is doing and complete the
1299 * function if necessary (i.e. process the pending disconnect or save
 * the interrupted initial selection) */
1302 process_selection(struct Scsi_Host *host, __u32 dsp)
1304 __u8 id = 0; /* Squash compiler warning */
1306 __u32 resume_offset = 0;
1307 struct NCR_700_Host_Parameters *hostdata =
1308 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1309 struct scsi_cmnd *SCp = hostdata->cmd;
1312 for(count = 0; count < 5; count++) {
1313 id = NCR_700_readb(host, hostdata->chip710 ?
1314 CTEST9_REG : SFBR_REG);
1316 /* Take out our own ID */
1317 id &= ~(1<<host->this_id);
1322 sbcl = NCR_700_readb(host, SBCL_REG);
1323 if((sbcl & SBCL_IO) == 0) {
1324 /* mark as having been selected rather than reselected */
1327 /* convert to real ID */
1328 hostdata->reselection_id = id = bitmap_to_number(id);
1329 DEBUG(("scsi%d: Reselected by %d\n",
1330 host->host_no, id));
1332 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1333 struct NCR_700_command_slot *slot =
1334 (struct NCR_700_command_slot *)SCp->host_scribble;
1335 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1337 switch(dsp - hostdata->pScript) {
1338 case Ent_Disconnect1:
1339 case Ent_Disconnect2:
1340 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1342 case Ent_Disconnect3:
1343 case Ent_Disconnect4:
1344 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1346 case Ent_Disconnect5:
1347 case Ent_Disconnect6:
1348 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1350 case Ent_Disconnect7:
1351 case Ent_Disconnect8:
1352 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1356 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1360 slot->state = NCR_700_SLOT_QUEUED;
1364 hostdata->state = NCR_700_HOST_BUSY;
1365 hostdata->cmd = NULL;
1366 /* clear any stale simple tag message */
1367 hostdata->msgin[1] = 0;
1368 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1372 /* Selected as target, Ignore */
1373 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1374 } else if(hostdata->tag_negotiated & (1<<id)) {
1375 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1377 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1379 return resume_offset;
1383 NCR_700_clear_fifo(struct Scsi_Host *host) {
1384 const struct NCR_700_Host_Parameters *hostdata
1385 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1386 if(hostdata->chip710) {
1387 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1389 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1394 NCR_700_flush_fifo(struct Scsi_Host *host) {
1395 const struct NCR_700_Host_Parameters *hostdata
1396 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1397 if(hostdata->chip710) {
1398 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1400 NCR_700_writeb(0, host, CTEST8_REG);
1402 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1404 NCR_700_writeb(0, host, DFIFO_REG);
/* The queue lock with interrupts disabled must be held on entry to
 * this function */
1412 NCR_700_start_command(struct scsi_cmnd *SCp)
1414 struct NCR_700_command_slot *slot =
1415 (struct NCR_700_command_slot *)SCp->host_scribble;
1416 struct NCR_700_Host_Parameters *hostdata =
1417 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1418 __u16 count = 1; /* for IDENTIFY message */
1420 if(hostdata->state != NCR_700_HOST_FREE) {
1421 /* keep this inside the lock to close the race window where
1422 * the running command finishes on another CPU while we don't
1423 * change the state to queued on this one */
1424 slot->state = NCR_700_SLOT_QUEUED;
1426 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1427 SCp->device->host->host_no, slot->cmnd, slot));
1430 hostdata->state = NCR_700_HOST_BUSY;
1431 hostdata->cmd = SCp;
1432 slot->state = NCR_700_SLOT_BUSY;
1433 /* keep interrupts disabled until we have the command correctly
1434 * set up so we cannot take a selection interrupt */
1436 hostdata->msgout[0] = NCR_700_identify(SCp->cmnd[0] != REQUEST_SENSE,
1438 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1439 * if the negotiated transfer parameters still hold, so
1440 * always renegotiate them */
1441 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE) {
1442 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1445 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1446 * If a contingent allegiance condition exists, the device
1447 * will refuse all tags, so send the request sense as untagged
1449 if((hostdata->tag_negotiated & (1<<SCp->device->id))
1450 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE)) {
1451 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1454 if(hostdata->fast &&
1455 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1456 memcpy(&hostdata->msgout[count], NCR_700_SDTR_msg,
1457 sizeof(NCR_700_SDTR_msg));
1458 hostdata->msgout[count+3] = spi_period(SCp->device->sdev_target);
1459 hostdata->msgout[count+4] = spi_offset(SCp->device->sdev_target);
1460 count += sizeof(NCR_700_SDTR_msg);
1461 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1464 script_patch_16(hostdata->script, MessageCount, count);
1467 script_patch_ID(hostdata->script,
1468 Device_ID, 1<<SCp->device->id);
1470 script_patch_32_abs(hostdata->script, CommandAddress,
1472 script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
1473 /* finally plumb the beginning of the SG list into the script
1475 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1476 to32bit(&slot->pSG[0].ins));
1477 NCR_700_clear_fifo(SCp->device->host);
1479 if(slot->resume_offset == 0)
1480 slot->resume_offset = hostdata->pScript;
1481 /* now perform all the writebacks and invalidates */
1482 dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
1483 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1485 dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1486 dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
1488 /* set the synchronous period/offset */
1489 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1490 SCp->device->host, SXFER_REG);
1491 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1492 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1498 NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
1500 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1501 struct NCR_700_Host_Parameters *hostdata =
1502 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1504 __u32 resume_offset = 0;
1505 __u8 pun = 0xff, lun = 0xff;
1506 unsigned long flags;
1509 /* Use the host lock to serialise acess to the 53c700
1510 * hardware. Note: In future, we may need to take the queue
1511 * lock to enter the done routines. When that happens, we
1512 * need to ensure that for this driver, the host lock and the
1513 * queue lock point to the same thing. */
1514 spin_lock_irqsave(host->host_lock, flags);
1515 if((istat = NCR_700_readb(host, ISTAT_REG))
1516 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1518 __u8 sstat0 = 0, dstat = 0;
1520 struct scsi_cmnd *SCp = hostdata->cmd;
1521 enum NCR_700_Host_State state;
1524 state = hostdata->state;
1525 SCp = hostdata->cmd;
1527 if(istat & SCSI_INT_PENDING) {
1530 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1533 if(istat & DMA_INT_PENDING) {
1536 dstat = NCR_700_readb(host, DSTAT_REG);
1539 dsps = NCR_700_readl(host, DSPS_REG);
1540 dsp = NCR_700_readl(host, DSP_REG);
1542 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1543 host->host_no, istat, sstat0, dstat,
1544 (dsp - (__u32)(hostdata->pScript))/4,
1548 pun = SCp->device->id;
1549 lun = SCp->device->lun;
1552 if(sstat0 & SCSI_RESET_DETECTED) {
1553 struct scsi_device *SDp;
1556 hostdata->state = NCR_700_HOST_BUSY;
1558 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1559 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1561 scsi_report_bus_reset(host, 0);
1563 /* clear all the negotiated parameters */
1564 __shost_for_each_device(SDp, host)
1565 SDp->hostdata = NULL;
1567 /* clear all the slots and their pending commands */
1568 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1569 struct scsi_cmnd *SCp;
1570 struct NCR_700_command_slot *slot =
1571 &hostdata->slots[i];
1573 if(slot->state == NCR_700_SLOT_FREE)
1577 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1579 free_slot(slot, hostdata);
1580 SCp->host_scribble = NULL;
1581 NCR_700_set_depth(SCp->device, 0);
1582 /* NOTE: deadlock potential here: we
1583 * rely on mid-layer guarantees that
1584 * scsi_done won't try to issue the
1585 * command again otherwise we'll
1587 * hostdata->state_lock */
1588 SCp->result = DID_RESET << 16;
1589 SCp->scsi_done(SCp);
1592 NCR_700_chip_setup(host);
1594 hostdata->state = NCR_700_HOST_FREE;
1595 hostdata->cmd = NULL;
1596 /* signal back if this was an eh induced reset */
1597 if(hostdata->eh_complete != NULL)
1598 complete(hostdata->eh_complete);
1600 } else if(sstat0 & SELECTION_TIMEOUT) {
1601 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1602 host->host_no, pun, lun));
1603 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1604 } else if(sstat0 & PHASE_MISMATCH) {
1605 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1606 (struct NCR_700_command_slot *)SCp->host_scribble;
1608 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1609 /* It wants to reply to some part of
1611 #ifdef NCR_700_DEBUG
1612 __u32 temp = NCR_700_readl(host, TEMP_REG);
1613 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1614 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1616 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1617 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1618 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1619 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1620 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1621 int residual = NCR_700_data_residual(host);
1623 #ifdef NCR_700_DEBUG
1624 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1626 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1627 host->host_no, pun, lun,
1628 SGcount, data_transfer);
1629 scsi_print_command(SCp);
1631 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1632 host->host_no, pun, lun,
1633 SGcount, data_transfer, residual);
1636 data_transfer += residual;
1638 if(data_transfer != 0) {
1644 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1645 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1646 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1647 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1648 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1649 pAddr += (count - data_transfer);
1650 #ifdef NCR_700_DEBUG
1651 if(pAddr != naddr) {
1652 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1655 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1657 /* set the executed moves to nops */
1658 for(i=0; i<SGcount; i++) {
1659 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1660 slot->SG[i].pAddr = 0;
1662 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1663 /* and pretend we disconnected after
1664 * the command phase */
1665 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1666 /* make sure all the data is flushed */
1667 NCR_700_flush_fifo(host);
1669 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1670 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1671 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1672 NCR_700_internal_bus_reset(host);
1675 } else if(sstat0 & SCSI_GROSS_ERROR) {
1676 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1677 host->host_no, pun, lun);
1678 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1679 } else if(sstat0 & PARITY_ERROR) {
1680 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1681 host->host_no, pun, lun);
1682 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1683 } else if(dstat & SCRIPT_INT_RECEIVED) {
1684 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1685 host->host_no, pun, lun));
1686 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1687 } else if(dstat & (ILGL_INST_DETECTED)) {
1688 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1689 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1690 host->host_no, pun, lun,
1691 dsp, dsp - hostdata->pScript);
1692 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1693 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1694 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1695 host->host_no, pun, lun, dstat);
1696 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1700 /* NOTE: selection interrupt processing MUST occur
1701 * after script interrupt processing to correctly cope
1702 * with the case where we process a disconnect and
1703 * then get reselected before we process the
1705 if(sstat0 & SELECTED) {
1706 /* FIXME: It currently takes at least FOUR
1707 * interrupts to complete a command that
1708 * disconnects: one for the disconnect, one
1709 * for the reselection, one to get the
1710 * reselection data and one to complete the
1711 * command. If we guess the reselected
1712 * command here and prepare it, we only need
1713 * to get a reselection data interrupt if we
1714 * guessed wrongly. Since the interrupt
1715 * overhead is much greater than the command
1716 * setup, this would be an efficient
1717 * optimisation particularly as we probably
1718 * only have one outstanding command on a
1719 * target most of the time */
1721 resume_offset = process_selection(host, dsp);
1728 if(hostdata->state != NCR_700_HOST_BUSY) {
1729 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1730 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1731 hostdata->state = NCR_700_HOST_BUSY;
1734 DEBUG(("Attempting to resume at %x\n", resume_offset));
1735 NCR_700_clear_fifo(host);
1736 NCR_700_writel(resume_offset, host, DSP_REG);
1738 /* There is probably a technical no-no about this: If we're a
1739 * shared interrupt and we got this interrupt because the
1740 * other device needs servicing not us, we're still going to
1741 * check our queued commands here---of course, there shouldn't
1742 * be any outstanding.... */
1743 if(hostdata->state == NCR_700_HOST_FREE) {
1746 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1747 /* fairness: always run the queue from the last
1748 * position we left off */
1749 int j = (i + hostdata->saved_slot_position)
1750 % NCR_700_COMMAND_SLOTS_PER_HOST;
1752 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1754 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1755 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1756 host->host_no, &hostdata->slots[j],
1757 hostdata->slots[j].cmnd));
1758 hostdata->saved_slot_position = j + 1;
1765 spin_unlock_irqrestore(host->host_lock, flags);
1766 return IRQ_RETVAL(handled);
1770 NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1772 struct NCR_700_Host_Parameters *hostdata =
1773 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1775 enum dma_data_direction direction;
1776 struct NCR_700_command_slot *slot;
1778 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1779 /* We're over our allocation, this should never happen
1780 * since we report the max allocation to the mid layer */
1781 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1784 /* check for untagged commands. We cannot have any outstanding
1785 * commands if we accept them. Commands could be untagged because:
1787 * - The tag negotiated bitmap is clear
1788 * - The blk layer sent and untagged command
1790 if(NCR_700_get_depth(SCp->device) != 0
1791 && (!(hostdata->tag_negotiated & (1<<SCp->device->id))
1792 || !blk_rq_tagged(SCp->request))) {
1793 DEBUG((KERN_ERR "scsi%d (%d:%d) has non zero depth %d\n",
1794 SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
1795 NCR_700_get_depth(SCp->device)));
1796 return SCSI_MLQUEUE_DEVICE_BUSY;
1798 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1799 DEBUG((KERN_ERR "scsi%d (%d:%d) has max tag depth %d\n",
1800 SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
1801 NCR_700_get_depth(SCp->device)));
1802 return SCSI_MLQUEUE_DEVICE_BUSY;
1804 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1806 /* begin the command here */
1807 /* no need to check for NULL, test for command_slot_count above
1808 * ensures a slot is free */
1809 slot = find_empty_slot(hostdata);
1813 SCp->scsi_done = done;
1814 SCp->host_scribble = (unsigned char *)slot;
1815 SCp->SCp.ptr = NULL;
1816 SCp->SCp.buffer = NULL;
1818 #ifdef NCR_700_DEBUG
1819 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1820 scsi_print_command(SCp);
1822 if(blk_rq_tagged(SCp->request)
1823 && (hostdata->tag_negotiated &(1<<SCp->device->id)) == 0
1824 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1825 printk(KERN_ERR "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1826 hostdata->tag_negotiated |= (1<<SCp->device->id);
1827 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1830 /* here we may have to process an untagged command. The gate
1831 * above ensures that this will be the only one outstanding,
1832 * so clear the tag negotiated bit.
1834 * FIXME: This will royally screw up on multiple LUN devices
1836 if(!blk_rq_tagged(SCp->request)
1837 && (hostdata->tag_negotiated &(1<<SCp->device->id))) {
1838 printk(KERN_INFO "scsi%d: (%d:%d) Disabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1839 hostdata->tag_negotiated &= ~(1<<SCp->device->id);
1842 if((hostdata->tag_negotiated &(1<<SCp->device->id))
1843 && scsi_get_tag_type(SCp->device)) {
1844 slot->tag = SCp->request->tag;
1845 DEBUG(("53c700 %d:%d:%d, sending out tag %d, slot %p\n",
1846 SCp->device->host->host_no, SCp->device->id, SCp->device->lun, slot->tag,
1849 slot->tag = SCSI_NO_TAG;
1850 /* must populate current_cmnd for scsi_find_tag to work */
1851 SCp->device->current_cmnd = SCp;
1853 /* sanity check: some of the commands generated by the mid-layer
1854 * have an eccentric idea of their sc_data_direction */
1855 if(!SCp->use_sg && !SCp->request_bufflen
1856 && SCp->sc_data_direction != DMA_NONE) {
1857 #ifdef NCR_700_DEBUG
1858 printk("53c700: Command");
1859 scsi_print_command(SCp);
1860 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1862 SCp->sc_data_direction = DMA_NONE;
1865 switch (SCp->cmnd[0]) {
1867 /* clear the internal sense magic */
1871 /* OK, get it from the command */
1872 switch(SCp->sc_data_direction) {
1873 case DMA_BIDIRECTIONAL:
1875 printk(KERN_ERR "53c700: Unknown command for data direction ");
1876 scsi_print_command(SCp);
1883 case DMA_FROM_DEVICE:
1884 move_ins = SCRIPT_MOVE_DATA_IN;
1887 move_ins = SCRIPT_MOVE_DATA_OUT;
1892 /* now build the scatter gather list */
1893 direction = SCp->sc_data_direction;
1897 dma_addr_t vPtr = 0;
1901 sg_count = dma_map_sg(hostdata->dev, SCp->buffer,
1902 SCp->use_sg, direction);
1904 vPtr = dma_map_single(hostdata->dev,
1905 SCp->request_buffer,
1906 SCp->request_bufflen,
1908 count = SCp->request_bufflen;
1909 slot->dma_handle = vPtr;
1914 for(i = 0; i < sg_count; i++) {
1917 struct scatterlist *sg = SCp->buffer;
1919 vPtr = sg_dma_address(&sg[i]);
1920 count = sg_dma_len(&sg[i]);
1923 slot->SG[i].ins = bS_to_host(move_ins | count);
1924 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1925 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1926 slot->SG[i].pAddr = bS_to_host(vPtr);
1928 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1929 slot->SG[i].pAddr = 0;
1930 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1931 DEBUG((" SETTING %08lx to %x\n",
1932 (&slot->pSG[i].ins),
1935 slot->resume_offset = 0;
1936 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1937 sizeof(SCp->cmnd), DMA_TO_DEVICE);
1938 NCR_700_start_command(SCp);
1943 NCR_700_abort(struct scsi_cmnd * SCp)
1945 struct NCR_700_command_slot *slot;
1947 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants to abort command\n\t",
1948 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1949 scsi_print_command(SCp);
1951 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1954 /* no outstanding command to abort */
1956 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1957 /* FIXME: This is because of a problem in the new
1958 * error handler. When it is in error recovery, it
1959 * will send a TUR to a device it thinks may still be
1960 * showing a problem. If the TUR isn't responded to,
1961 * it will abort it and mark the device off line.
1962 * Unfortunately, it does no other error recovery, so
1963 * this would leave us with an outstanding command
1964 * occupying a slot. Rather than allow this to
1965 * happen, we issue a bus reset to force all
1966 * outstanding commands to terminate here. */
1967 NCR_700_internal_bus_reset(SCp->device->host);
1968 /* still drop through and return failed */
1975 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1977 DECLARE_COMPLETION(complete);
1978 struct NCR_700_Host_Parameters *hostdata =
1979 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1981 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants BUS reset, cmd %p\n\t",
1982 SCp->device->host->host_no, SCp->device->id, SCp->device->lun, SCp);
1983 scsi_print_command(SCp);
1984 /* In theory, eh_complete should always be null because the
1985 * eh is single threaded, but just in case we're handling a
1986 * reset via sg or something */
1987 while(hostdata->eh_complete != NULL) {
1988 spin_unlock_irq(SCp->device->host->host_lock);
1989 msleep_interruptible(100);
1990 spin_lock_irq(SCp->device->host->host_lock);
1992 hostdata->eh_complete = &complete;
1993 NCR_700_internal_bus_reset(SCp->device->host);
1994 spin_unlock_irq(SCp->device->host->host_lock);
1995 wait_for_completion(&complete);
1996 spin_lock_irq(SCp->device->host->host_lock);
1997 hostdata->eh_complete = NULL;
1998 /* Revalidate the transport parameters of the failing device */
2000 spi_schedule_dv_device(SCp->device);
2005 NCR_700_dev_reset(struct scsi_cmnd * SCp)
2007 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants device reset\n\t",
2008 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
2009 scsi_print_command(SCp);
2015 NCR_700_host_reset(struct scsi_cmnd * SCp)
2017 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants HOST reset\n\t",
2018 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
2019 scsi_print_command(SCp);
2021 NCR_700_internal_bus_reset(SCp->device->host);
2022 NCR_700_chip_reset(SCp->device->host);
2027 NCR_700_set_period(struct scsi_target *STp, int period)
2029 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2030 struct NCR_700_Host_Parameters *hostdata =
2031 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2036 if(period < hostdata->min_period)
2037 period = hostdata->min_period;
2039 spi_period(STp) = period;
2040 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2041 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2042 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2046 NCR_700_set_offset(struct scsi_target *STp, int offset)
2048 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2049 struct NCR_700_Host_Parameters *hostdata =
2050 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2051 int max_offset = hostdata->chip710
2052 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2057 if(offset > max_offset)
2058 offset = max_offset;
2060 /* if we're currently async, make sure the period is reasonable */
2061 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2062 spi_period(STp) > 0xff))
2063 spi_period(STp) = hostdata->min_period;
2065 spi_offset(STp) = offset;
2066 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2067 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2068 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2074 NCR_700_slave_configure(struct scsi_device *SDp)
2076 struct NCR_700_Host_Parameters *hostdata =
2077 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2079 /* to do here: allocate memory; build a queue_full list */
2080 if(SDp->tagged_supported) {
2081 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2082 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2083 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2085 /* initialise to default depth */
2086 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2088 if(hostdata->fast) {
2089 /* Find the correct offset and period via domain validation */
2090 if (!spi_initial_dv(SDp->sdev_target))
2093 spi_offset(SDp->sdev_target) = 0;
2094 spi_period(SDp->sdev_target) = 0;
2100 NCR_700_slave_destroy(struct scsi_device *SDp)
2102 /* to do here: deallocate memory */
2106 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2108 if (depth > NCR_700_MAX_TAGS)
2109 depth = NCR_700_MAX_TAGS;
2111 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2115 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2117 int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
2118 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2119 struct NCR_700_Host_Parameters *hostdata =
2120 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2122 scsi_set_tag_type(SDp, tag_type);
2124 /* We have a global (per target) flag to track whether TCQ is
2125 * enabled, so we'll be turning it off for the entire target here.
2126 * our tag algorithm will fail if we mix tagged and untagged commands,
2127 * so quiesce the device before doing this */
2129 scsi_target_quiesce(SDp->sdev_target);
2132 /* shift back to the default unqueued number of commands
2133 * (the user can still raise this) */
2134 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2135 hostdata->tag_negotiated &= ~(1 << SDp->id);
2137 /* Here, we cleared the negotiation flag above, so this
2138 * will force the driver to renegotiate */
2139 scsi_activate_tcq(SDp, SDp->queue_depth);
2141 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2144 scsi_target_resume(SDp->sdev_target);
2150 NCR_700_show_active_tags(struct device *dev, char *buf)
2152 struct scsi_device *SDp = to_scsi_device(dev);
2154 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2157 static struct device_attribute NCR_700_active_tags_attr = {
2159 .name = "active_tags",
2162 .show = NCR_700_show_active_tags,
2165 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2166 &NCR_700_active_tags_attr,
/* entry points used by the board-specific wrapper drivers (e.g. NCR_D700) */
EXPORT_SYMBOL(NCR_700_detect);
EXPORT_SYMBOL(NCR_700_release);
EXPORT_SYMBOL(NCR_700_intr);
2174 static struct spi_function_template NCR_700_transport_functions = {
2175 .set_period = NCR_700_set_period,
2177 .set_offset = NCR_700_set_offset,
2181 static int __init NCR_700_init(void)
2183 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2184 if(!NCR_700_transport_template)
2189 static void __exit NCR_700_exit(void)
2191 spi_release_transport(NCR_700_transport_template);
/* run NCR_700_init/NCR_700_exit at module load/unload */
module_init(NCR_700_init);
module_exit(NCR_700_exit);