2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License as published by the
4 * Free Software Foundation; either version 2, or (at your option) any
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
12 * For the avoidance of doubt the "preferred form" of this code is one which
13 * is in an open non patent encumbered format. Where cryptographic key signing
14 * forms part of the process of creating an executable the information
15 * including keys needed to generate an equivalently functional executable
16 * are deemed to be part of the source code.
18 * Complications for I2O scsi
20 * o Each (bus,lun) is a logical device in I2O. We keep a map
21 * table. We spoof failed selection for unmapped units
22 * o Request sense buffers can come back for free.
23 * o Scatter gather is a bit dynamic. We have to investigate at
25 * o Some of our resources are dynamically shared. The i2o core
26 * needs a message reservation protocol to avoid swap v net
27 * deadlocking. We need to back off queue requests.
29 * In general the firmware wants to help. Where its help isn't performance
30 * useful we just ignore the aid. It's not worth the code in truth.
34 * Scatter gather now works
35 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
36 * Minor fixes for 2.6.
40 * Fix the resource management problems.
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/types.h>
47 #include <linux/string.h>
48 #include <linux/ioport.h>
49 #include <linux/jiffies.h>
50 #include <linux/interrupt.h>
51 #include <linux/timer.h>
52 #include <linux/delay.h>
53 #include <linux/proc_fs.h>
54 #include <linux/prefetch.h>
55 #include <linux/pci.h>
57 #include <asm/system.h>
59 #include <asm/atomic.h>
60 #include <linux/blkdev.h>
61 #include <linux/i2o.h>
63 #include <scsi/scsi.h>
64 #include <scsi/scsi_cmnd.h>
65 #include <scsi/scsi_device.h>
66 #include <scsi/scsi_host.h>
69 #define VERSION_STRING "Version 0.1.2"
74 #define dprintk(s, args...) printk(s, ## args)
76 #define dprintk(s, args...)
79 #define I2O_SCSI_CAN_QUEUE 4
/*
 * NOTE(review): the lines below appear to be the interior of the per-host
 * private structure (presumably struct i2o_scsi_host) followed by module
 * statics; the struct header is missing from this extraction — confirm
 * against the full source.
 */
84 struct i2o_controller *controller;
85 s16 task[16][8]; /* Allow 16 devices for now */
86 unsigned long tagclock[16][8]; /* Tag clock for queueing */
87 s16 bus_task; /* The adapter TID */
/* Initiator context handed to us by the i2o core; stamped into msg[2] of
   every request so replies route back to i2o_scsi_reply(). */
90 static int scsi_context;
/* Count of registered Scsi_Host instances; last release tears down the OSM. */
92 static int i2o_scsi_hosts;
/* Congested frames awaiting retry (reposted by i2o_retry_run); bounded to 32
   entries, guarded by retry_lock. */
94 static u32 *retry[32];
95 static struct i2o_controller *retry_ctrl[32];
96 static struct timer_list retry_timer;
97 static spinlock_t retry_lock = SPIN_LOCK_UNLOCKED;
98 static int retry_ct = 0;
/* Commands currently outstanding across all controllers (debug/queue-depth
   reporting only). */
100 static atomic_t queue_depth;
103 * SG Chain buffer support...
/* Maximum scatter-gather fragments one chain buffer can describe. */
106 #define SG_MAX_FRAGS 64
109 * FIXME: we should allocate one of these per bus we find as we
110 * locate them not in a lump at boot.
/* One out-of-frame SG list: parallel arrays of flag/count words and buffer
   addresses, consumed by the IOP when the in-frame SG space is exceeded.
   NOTE(review): struct body lines are missing from this extraction. */
113 typedef struct _chain_buf
115 u32 sg_flags_cnt[SG_MAX_FRAGS];
116 u32 sg_buf[SG_MAX_FRAGS];
119 #define SG_CHAIN_BUF_SZ sizeof(chain_buf)
/* Pool sized for the worst case: every controller with a full queue. */
121 #define SG_MAX_BUFS (i2o_num_controllers * I2O_SCSI_CAN_QUEUE)
122 #define SG_CHAIN_POOL_SZ (SG_MAX_BUFS * SG_CHAIN_BUF_SZ)
/* High-water mark of SG fragments seen (diagnostic printk only). */
124 static int max_sg_len = 0;
/* Chain pool allocated in i2o_scsi_detect(); NULL disables SG chaining. */
125 static chain_buf *sg_chain_pool = NULL;
/* Round-robin index of the next chain buffer to hand out. */
126 static int sg_chain_tag = 0;
127 static int sg_max_frags = SG_MAX_FRAGS;
130 * i2o_retry_run - retry on timeout
133 * Retry congested frames. This actually needs pushing down into
134 * i2o core. We should only bother the OSM with this when we can't
135 * queue and retry the frame. Or perhaps we should call the OSM
136 * and its default handler should be this in the core, and this
137 * call a 2nd "I give up" handler in the OSM ?
/*
 * Timer callback: repost every queued congested frame to its controller.
 * Runs with retry_lock held to protect retry[]/retry_ctrl[]/retry_ct.
 * NOTE(review): the opening brace, local declarations (i, flags) and the
 * presumed retry_ct reset are missing from this extraction — confirm.
 */
140 static void i2o_retry_run(unsigned long f)
145 spin_lock_irqsave(&retry_lock, flags);
146 for(i=0;i<retry_ct;i++)
147 i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
149 spin_unlock_irqrestore(&retry_lock, flags);
153 * flush_pending - empty the retry queue
155 * Turn each of the pending commands into a NOP and post it back
156 * to the controller to clear it.
/*
 * Discard the retry queue: rewrite each pending frame's command byte to
 * UTIL_NOP (clearing the low 24 bits first, then OR-ing the opcode into
 * bits 31:24) and post it back so the controller frees the frame.
 * NOTE(review): locals and the retry_ct reset are missing from this
 * extraction — confirm against the full source.
 */
159 static void flush_pending(void)
164 spin_lock_irqsave(&retry_lock, flags);
165 for(i=0;i<retry_ct;i++)
167 retry[i][0]&=~0xFFFFFF;
168 retry[i][0]|=I2O_CMD_UTIL_NOP<<24;
169 i2o_post_message(retry_ctrl[i],virt_to_bus(retry[i]));
172 spin_unlock_irqrestore(&retry_lock, flags);
176 * i2o_scsi_reply - scsi message reply processor
177 * @h: our i2o handler
178 * @c: controller issuing the reply
179 * @msg: the message from the controller (mapped)
181 * Process reply messages (interrupts in normal scsi controller think).
182 * We can get a variety of messages to process. The normal path is
183 * scsi command completions. We must also deal with IOP failures,
184 * the reply to a bus reset and the reply to a LUN query.
186 * Locks: the queue lock is taken to call the completion handler
/*
 * Reply-path handler registered with the i2o core. Decodes IOP failure
 * frames (retrying congested ones via the retry timer, erroring the rest),
 * bus-reset confirmations, abort completions and normal command
 * completions; for completions it unmaps DMA and calls scsi_done under the
 * host lock.
 * NOTE(review): many lines (switch labels, braces, locals m/ds/as/st/lock/
 * flags) are missing from this extraction; comments describe only what is
 * visible.
 */
189 static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
191 struct scsi_cmnd *current_command;
199 printk("IOP fail.\n");
200 printk("From %d To %d Cmd %d.\n",
204 printk("Failure Code %d.\n", m[4]>>24);
206 printk("Format error.\n");
208 printk("Path error.\n");
210 printk("Path State.\n");
212 printk("Congestion.\n");
/* m[7] holds the bus address of the original failed request frame. */
214 m=(u32 *)bus_to_virt(m[7]);
215 printk("Failing message is %p.\n", m);
217 /* This isn't a fast path .. */
218 spin_lock_irqsave(&retry_lock, flags);
/* Bit 18 of m[4] marks the failure as retryable; queue is capped at 32. */
220 if((m[4]&(1<<18)) && retry_ct < 32)
222 retry_ctrl[retry_ct]=c;
/* Kick the retry on the very next tick. */
226 retry_timer.expires=jiffies+1;
227 add_timer(&retry_timer);
229 spin_unlock_irqrestore(&retry_lock, flags);
233 spin_unlock_irqrestore(&retry_lock, flags);
234 /* Create a scsi error for this */
/* m[3] is our transaction context; map it back to the scsi_cmnd. */
235 current_command = (struct scsi_cmnd *)i2o_context_list_get(m[3], c);
239 lock = current_command->device->host->host_lock;
240 printk("Aborted %ld\n", current_command->serial_number);
/* Completion handler must run under the host lock. */
242 spin_lock_irqsave(lock, flags);
243 current_command->result = DID_ERROR << 16;
244 current_command->scsi_done(current_command);
245 spin_unlock_irqrestore(lock, flags);
247 /* Now flush the message by making it a NOP */
249 m[0]|=(I2O_CMD_UTIL_NOP)<<24;
250 i2o_post_message(c,virt_to_bus(m));
255 prefetchw(&queue_depth);
259 * Low byte is device status, next is adapter status,
260 * (then one byte reserved), then request status.
262 ds=(u8)le32_to_cpu(m[4]);
263 as=(u8)le32_to_cpu(m[4]>>8);
264 st=(u8)le32_to_cpu(m[4]>>24);
266 dprintk(KERN_INFO "i2o got a scsi reply %08X: ", m[0]);
267 dprintk(KERN_INFO "m[2]=%08X: ", m[2]);
268 dprintk(KERN_INFO "m[4]=%08X\n", m[4]);
274 dprintk(KERN_INFO "Event.\n");
278 printk(KERN_INFO "i2o_scsi: bus reset completed.\n");
282 current_command = (struct scsi_cmnd *)i2o_context_list_get(m[3], c);
285 * Is this a control request coming back - eg an abort ?
288 atomic_dec(&queue_depth);
/* NULL context means this reply is for a control request (abort), not a
   queued command. */
290 if(current_command==NULL)
293 dprintk(KERN_WARNING "SCSI abort: %08X", m[4]);
294 dprintk(KERN_INFO "SCSI abort completed.\n");
298 dprintk(KERN_INFO "Completed %ld\n", current_command->serial_number);
/* m[5] is the actual transfer count; complain about underruns. */
302 if(le32_to_cpu(m[5]) < current_command->underflow)
305 printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n",
306 le32_to_cpu(m[5]), current_command->underflow);
309 printk("%02X ", current_command->cmnd[i]);
317 /* An error has occurred */
319 dprintk(KERN_WARNING "SCSI error %08X", m[4]);
/* Map adapter status codes onto the closest mid-layer result codes. */
323 current_command->result = DID_RESET << 16;
325 current_command->result = DID_PARITY << 16;
327 current_command->result = DID_ERROR << 16;
/* Success: DID_OK plus the device status byte. */
333 current_command->result = DID_OK << 16 | ds;
/* Tear down the DMA mapping set up in queuecommand (SG or single). */
335 if (current_command->use_sg) {
336 pci_unmap_sg(c->pdev,
337 (struct scatterlist *)current_command->buffer,
338 current_command->use_sg,
339 current_command->sc_data_direction);
340 } else if (current_command->request_bufflen) {
341 pci_unmap_single(c->pdev,
342 (dma_addr_t)((long)current_command->SCp.ptr),
343 current_command->request_bufflen,
344 current_command->sc_data_direction);
347 lock = current_command->device->host->host_lock;
348 spin_lock_irqsave(lock, flags);
349 current_command->scsi_done(current_command);
350 spin_unlock_irqrestore(lock, flags);
/* OSM registration record handed to i2o_install_handler(): routes
   I2O_CLASS_SCSI_PERIPHERAL replies to i2o_scsi_reply(). */
354 struct i2o_handler i2o_scsi_handler = {
355 .reply = i2o_scsi_reply,
356 .name = "I2O SCSI OSM",
357 .class = I2O_CLASS_SCSI_PERIPHERAL,
361 * i2o_find_lun - report the lun of an i2o device
362 * @c: i2o controller owning the device
363 * @d: i2o disk device
364 * @target: filled in with target id
365 * @lun: filled in with target lun
367 * Query an I2O device to find out its SCSI lun and target numbering. We
368 * don't currently handle some of the fancy SCSI-3 stuff although our
369 * querying is sufficient to do so.
/*
 * Query device TID scalar groups 0/field 3 and 0/field 4 to recover the
 * SCSI target id and LUN; returns <0 on query failure (error-return lines
 * missing from this extraction — confirm).
 */
372 static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun)
376 if(i2o_query_scalar(c, d->lct_data.tid, 0, 3, reply, 4)<0)
381 if(i2o_query_scalar(c, d->lct_data.tid, 0, 4, reply, 8)<0)
386 dprintk(KERN_INFO "SCSI (%d,%d)\n", *target, *lun);
391 * i2o_scsi_init - initialize an i2o device for scsi
392 * @c: i2o controller owning the device
393 * @d: scsi controller
394 * @shpnt: scsi device we wish it to become
396 * Enumerate the scsi peripheral/fibre channel peripheral class
397 * devices that are children of the controller. From that we build
398 * a translation map for the command queue code. Since I2O works on
399 * its own tid's we effectively have to think backwards to get what
/*
 * Build the (target,lun) -> TID map for one bus adapter: walk the
 * controller's device list, keep SCSI/FC peripheral children of this
 * adapter, and record each one's TID and tag clock. Also queries the IOP
 * for its SG fragment limit and clamps shpnt->sg_tablesize.
 */
403 static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt)
405 struct i2o_device *unit;
406 struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata;
411 h->bus_task=d->lct_data.tid;
/* Start with every (target,lun) unmapped; -1 spoofs selection timeout. */
413 for(target=0;target<16;target++)
414 for(lun=0;lun<8;lun++)
415 h->task[target][lun] = -1;
417 for(unit=c->devices;unit!=NULL;unit=unit->next)
419 dprintk(KERN_INFO "Class %03X, parent %d, want %d.\n",
420 unit->lct_data.class_id, unit->lct_data.parent_tid, d->lct_data.tid);
422 /* Only look at scsi and fc devices */
423 if ( (unit->lct_data.class_id != I2O_CLASS_SCSI_PERIPHERAL)
424 && (unit->lct_data.class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL)
429 dprintk(KERN_INFO "Found a disk (%d).\n", unit->lct_data.tid);
/* Accept children of this adapter or of its parent (FC topologies). */
430 if ((unit->lct_data.parent_tid == d->lct_data.tid)
431 || (unit->lct_data.parent_tid == d->lct_data.parent_tid)
435 dprintk(KERN_INFO "Its ours.\n");
436 if(i2o_find_lun(c, unit, &target, &lun)==-1)
438 printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", unit->lct_data.tid);
441 dprintk(KERN_INFO "Found disk %d %d.\n", target, lun);
442 h->task[target][lun]=unit->lct_data.tid;
443 h->tagclock[target][lun]=jiffies;
445 /* Get the max fragments/request */
/* Group 0xF103 field 3 is the IOP's SG limit (2-byte scalar). */
446 i2o_query_scalar(c, d->lct_data.tid, 0xF103, 3, &limit, 2);
/* A zero limit from a broken IOP would disable I/O; ignore it. */
451 printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n");
455 shpnt->sg_tablesize = limit;
457 dprintk(KERN_INFO "i2o_scsi: set scatter-gather to %d.\n",
458 shpnt->sg_tablesize);
464 * i2o_scsi_detect - probe for I2O scsi devices
465 * @tpnt: scsi layer template
467 * I2O is a little odd here. The I2O core already knows what the
468 * devices are. It also knows them by disk and tape as well as
469 * by controller. We register each I2O scsi class object as a
470 * scsi controller and then let the enumeration fake up the rest
/*
 * Detect entry point for the scsi_module.c template: install the OSM
 * handler, allocate the SG chain pool (chaining is disabled if the kmalloc
 * fails), set up the retry timer, then register one Scsi_Host per
 * bus-adapter-port device on every controller. Returns the host count
 * (return lines missing from this extraction — confirm).
 */
473 static int i2o_scsi_detect(struct scsi_host_template * tpnt)
475 struct Scsi_Host *shpnt = NULL;
479 printk(KERN_INFO "i2o_scsi.c: %s\n", VERSION_STRING);
481 if(i2o_install_handler(&i2o_scsi_handler)<0)
483 printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n");
/* Context assigned by the core; stamped into every outbound message. */
486 scsi_context = i2o_scsi_handler.context;
488 if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL)
490 printk(KERN_INFO "i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ);
491 printk(KERN_INFO "i2o_scsi: SG chaining DISABLED!\n");
496 printk(KERN_INFO " chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool);
497 printk(KERN_INFO " (%d byte buffers X %d can_queue X %d i2o controllers)\n",
498 SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers);
499 sg_max_frags = SG_MAX_FRAGS; // 64
502 init_timer(&retry_timer);
503 retry_timer.data = 0UL;
504 retry_timer.function = i2o_retry_run;
506 // printk("SCSI OSM at %d.\n", scsi_context);
508 for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++)
510 struct i2o_controller *c=i2o_find_controller(i);
511 struct i2o_device *d;
513 * This controller doesn't exist.
520 * Fixme - we need some altered device locking. This
521 * is racing with device addition in theory. Easy to fix.
524 for(d=c->devices;d!=NULL;d=d->next)
527 * bus_adapter, SCSI (obsolete), or FibreChannel busses only
529 if( (d->lct_data.class_id!=I2O_CLASS_BUS_ADAPTER_PORT) // bus_adapter
530 // && (d->lct_data.class_id!=I2O_CLASS_FIBRE_CHANNEL_PORT) // FC_PORT
534 shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host));
/* NOTE(review): storing a pointer in a u32 truncates on 64-bit —
   pre-existing issue in this legacy code, flagged not fixed here. */
537 shpnt->unique_id = (u32)d;
539 shpnt->n_io_port = 0;
541 shpnt->this_id = /* Good question */15;
542 i2o_scsi_init(c, d, shpnt);
546 i2o_scsi_hosts = count;
/* No hosts found: undo everything (pool, timer, handler). */
550 if(sg_chain_pool!=NULL)
552 kfree(sg_chain_pool);
553 sg_chain_pool = NULL;
556 del_timer(&retry_timer);
557 i2o_remove_handler(&i2o_scsi_handler);
/*
 * Release one host; when the last host goes away, free the SG chain pool,
 * stop the retry timer and deregister the OSM handler, then unregister
 * the Scsi_Host itself.
 */
563 static int i2o_scsi_release(struct Scsi_Host *host)
565 if(--i2o_scsi_hosts==0)
567 if(sg_chain_pool!=NULL)
569 kfree(sg_chain_pool);
570 sg_chain_pool = NULL;
573 del_timer(&retry_timer);
574 i2o_remove_handler(&i2o_scsi_handler);
577 scsi_unregister(host);
/* .info hook: report the owning controller's name string for this host. */
583 static const char *i2o_scsi_info(struct Scsi_Host *SChost)
585 struct i2o_scsi_host *hostdata;
586 hostdata = (struct i2o_scsi_host *)SChost->hostdata;
587 return(&hostdata->controller->name[0]);
591 * i2o_scsi_queuecommand - queue a SCSI command
592 * @SCpnt: scsi command pointer
593 * @done: callback for completion
595 * Issue a scsi command asynchronously. Return 0 on success or 1 if
596 * we hit an error (normally message queue congestion). The only
597 * minor complication here is that I2O deals with the device addressing
598 * so we have to map the bus/dev/lun back to an I2O handle as well
599 * as faking absent devices ourself.
601 * Locks: takes the controller lock on error path only
/*
 * Queue one SCSI command as an I2O SCSI_EXEC message: map (id,lun) to a
 * TID (spoofing DID_NO_CONNECT for unmapped units), grab a message frame,
 * build the header/tag/CDB, attach an in-frame or chained SG list (or a
 * single mapped buffer), then post the frame. Returns 0 on success or 1
 * on congestion so the mid-layer retries.
 * NOTE(review): many lines (locals, braces, early returns, retry/empty
 * paths) are missing from this extraction; comments describe only what is
 * visible.
 */
604 static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
605 void (*done) (struct scsi_cmnd *))
609 struct i2o_controller *c;
610 struct scsi_cmnd *current_command;
611 struct Scsi_Host *host;
612 struct i2o_scsi_host *hostdata;
/* High-water mark for queue-depth reporting; static across calls. */
623 static int max_qd = 1;
626 * Do the incoming paperwork
629 host = SCpnt->device->host;
630 hostdata = (struct i2o_scsi_host *)host->hostdata;
632 c = hostdata->controller;
634 prefetchw(&queue_depth);
636 SCpnt->scsi_done = done;
/* Our map only covers targets 0..15. */
638 if(SCpnt->device->id > 15)
640 printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->device->id);
644 tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
646 dprintk(KERN_INFO "qcmd: Tid = %d\n", tid);
648 current_command = SCpnt; /* set current command */
649 current_command->scsi_done = done; /* set ptr to done function */
651 /* We don't have such a device. Pretend we did the command
652 and that selection timed out */
656 SCpnt->result = DID_NO_CONNECT << 16;
661 dprintk(KERN_INFO "Real scsi messages.\n");
664 * Obtain an I2O message. If there are none free then
665 * throw it back to the scsi layer
668 m = le32_to_cpu(I2O_POST_READ32(c));
672 msg = (u32 *)(c->msg_virt + m);
675 * Put together a scsi execscb message
678 len = SCpnt->request_bufflen;
679 direction = 0x00000000; // SGL IN (osm<--iop)
/* Translate the mid-layer DMA direction into I2O SGL/SCSI flag words. */
681 if (SCpnt->sc_data_direction == DMA_NONE) {
682 scsidir = 0x00000000; // DATA NO XFER
683 } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
684 direction = 0x04000000; // SGL OUT (osm-->iop)
685 scsidir = 0x80000000; // DATA OUT (iop-->dev)
686 } else if(SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
687 scsidir = 0x40000000; // DATA IN (iop<--dev)
689 /* Unknown - kill the command */
690 SCpnt->result = DID_NO_CONNECT << 16;
692 /* We must lock the request queue while completing */
693 spin_lock_irqsave(host->host_lock, flags);
695 spin_unlock_irqrestore(host->host_lock, flags);
700 i2o_raw_writel(I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid, &msg[1]);
701 i2o_raw_writel(scsi_context, &msg[2]); /* So the I2O layer passes to us */
702 i2o_raw_writel(i2o_context_list_add(SCpnt, c), &msg[3]); /* We want the SCSI control block back */
706 * Intermittent observations of msg frame word data corruption
707 * observed on msg[4] after:
708 * WRITE, READ-MODIFY-WRITE
709 * operations. 19990606 -sralston
711 * (Hence we build this word via tag. It's good practice anyway
712 * we don't want fetches over PCI needlessly)
718 * Attach tags to the devices
720 if(SCpnt->device->tagged_supported)
723 * Some drives are too stupid to handle fairness issues
724 * with tagged queueing. We throw in the odd ordered
725 * tag to stop them starving themselves.
/* At most one ORDERED tag per 5 seconds per (target,lun). */
727 if((jiffies - hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]) > (5*HZ))
729 tag=0x01800000; /* ORDERED! */
730 hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]=jiffies;
734 /* Hmmm... I always see value of 0 here,
735 * of which {HEAD_OF, ORDERED, SIMPLE} are NOT! -sralston
737 if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
739 else if(SCpnt->tag == ORDERED_QUEUE_TAG)
744 /* Direction, disconnect ok, tag, CDBLen */
745 i2o_raw_writel(scsidir|0x20000000|SCpnt->cmd_len|tag, &msg[4]);
750 * Write SCSI command into the message - always 16 byte block
753 memcpy_toio(mptr, SCpnt->cmnd, 16);
755 lenptr=mptr++; /* Remember me - fill in when we know */
757 reqlen = 12; // SINGLE SGE
760 * Now fill in the SGList and command
762 * FIXME: we need to set the sglist limits according to the
763 * message size of the I2O controller. We might only have room
764 * for 6 or so worst case
769 struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
775 sg_count = pci_map_sg(c->pdev, sg, SCpnt->use_sg,
776 SCpnt->sc_data_direction);
778 /* FIXME: handle fail */
/* More than 11 fragments cannot fit in the frame: spill the SG list
   into a chain buffer and point the frame at it. */
782 if((sg_max_frags > 11) && (SCpnt->use_sg > 11))
788 i2o_raw_writel(direction|0xB0000000|(SCpnt->use_sg*2*4), mptr++);
789 i2o_raw_writel(virt_to_bus(sg_chain_pool + sg_chain_tag), mptr);
790 mptr = (u32*)(sg_chain_pool + sg_chain_tag);
791 if (SCpnt->use_sg > max_sg_len)
793 max_sg_len = SCpnt->use_sg;
794 printk("i2o_scsi: Chain SG! SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n",
795 SCpnt, SCpnt->use_sg, sg_chain_tag);
/* Wrap the round-robin chain-buffer index at pool end. */
797 if ( ++sg_chain_tag == SG_MAX_BUFS )
799 for(i = 0 ; i < SCpnt->use_sg; i++)
801 *mptr++=cpu_to_le32(direction|0x10000000|sg_dma_len(sg));
803 *mptr++=cpu_to_le32(sg_dma_address(sg));
/* Rewrite the last SGE's flags to mark end-of-list (0xD...). */
806 mptr[-2]=cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1));
/* In-frame SG list path (<= 11 fragments). */
810 for(i = 0 ; i < SCpnt->use_sg; i++)
812 i2o_raw_writel(direction|0x10000000|sg_dma_len(sg), mptr++);
814 i2o_raw_writel(sg_dma_address(sg), mptr++);
818 /* Make this an end of list. Again evade the 920 bug and
819 unwanted PCI read traffic */
821 i2o_raw_writel(direction|0xD0000000|sg_dma_len(sg-1), &mptr[-2]);
827 i2o_raw_writel(len, lenptr);
829 if(len != SCpnt->underflow)
830 printk("Cmd len %08X Cmd underflow %08X\n",
831 len, SCpnt->underflow);
/* Non-SG path: map the single request buffer and emit one SGE. */
835 dprintk(KERN_INFO "non sg for %p, %d\n", SCpnt->request_buffer,
836 SCpnt->request_bufflen);
837 i2o_raw_writel(len = SCpnt->request_bufflen, lenptr);
845 dma_addr = pci_map_single(c->pdev,
846 SCpnt->request_buffer,
847 SCpnt->request_bufflen,
848 SCpnt->sc_data_direction);
850 BUG(); /* How to handle ?? */
/* Stash the dma handle so the reply path can pci_unmap_single(). */
851 SCpnt->SCp.ptr = (char *)(unsigned long) dma_addr;
852 i2o_raw_writel(0xD0000000|direction|SCpnt->request_bufflen, mptr++);
853 i2o_raw_writel(dma_addr, mptr++);
858 * Stick the headers on
861 i2o_raw_writel(reqlen<<16 | SGL_OFFSET_10, msg);
863 /* Queue the message */
864 i2o_post_message(c,m);
866 atomic_inc(&queue_depth);
868 if(atomic_read(&queue_depth)> max_qd)
870 max_qd=atomic_read(&queue_depth);
871 printk("Queue depth now %d.\n", max_qd);
875 dprintk(KERN_INFO "Issued %ld\n", current_command->serial_number);
881 * i2o_scsi_abort - abort a running command
882 * @SCpnt: command to abort
884 * Ask the I2O controller to abort a command. This is an asynchronous
885 * process and our callback handler will see the command complete
886 * with an aborted message if it succeeds.
888 * Locks: no locks are held or needed
/*
 * eh_abort handler: look up the command's TID, drop the host lock (the
 * mid-layer holds it here), and synchronously post an I2O_CMD_SCSI_ABORT
 * via i2o_post_wait with a 240-tick timeout. The actual completion comes
 * back through i2o_scsi_reply as an aborted message.
 */
891 static int i2o_scsi_abort(struct scsi_cmnd * SCpnt)
893 struct i2o_controller *c;
894 struct Scsi_Host *host;
895 struct i2o_scsi_host *hostdata;
900 printk(KERN_WARNING "i2o_scsi: Aborting command block.\n");
902 host = SCpnt->device->host;
903 hostdata = (struct i2o_scsi_host *)host->hostdata;
904 tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
/* -1 means the unit was never mapped; nothing to abort. */
907 printk(KERN_ERR "i2o_scsi: Impossible command to abort!\n");
910 c = hostdata->controller;
/* i2o_post_wait sleeps; cannot hold the host lock across it. */
912 spin_unlock_irq(host->host_lock);
914 msg[0] = FIVE_WORD_MSG_SIZE;
915 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid;
916 msg[2] = scsi_context;
/* Remove our context entry and hand its handle to the IOP. */
918 msg[4] = i2o_context_list_remove(SCpnt, c);
919 if(i2o_post_wait(c, msg, sizeof(msg), 240))
922 spin_lock_irq(host->host_lock);
927 * i2o_scsi_bus_reset - Issue a SCSI reset
928 * @SCpnt: the command that caused the reset
930 * Perform a SCSI bus reset operation. In I2O this is just a message
931 * we pass. I2O can do clever multi-initiator and shared reset stuff
932 * but we don't support this.
934 * Locks: called with no lock held, requires no locks.
/*
 * eh_bus_reset handler: post an I2O_CMD_SCSI_BUSRESET to the bus adapter
 * TID. Spins (sleeping a tick at a time) up to 2 seconds waiting for a
 * free message frame, then builds and posts the reset. The top bit of the
 * context word distinguishes controller transactions from unit ones, and
 * unit|tid is stored so the reply can be tied back to this device.
 * NOTE(review): locals, the frame-wait loop body and return statements
 * are missing from this extraction.
 */
937 static int i2o_scsi_bus_reset(struct scsi_cmnd * SCpnt)
940 struct i2o_controller *c;
941 struct Scsi_Host *host;
942 struct i2o_scsi_host *hostdata;
945 unsigned long timeout;
949 * Find the TID for the bus
953 host = SCpnt->device->host;
/* Posting can block; release the host lock while we work. */
955 spin_unlock_irq(host->host_lock);
957 printk(KERN_WARNING "i2o_scsi: Attempting to reset the bus.\n");
959 hostdata = (struct i2o_scsi_host *)host->hostdata;
960 tid = hostdata->bus_task;
961 c = hostdata->controller;
964 * Now send a SCSI reset request. Any remaining commands
965 * will be aborted by the IOP. We need to catch the reply
969 timeout = jiffies+2*HZ;
972 m = le32_to_cpu(I2O_POST_READ32(c));
975 set_current_state(TASK_UNINTERRUPTIBLE);
979 while(time_before(jiffies, timeout));
982 msg = c->msg_virt + m;
983 i2o_raw_writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, msg);
984 i2o_raw_writel(I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid, msg+4);
985 i2o_raw_writel(scsi_context|0x80000000, msg+8);
986 /* We use the top bit to split controller and unit transactions */
987 /* Now store unit,tid so we can tie the completion back to a specific device */
988 __raw_writel(c->unit << 16 | tid, msg+12);
991 /* We want the command to complete after we return */
992 spin_lock_irq(host->host_lock);
993 i2o_post_message(c,m);
995 /* Should we wait for the reset to complete ? */
1000 * i2o_scsi_bios_param - Invent disk geometry
1001 * @sdev: scsi device
1002 * @dev: block layer device
1003 * @capacity: size in sectors
1004 * @ip: geometry array
1006 * This is anyone's guess quite frankly. We use the same rules everyone
1007 * else appears to and hope. It seems to work.
/*
 * Invent a plausible CHS geometry from the capacity: 64/32 for small
 * disks, 255/63 for disks whose cylinder count would exceed 1024 — the
 * same heuristic most SCSI drivers use.
 * NOTE(review): the local `size` initialization and return are missing
 * from this extraction; presumably size = capacity — confirm.
 */
1010 static int i2o_scsi_bios_param(struct scsi_device * sdev,
1011 struct block_device *dev, sector_t capacity, int *ip)
1016 ip[0] = 64; /* heads */
1017 ip[1] = 32; /* sectors */
1018 if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */
1019 ip[0] = 255; /* heads */
1020 ip[1] = 63; /* sectors */
1021 ip[2] = size / (255 * 63); /* cylinders */
1026 MODULE_AUTHOR("Red Hat Software");
1027 MODULE_LICENSE("GPL");
/* Host template consumed by scsi_module.c: wires this OSM's entry points
   into the mid-layer (detect/release/queuecommand/error handlers). */
1030 static struct scsi_host_template driver_template = {
1031 .proc_name = "i2o_scsi",
1032 .name = "I2O SCSI Layer",
1033 .detect = i2o_scsi_detect,
1034 .release = i2o_scsi_release,
1035 .info = i2o_scsi_info,
1036 .queuecommand = i2o_scsi_queuecommand,
1037 .eh_abort_handler = i2o_scsi_abort,
1038 .eh_bus_reset_handler = i2o_scsi_bus_reset,
1039 .bios_param = i2o_scsi_bios_param,
1040 .can_queue = I2O_SCSI_CAN_QUEUE,
1044 .use_clustering = ENABLE_CLUSTERING,
1047 #include "../../scsi/scsi_module.c"