2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License as published by the
4 * Free Software Foundation; either version 2, or (at your option) any
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
12 * For the avoidance of doubt the "preferred form" of this code is one which
13 * is in an open non patent encumbered format. Where cryptographic key signing
14 * forms part of the process of creating an executable the information
15 * including keys needed to generate an equivalently functional executable
16 * are deemed to be part of the source code.
18 * Complications for I2O scsi
20 * o Each (bus,lun) is a logical device in I2O. We keep a map
21 * table. We spoof failed selection for unmapped units
22 * o Request sense buffers can come back for free.
23 * o Scatter gather is a bit dynamic. We have to investigate at
25 * o Some of our resources are dynamically shared. The i2o core
26 * needs a message reservation protocol to avoid swap v net
27 * deadlocking. We need to back off queue requests.
29 * In general the firmware wants to help. Where its help isn't performance
30 * useful we just ignore the aid. It's not worth the code in truth.
34 * Scatter gather now works
35 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
36 * Minor fixes for 2.6.
40 * Fix the resource management problems.
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/types.h>
47 #include <linux/string.h>
48 #include <linux/ioport.h>
49 #include <linux/jiffies.h>
50 #include <linux/interrupt.h>
51 #include <linux/timer.h>
52 #include <linux/delay.h>
53 #include <linux/proc_fs.h>
54 #include <linux/prefetch.h>
55 #include <linux/pci.h>
57 #include <asm/system.h>
59 #include <asm/atomic.h>
60 #include <linux/blkdev.h>
61 #include <linux/i2o.h>
62 #include "../../scsi/scsi.h"
63 #include "../../scsi/hosts.h"
/* Driver version reported at detect time. */
67 #define VERSION_STRING "Version 0.1.2"
/* dprintk: debug printk, compiled to a no-op in the non-debug build
 * (the two definitions below are the two arms of an elided #ifdef). */
72 #define dprintk(s, args...) printk(s, ## args)
74 #define dprintk(s, args...)
/* Per-host command queue depth advertised to the SCSI midlayer. */
77 #define I2O_SCSI_CAN_QUEUE 4
/* Fields of struct i2o_scsi_host (opening of the struct is elided here):
 * per-host state kept in Scsi_Host::hostdata. */
82 struct i2o_controller *controller;
/* task[target][lun] maps a SCSI (target,lun) to an I2O TID; -1 = unmapped. */
83 s16 task[16][8]; /* Allow 16 devices for now */
84 unsigned long tagclock[16][8]; /* Tag clock for queueing */
85 s16 bus_task; /* The adapter TID */
/* I2O initiator context assigned when the OSM handler is installed. */
88 static int scsi_context;
/* Number of registered Scsi_Host instances; last release tears down globals. */
90 static int i2o_scsi_hosts;
/* Retry queue for congested message frames, reposted from retry_timer.
 * retry[]/retry_ctrl[] are parallel arrays, retry_ct entries, guarded by
 * retry_lock. */
92 static u32 *retry[32];
93 static struct i2o_controller *retry_ctrl[32];
94 static struct timer_list retry_timer;
95 static spinlock_t retry_lock = SPIN_LOCK_UNLOCKED;
96 static int retry_ct = 0;
/* Outstanding command count across all hosts (diagnostic only, from what
 * is visible here). */
98 static atomic_t queue_depth;
101 * SG Chain buffer support...
104 #define SG_MAX_FRAGS 64
107 * FIXME: we should allocate one of these per bus we find as we
108 * locate them not in a lump at boot.
/* One chain buffer: interleaved SG flag/count words and buffer addresses,
 * used when a request needs more SG entries than fit in the message frame. */
111 typedef struct _chain_buf
113 u32 sg_flags_cnt[SG_MAX_FRAGS];
114 u32 sg_buf[SG_MAX_FRAGS];
117 #define SG_CHAIN_BUF_SZ sizeof(chain_buf)
/* Pool sized for one chain buffer per queued command per controller. */
119 #define SG_MAX_BUFS (i2o_num_controllers * I2O_SCSI_CAN_QUEUE)
120 #define SG_CHAIN_POOL_SZ (SG_MAX_BUFS * SG_CHAIN_BUF_SZ)
/* max_sg_len: high-water mark of SG fragments seen (diagnostic).
 * sg_chain_pool: the kmalloc'd pool, NULL if allocation failed (chaining off).
 * sg_chain_tag: round-robin index of the next pool slot to hand out. */
122 static int max_sg_len = 0;
123 static chain_buf *sg_chain_pool = NULL;
124 static int sg_chain_tag = 0;
125 static int sg_max_frags = SG_MAX_FRAGS;
128 * i2o_retry_run - retry on timeout
131 * Retry congested frames. This actually needs pushing down into
132 * i2o core. We should only bother the OSM with this when we can't
133 * queue and retry the frame. Or perhaps we should call the OSM
134 * and its default handler should be this in the core, and this
135 * call a 2nd "I give up" handler in the OSM ?
/* Timer callback: repost every message queued for retry after congestion.
 * Walks retry[]/retry_ctrl[] under retry_lock and hands each frame back to
 * its controller. NOTE(review): lines are elided here — presumably retry_ct
 * is reset to 0 inside the lock; confirm against the full source. */
138 static void i2o_retry_run(unsigned long f)
143 spin_lock_irqsave(&retry_lock, flags);
144 for(i=0;i<retry_ct;i++)
145 i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
147 spin_unlock_irqrestore(&retry_lock, flags);
151 * flush_pending - empty the retry queue
153 * Turn each of the pending commands into a NOP and post it back
154 * to the controller to clear it.
/* Drain the retry queue: rewrite each pending frame's command field to
 * I2O_CMD_UTIL_NOP and post it back so the controller discards it, rather
 * than retrying the original request. Runs under retry_lock. */
157 static void flush_pending(void)
162 spin_lock_irqsave(&retry_lock, flags);
163 for(i=0;i<retry_ct;i++)
/* Replace the top command byte of the message header with a NOP. */
165 retry[i][0]&=~0xFFFFFF;
166 retry[i][0]|=I2O_CMD_UTIL_NOP<<24;
167 i2o_post_message(retry_ctrl[i],virt_to_bus(retry[i]));
170 spin_unlock_irqrestore(&retry_lock, flags);
174 * i2o_scsi_reply - scsi message reply processor
175 * @h: our i2o handler
176 * @c: controller issuing the reply
177 * @msg: the message from the controller (mapped)
179 * Process reply messages (interrupts in normal scsi controller think).
180 * We can get a variety of messages to process. The normal path is
181 * scsi command completions. We must also deal with IOP failures,
182 * the reply to a bus reset and the reply to a LUN query.
184 * Locks: the queue lock is taken to call the completion handler
/* Reply handler for the SCSI OSM: processes controller replies (the
 * interrupt path). Handles IOP failure frames (with optional retry on
 * congestion), bus-reset completions, command aborts, and normal command
 * completion including status decode and DMA unmapping. Many branches are
 * elided in this listing; the comments below describe only what is visible. */
187 static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
189 Scsi_Cmnd *current_command;
/* --- IOP failure frame path: log the failure details --- */
197 printk("IOP fail.\n");
198 printk("From %d To %d Cmd %d.\n",
202 printk("Failure Code %d.\n", m[4]>>24);
204 printk("Format error.\n");
206 printk("Path error.\n");
208 printk("Path State.\n");
210 printk("Congestion.\n");
/* m[7] holds the bus address of the original (failed) message frame. */
212 m=(u32 *)bus_to_virt(m[7]);
213 printk("Failing message is %p.\n", m);
215 /* This isn't a fast path .. */
216 spin_lock_irqsave(&retry_lock, flags);
/* Bit 18 of the failure word marks a retryable (congestion) failure;
 * queue the frame for the retry timer if there is room (max 32). */
218 if((m[4]&(1<<18)) && retry_ct < 32)
220 retry_ctrl[retry_ct]=c;
224 retry_timer.expires=jiffies+1;
225 add_timer(&retry_timer);
227 spin_unlock_irqrestore(&retry_lock, flags);
/* Non-retryable failure: fail the command back to the midlayer. */
231 spin_unlock_irqrestore(&retry_lock, flags);
232 /* Create a scsi error for this */
233 current_command = (Scsi_Cmnd *)i2o_context_list_get(m[3], c);
237 lock = current_command->device->host->host_lock;
238 printk("Aborted %ld\n", current_command->serial_number);
/* Completion must run under the host lock. */
240 spin_lock_irqsave(lock, flags);
241 current_command->result = DID_ERROR << 16;
242 current_command->scsi_done(current_command);
243 spin_unlock_irqrestore(lock, flags);
245 /* Now flush the message by making it a NOP */
247 m[0]|=(I2O_CMD_UTIL_NOP)<<24;
248 i2o_post_message(c,virt_to_bus(m));
/* --- normal reply path --- */
253 prefetchw(&queue_depth);
257 * Low byte is device status, next is adapter status,
258 * (then one byte reserved), then request status.
260 ds=(u8)le32_to_cpu(m[4]);
261 as=(u8)le32_to_cpu(m[4]>>8);
262 st=(u8)le32_to_cpu(m[4]>>24);
264 dprintk(KERN_INFO "i2o got a scsi reply %08X: ", m[0]);
265 dprintk(KERN_INFO "m[2]=%08X: ", m[2]);
266 dprintk(KERN_INFO "m[4]=%08X\n", m[4]);
272 dprintk(KERN_INFO "Event.\n");
276 printk(KERN_INFO "i2o_scsi: bus reset completed.\n");
/* Look up the Scsi_Cmnd this reply belongs to via the transaction context. */
280 current_command = (Scsi_Cmnd *)i2o_context_list_get(m[3], c);
283 * Is this a control request coming back - eg an abort ?
286 atomic_dec(&queue_depth);
/* NULL context means a control message (e.g. an abort) completed. */
288 if(current_command==NULL)
291 dprintk(KERN_WARNING "SCSI abort: %08X", m[4]);
292 dprintk(KERN_INFO "SCSI abort completed.\n");
296 dprintk(KERN_INFO "Completed %ld\n", current_command->serial_number);
/* m[5] is the transferred byte count; warn on underflow. */
300 if(le32_to_cpu(m[5]) < current_command->underflow)
303 printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n",
304 le32_to_cpu(m[5]), current_command->underflow);
307 printk("%02X ", current_command->cmnd[i]);
315 /* An error has occurred */
317 dprintk(KERN_WARNING "SCSI error %08X", m[4]);
/* Map the I2O status to a SCSI midlayer result code (the selecting
 * conditionals for these arms are elided in this listing). */
321 current_command->result = DID_RESET << 16;
323 current_command->result = DID_PARITY << 16;
325 current_command->result = DID_ERROR << 16;
331 current_command->result = DID_OK << 16 | ds;
/* Undo the DMA mapping set up at queuecommand time: SG list or the
 * single-buffer mapping stashed in SCp.ptr. */
333 if (current_command->use_sg)
334 pci_unmap_sg(c->pdev, (struct scatterlist *)current_command->buffer, current_command->use_sg, scsi_to_pci_dma_dir(current_command->sc_data_direction));
335 else if (current_command->request_bufflen)
336 pci_unmap_single(c->pdev, (dma_addr_t)((long)current_command->SCp.ptr), current_command->request_bufflen, scsi_to_pci_dma_dir(current_command->sc_data_direction));
338 lock = current_command->device->host->host_lock;
339 spin_lock_irqsave(lock, flags);
340 current_command->scsi_done(current_command);
341 spin_unlock_irqrestore(lock, flags);
/* OSM registration record handed to the i2o core: routes replies for the
 * SCSI peripheral class to i2o_scsi_reply. */
345 struct i2o_handler i2o_scsi_handler = {
346 .reply = i2o_scsi_reply,
347 .name = "I2O SCSI OSM",
348 .class = I2O_CLASS_SCSI_PERIPHERAL,
352 * i2o_find_lun - report the lun of an i2o device
353 * @c: i2o controller owning the device
354 * @d: i2o disk device
355 * @target: filled in with target id
356 * @lun: filled in with target lun
358 * Query an I2O device to find out its SCSI lun and target numbering. We
359 * don't currently handle some of the fancy SCSI-3 stuff although our
360 * querying is sufficient to do so.
/* Query an I2O device's scalar parameter group 0 for its SCSI target id
 * (field 3) and LUN (field 4), storing them via *target / *lun. Returns
 * negative on query failure (the success-return line is elided here). */
363 static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun)
367 if(i2o_query_scalar(c, d->lct_data.tid, 0, 3, reply, 4)<0)
372 if(i2o_query_scalar(c, d->lct_data.tid, 0, 4, reply, 8)<0)
377 dprintk(KERN_INFO "SCSI (%d,%d)\n", *target, *lun);
382 * i2o_scsi_init - initialize an i2o device for scsi
383 * @c: i2o controller owning the device
384 * @d: scsi controller
385 * @shpnt: scsi device we wish it to become
387 * Enumerate the scsi peripheral/fibre channel peripheral class
388 * devices that are children of the controller. From that we build
389 * a translation map for the command queue code. Since I2O works on
390 * its own tid's we effectively have to think backwards to get what
/* Populate the per-host (target,lun) -> TID map for bus adapter d by
 * walking the controller's device list, and pick up the SG fragment limit
 * from the adapter's parameter group 0xF103. */
394 static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt)
396 struct i2o_device *unit;
397 struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata;
/* The bus adapter's own TID, used for bus-reset messages. */
402 h->bus_task=d->lct_data.tid;
/* Start with every (target,lun) unmapped (-1 = spoof selection timeout). */
404 for(target=0;target<16;target++)
405 for(lun=0;lun<8;lun++)
406 h->task[target][lun] = -1;
408 for(unit=c->devices;unit!=NULL;unit=unit->next)
410 dprintk(KERN_INFO "Class %03X, parent %d, want %d.\n",
411 unit->lct_data.class_id, unit->lct_data.parent_tid, d->lct_data.tid);
413 /* Only look at scsi and fc devices */
414 if ( (unit->lct_data.class_id != I2O_CLASS_SCSI_PERIPHERAL)
415 && (unit->lct_data.class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL)
420 dprintk(KERN_INFO "Found a disk (%d).\n", unit->lct_data.tid);
/* Claim the device if it hangs off this adapter (or shares its parent). */
421 if ((unit->lct_data.parent_tid == d->lct_data.tid)
422 || (unit->lct_data.parent_tid == d->lct_data.parent_tid)
426 dprintk(KERN_INFO "Its ours.\n");
427 if(i2o_find_lun(c, unit, &target, &lun)==-1)
429 printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", unit->lct_data.tid);
432 dprintk(KERN_INFO "Found disk %d %d.\n", target, lun);
433 h->task[target][lun]=unit->lct_data.tid;
434 h->tagclock[target][lun]=jiffies;
436 /* Get the max fragments/request */
437 i2o_query_scalar(c, d->lct_data.tid, 0xF103, 3, &limit, 2);
/* A zero limit from the IOP is nonsense — keep the default instead. */
442 printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n");
446 shpnt->sg_tablesize = limit;
448 dprintk(KERN_INFO "i2o_scsi: set scatter-gather to %d.\n",
449 shpnt->sg_tablesize);
455 * i2o_scsi_detect - probe for I2O scsi devices
456 * @tpnt: scsi layer template
458 * I2O is a little odd here. The I2O core already knows what the
459 * devices are. It also knows them by disk and tape as well as
460 * by controller. We register each I2O scsi class object as a
461 * scsi controller and then let the enumeration fake up the rest
/* Scsi_Host_Template::detect — install the OSM handler, allocate the SG
 * chain pool, set up the retry timer, then register one Scsi_Host per
 * I2O bus-adapter device found on each controller. Returns the host count
 * (return statement elided in this listing); the tail lines tear everything
 * down, presumably on the count==0 path — confirm against the full source. */
464 static int i2o_scsi_detect(Scsi_Host_Template * tpnt)
466 struct Scsi_Host *shpnt = NULL;
470 printk(KERN_INFO "i2o_scsi.c: %s\n", VERSION_STRING);
472 if(i2o_install_handler(&i2o_scsi_handler)<0)
474 printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n");
/* Context assigned by the i2o core; stamped into every outgoing message. */
477 scsi_context = i2o_scsi_handler.context;
/* SG chaining is optional: a failed pool allocation just disables it. */
479 if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL)
481 printk(KERN_INFO "i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ);
482 printk(KERN_INFO "i2o_scsi: SG chaining DISABLED!\n");
487 printk(KERN_INFO " chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool);
488 printk(KERN_INFO " (%d byte buffers X %d can_queue X %d i2o controllers)\n",
489 SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers);
490 sg_max_frags = SG_MAX_FRAGS; // 64
493 init_timer(&retry_timer);
494 retry_timer.data = 0UL;
495 retry_timer.function = i2o_retry_run;
497 // printk("SCSI OSM at %d.\n", scsi_context);
499 for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++)
501 struct i2o_controller *c=i2o_find_controller(i);
502 struct i2o_device *d;
504 * This controller doesn't exist.
511 * Fixme - we need some altered device locking. This
512 * is racing with device addition in theory. Easy to fix.
515 for(d=c->devices;d!=NULL;d=d->next)
518 * bus_adapter, SCSI (obsolete), or FibreChannel busses only
520 if( (d->lct_data.class_id!=I2O_CLASS_BUS_ADAPTER_PORT) // bus_adapter
521 // && (d->lct_data.class_id!=I2O_CLASS_FIBRE_CHANNEL_PORT) // FC_PORT
/* One Scsi_Host per bus adapter; hostdata is our i2o_scsi_host. */
525 shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host));
528 shpnt->unique_id = (u32)d;
530 shpnt->n_io_port = 0;
532 shpnt->this_id = /* Good question */15;
533 i2o_scsi_init(c, d, shpnt);
537 i2o_scsi_hosts = count;
/* Cleanup path: free the chain pool, stop the timer, unhook the OSM. */
541 if(sg_chain_pool!=NULL)
543 kfree(sg_chain_pool);
544 sg_chain_pool = NULL;
547 del_timer(&retry_timer);
548 i2o_remove_handler(&i2o_scsi_handler);
/* Scsi_Host_Template::release — unregister one host; when the last host
 * goes away, free the SG chain pool, stop the retry timer and remove the
 * OSM handler (mirrors the detect-time setup). */
554 static int i2o_scsi_release(struct Scsi_Host *host)
556 if(--i2o_scsi_hosts==0)
558 if(sg_chain_pool!=NULL)
560 kfree(sg_chain_pool);
561 sg_chain_pool = NULL;
564 del_timer(&retry_timer);
565 i2o_remove_handler(&i2o_scsi_handler);
568 scsi_unregister(host);
/* Scsi_Host_Template::info — report the owning controller's name string. */
574 static const char *i2o_scsi_info(struct Scsi_Host *SChost)
576 struct i2o_scsi_host *hostdata;
577 hostdata = (struct i2o_scsi_host *)SChost->hostdata;
578 return(&hostdata->controller->name[0]);
582 * i2o_scsi_queuecommand - queue a SCSI command
583 * @SCpnt: scsi command pointer
584 * @done: callback for completion
586 * Issue a scsi comamnd asynchronously. Return 0 on success or 1 if
587 * we hit an error (normally message queue congestion). The only
588 * minor complication here is that I2O deals with the device addressing
589 * so we have to map the bus/dev/lun back to an I2O handle as well
590 * as faking absent devices ourself.
592 * Locks: takes the controller lock on error path only
/* Queue a SCSI command to the IOP as an I2O SCSI_EXEC message: map the
 * (target,lun) to a TID, grab a free message frame, build the header,
 * tag word and CDB, attach the SG list (chaining through sg_chain_pool
 * when the fragment count exceeds the in-frame space) or a single mapped
 * buffer, and post the frame. Returns 0 on success (midlayer contract);
 * error/congestion returns are on lines elided from this listing. */
595 static int i2o_scsi_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
599 struct i2o_controller *c;
600 Scsi_Cmnd *current_command;
601 struct Scsi_Host *host;
602 struct i2o_scsi_host *hostdata;
/* High-water mark of queue_depth, only for the diagnostic printk below. */
613 static int max_qd = 1;
616 * Do the incoming paperwork
619 host = SCpnt->device->host;
620 hostdata = (struct i2o_scsi_host *)host->hostdata;
622 c = hostdata->controller;
624 prefetchw(&queue_depth);
626 SCpnt->scsi_done = done;
/* Only 16 targets are mapped per host (see i2o_scsi_host.task). */
628 if(SCpnt->device->id > 15)
630 printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->device->id);
634 tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
636 dprintk(KERN_INFO "qcmd: Tid = %d\n", tid);
638 current_command = SCpnt; /* set current command */
639 current_command->scsi_done = done; /* set ptr to done function */
641 /* We don't have such a device. Pretend we did the command
642 and that selection timed out */
646 SCpnt->result = DID_NO_CONNECT << 16;
651 dprintk(KERN_INFO "Real scsi messages.\n");
654 * Obtain an I2O message. If there are none free then
655 * throw it back to the scsi layer
658 m = le32_to_cpu(I2O_POST_READ32(c));
/* m is an offset into the controller's message window. */
662 msg = (u32 *)(c->mem_offset + m);
665 * Put together a scsi execscb message
668 len = SCpnt->request_bufflen;
669 direction = 0x00000000; // SGL IN (osm<--iop)
/* Translate the midlayer data direction into the I2O SGL direction flag
 * and the SCSI-transfer direction bits for the message. */
671 if(SCpnt->sc_data_direction == SCSI_DATA_NONE)
672 scsidir = 0x00000000; // DATA NO XFER
673 else if(SCpnt->sc_data_direction == SCSI_DATA_WRITE)
675 direction=0x04000000; // SGL OUT (osm-->iop)
676 scsidir =0x80000000; // DATA OUT (iop-->dev)
678 else if(SCpnt->sc_data_direction == SCSI_DATA_READ)
680 scsidir =0x40000000; // DATA IN (iop<--dev)
684 /* Unknown - kill the command */
685 SCpnt->result = DID_NO_CONNECT << 16;
687 /* We must lock the request queue while completing */
688 spin_lock_irqsave(host->host_lock, flags);
690 spin_unlock_irqrestore(host->host_lock, flags);
/* Message header: command/TID word, initiator context, and the per-command
 * transaction context used by i2o_scsi_reply to find this Scsi_Cmnd. */
695 i2o_raw_writel(I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid, &msg[1]);
696 i2o_raw_writel(scsi_context, &msg[2]); /* So the I2O layer passes to us */
697 i2o_raw_writel(i2o_context_list_add(SCpnt, c), &msg[3]); /* We want the SCSI control block back */
701 * Intermittant observations of msg frame word data corruption
702 * observed on msg[4] after:
703 * WRITE, READ-MODIFY-WRITE
704 * operations. 19990606 -sralston
706 * (Hence we build this word via tag. Its good practice anyway
707 * we don't want fetches over PCI needlessly)
713 * Attach tags to the devices
715 if(SCpnt->device->tagged_supported)
718 * Some drives are too stupid to handle fairness issues
719 * with tagged queueing. We throw in the odd ordered
720 * tag to stop them starving themselves.
/* Inject an ORDERED tag at most once per 5 seconds per (target,lun). */
722 if((jiffies - hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]) > (5*HZ))
724 tag=0x01800000; /* ORDERED! */
725 hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]=jiffies;
729 /* Hmmm... I always see value of 0 here,
730 * of which {HEAD_OF, ORDERED, SIMPLE} are NOT! -sralston
732 if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
734 else if(SCpnt->tag == ORDERED_QUEUE_TAG)
739 /* Direction, disconnect ok, tag, CDBLen */
740 i2o_raw_writel(scsidir|0x20000000|SCpnt->cmd_len|tag, &msg[4]);
745 * Write SCSI command into the message - always 16 byte block
748 memcpy_toio(mptr, SCpnt->cmnd, 16);
750 lenptr=mptr++; /* Remember me - fill in when we know */
752 reqlen = 12; // SINGLE SGE
755 * Now fill in the SGList and command
757 * FIXME: we need to set the sglist limits according to the
758 * message size of the I2O controller. We might only have room
759 * for 6 or so worst case
/* --- scatter/gather request path --- */
764 struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
770 sg_count = pci_map_sg(c->pdev, sg, SCpnt->use_sg,
771 scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
773 /* FIXME: handle fail */
/* More than 11 fragments: point the frame at a chain buffer from the
 * pool and build the SGL there instead of in the message frame. */
777 if((sg_max_frags > 11) && (SCpnt->use_sg > 11))
783 i2o_raw_writel(direction|0xB0000000|(SCpnt->use_sg*2*4), mptr++);
784 i2o_raw_writel(virt_to_bus(sg_chain_pool + sg_chain_tag), mptr);
785 mptr = (u32*)(sg_chain_pool + sg_chain_tag);
786 if (SCpnt->use_sg > max_sg_len)
788 max_sg_len = SCpnt->use_sg;
789 printk("i2o_scsi: Chain SG! SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n",
790 SCpnt, SCpnt->use_sg, sg_chain_tag);
/* Round-robin through the pool slots (wrap handling partially elided). */
792 if ( ++sg_chain_tag == SG_MAX_BUFS )
/* Chain buffer is host memory: plain stores, LE byte order. */
794 for(i = 0 ; i < SCpnt->use_sg; i++)
796 *mptr++=cpu_to_le32(direction|0x10000000|sg_dma_len(sg));
798 *mptr++=cpu_to_le32(sg_dma_address(sg));
/* Patch the last element's flags to "end of list" (0xD...). */
801 mptr[-2]=cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1));
/* In-frame SGL path: same layout but written through the I/O window. */
805 for(i = 0 ; i < SCpnt->use_sg; i++)
807 i2o_raw_writel(direction|0x10000000|sg_dma_len(sg), mptr++);
809 i2o_raw_writel(sg_dma_address(sg), mptr++);
813 /* Make this an end of list. Again evade the 920 bug and
814 unwanted PCI read traffic */
816 i2o_raw_writel(direction|0xD0000000|sg_dma_len(sg-1), &mptr[-2]);
822 i2o_raw_writel(len, lenptr);
824 if(len != SCpnt->underflow)
825 printk("Cmd len %08X Cmd underflow %08X\n",
826 len, SCpnt->underflow);
/* --- single (non-SG) buffer path --- */
830 dprintk(KERN_INFO "non sg for %p, %d\n", SCpnt->request_buffer,
831 SCpnt->request_bufflen);
832 i2o_raw_writel(len = SCpnt->request_bufflen, lenptr);
840 dma_addr = pci_map_single(c->pdev,
841 SCpnt->request_buffer,
842 SCpnt->request_bufflen,
843 scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
845 BUG(); /* How to handle ?? */
/* Stash the DMA handle in SCp.ptr so the reply path can unmap it. */
846 SCpnt->SCp.ptr = (char *)(unsigned long) dma_addr;
847 i2o_raw_writel(0xD0000000|direction|SCpnt->request_bufflen, mptr++);
848 i2o_raw_writel(dma_addr, mptr++);
853 * Stick the headers on
856 i2o_raw_writel(reqlen<<16 | SGL_OFFSET_10, msg);
858 /* Queue the message */
859 i2o_post_message(c,m);
861 atomic_inc(&queue_depth);
863 if(atomic_read(&queue_depth)> max_qd)
865 max_qd=atomic_read(&queue_depth);
866 printk("Queue depth now %d.\n", max_qd);
870 dprintk(KERN_INFO "Issued %ld\n", current_command->serial_number);
876 * i2o_scsi_abort - abort a running command
877 * @SCpnt: command to abort
879 * Ask the I2O controller to abort a command. This is an asynchrnous
880 * process and our callback handler will see the command complete
881 * with an aborted message if it succeeds.
883 * Locks: no locks are held or needed
/* eh_abort_handler — ask the IOP to abort SCpnt via an I2O_CMD_SCSI_ABORT
 * message and wait (i2o_post_wait, 240s budget) for the reply. Drops the
 * host lock around the wait and retakes it before returning; the actual
 * return values are on lines elided from this listing. */
886 int i2o_scsi_abort(Scsi_Cmnd * SCpnt)
888 struct i2o_controller *c;
889 struct Scsi_Host *host;
890 struct i2o_scsi_host *hostdata;
895 printk(KERN_WARNING "i2o_scsi: Aborting command block.\n");
897 host = SCpnt->device->host;
898 hostdata = (struct i2o_scsi_host *)host->hostdata;
899 tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
/* No TID mapped for this (target,lun): nothing the IOP could abort. */
902 printk(KERN_ERR "i2o_scsi: Impossible command to abort!\n");
905 c = hostdata->controller;
/* i2o_post_wait sleeps; cannot hold the host lock across it. */
907 spin_unlock_irq(host->host_lock);
909 msg[0] = FIVE_WORD_MSG_SIZE;
910 msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid;
911 msg[2] = scsi_context;
/* msg[4] carries the transaction context of the command to abort; removing
 * it from the context list also invalidates the pending completion. */
913 msg[4] = i2o_context_list_remove(SCpnt, c);
914 if(i2o_post_wait(c, msg, sizeof(msg), 240))
917 spin_lock_irq(host->host_lock);
922 * i2o_scsi_bus_reset - Issue a SCSI reset
923 * @SCpnt: the command that caused the reset
925 * Perform a SCSI bus reset operation. In I2O this is just a message
926 * we pass. I2O can do clever multi-initiator and shared reset stuff
927 * but we don't support this.
929 * Locks: called with no lock held, requires no locks.
/* eh_bus_reset_handler — post an I2O_CMD_SCSI_BUSRESET to the bus
 * adapter's TID. Spins (with 2s timeout) for a free message frame, builds
 * the reset message, and posts it; completion arrives asynchronously via
 * i2o_scsi_reply. Drops the host lock while waiting for a frame and
 * retakes it before posting. */
932 static int i2o_scsi_bus_reset(Scsi_Cmnd * SCpnt)
935 struct i2o_controller *c;
936 struct Scsi_Host *host;
937 struct i2o_scsi_host *hostdata;
940 unsigned long timeout;
944 * Find the TID for the bus
948 host = SCpnt->device->host;
950 spin_unlock_irq(host->host_lock);
952 printk(KERN_WARNING "i2o_scsi: Attempting to reset the bus.\n");
954 hostdata = (struct i2o_scsi_host *)host->hostdata;
955 tid = hostdata->bus_task;
956 c = hostdata->controller;
959 * Now send a SCSI reset request. Any remaining commands
960 * will be aborted by the IOP. We need to catch the reply
/* Poll up to 2 seconds for a free inbound message frame. */
964 timeout = jiffies+2*HZ;
967 m = le32_to_cpu(I2O_POST_READ32(c));
970 set_current_state(TASK_UNINTERRUPTIBLE);
974 while(time_before(jiffies, timeout));
977 msg = c->mem_offset + m;
978 i2o_raw_writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, msg);
979 i2o_raw_writel(I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid, msg+4);
980 i2o_raw_writel(scsi_context|0x80000000, msg+8);
981 /* We use the top bit to split controller and unit transactions */
982 /* Now store unit,tid so we can tie the completion back to a specific device */
983 __raw_writel(c->unit << 16 | tid, msg+12);
986 /* We want the command to complete after we return */
987 spin_lock_irq(host->host_lock);
988 i2o_post_message(c,m);
990 /* Should we wait for the reset to complete ? */
995 * i2o_scsi_host_reset - host reset callback
996 * @SCpnt: command causing the reset
998 * An I2O controller can be many things at once. While we can
999 * reset a controller the potential mess from doing so is vast, and
1000 * it's better to simply hold on and pray
/* eh_host_reset_handler — deliberately a no-op: resetting an I2O
 * controller could disrupt its other personalities (see the comment
 * above); the body is elided from this listing. */
1003 static int i2o_scsi_host_reset(Scsi_Cmnd * SCpnt)
1009 * i2o_scsi_device_reset - device reset callback
1010 * @SCpnt: command causing the reset
1012 * I2O does not (AFAIK) support doing a device reset
/* eh_device_reset_handler — stub: I2O offers no device-reset operation
 * this driver uses (per the comment above); the body is elided here. */
1015 static int i2o_scsi_device_reset(Scsi_Cmnd * SCpnt)
1021 * i2o_scsi_bios_param - Invent disk geometry
1022 * @sdev: scsi device
1023 * @dev: block layer device
1024 * @capacity: size in sectors
1025 * @ip: geometry array
1027 * This is anyones guess quite frankly. We use the same rules everyone
1028 * else appears to and hope. It seems to work.
/* bios_param — invent a conventional CHS geometry from the capacity:
 * 64 heads / 32 sectors normally, 255/63 for "big" disks (>1GB by the
 * cylinders>1024 test). `size` is derived from capacity on an elided line. */
1031 static int i2o_scsi_bios_param(struct scsi_device * sdev,
1032 struct block_device *dev, sector_t capacity, int *ip)
1037 ip[0] = 64; /* heads */
1038 ip[1] = 32; /* sectors */
1039 if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */
1040 ip[0] = 255; /* heads */
1041 ip[1] = 63; /* sectors */
1042 ip[2] = size / (255 * 63); /* cylinders */
1047 MODULE_AUTHOR("Red Hat Software");
1048 MODULE_LICENSE("GPL");
/* SCSI midlayer template wiring this OSM's entry points; consumed by the
 * scsi_module.c boilerplate included below. */
1051 static Scsi_Host_Template driver_template = {
1052 .proc_name = "i2o_scsi",
1053 .name = "I2O SCSI Layer",
1054 .detect = i2o_scsi_detect,
1055 .release = i2o_scsi_release,
1056 .info = i2o_scsi_info,
1057 .queuecommand = i2o_scsi_queuecommand,
1058 .eh_abort_handler = i2o_scsi_abort,
1059 .eh_bus_reset_handler = i2o_scsi_bus_reset,
1060 .eh_device_reset_handler= i2o_scsi_device_reset,
1061 .eh_host_reset_handler = i2o_scsi_host_reset,
1062 .bios_param = i2o_scsi_bios_param,
1063 .can_queue = I2O_SCSI_CAN_QUEUE,
1067 .use_clustering = ENABLE_CLUSTERING,
1070 #include "../../scsi/scsi_module.c"