2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License as published by the
4 * Free Software Foundation; either version 2, or (at your option) any
7 * This program is distributed in the hope that it will be useful, but
8 * WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
10 * General Public License for more details.
12 * For the avoidance of doubt the "preferred form" of this code is one which
13 * is in an open non patent encumbered format. Where cryptographic key signing
14 * forms part of the process of creating an executable the information
15 * including keys needed to generate an equivalently functional executable
16 * are deemed to be part of the source code.
18 * Complications for I2O scsi
20 * o Each (bus,lun) is a logical device in I2O. We keep a map
21 * table. We spoof failed selection for unmapped units
22 * o Request sense buffers can come back for free.
23 * o Scatter gather is a bit dynamic. We have to investigate at
25 * o Some of our resources are dynamically shared. The i2o core
26 * needs a message reservation protocol to avoid swap v net
27 * deadlocking. We need to back off queue requests.
29 * In general the firmware wants to help. Where its help isn't performance
30 * useful we just ignore the aid. It's not worth the code, in truth.
34 * Scatter gather now works
35 * Markus Lidel <Markus.Lidel@shadowconnect.com>:
36 * Minor fixes for 2.6.
40 * Fix the resource management problems.
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/types.h>
47 #include <linux/string.h>
48 #include <linux/ioport.h>
49 #include <linux/jiffies.h>
50 #include <linux/interrupt.h>
51 #include <linux/timer.h>
52 #include <linux/delay.h>
53 #include <linux/proc_fs.h>
54 #include <linux/prefetch.h>
55 #include <linux/pci.h>
57 #include <asm/system.h>
59 #include <asm/atomic.h>
60 #include <linux/blkdev.h>
61 #include <linux/i2o.h>
62 #include "../../scsi/scsi.h"
63 #include "../../scsi/hosts.h"
65 #if BITS_PER_LONG == 64
66 #error FIXME: driver does not support 64-bit platforms
70 #define VERSION_STRING        "Version 0.1.2"
/* dprintk(): debug trace macro.  The two definitions below are selected by a
 * preprocessor conditional that is outside this view — one verbose variant
 * forwarding to printk(), one no-op variant for production builds. */
75 #define dprintk(s, args...) printk(s, ## args)
77 #define dprintk(s, args...)
/* Commands each registered virtual SCSI host may have in flight at once. */
80 #define I2O_SCSI_CAN_QUEUE	4
/* Members of struct i2o_scsi_host (struct header is outside this view). */
85 	struct i2o_controller *controller;	/* I2O controller that owns this host */
86 	s16 task[16][8];		/* Allow 16 devices for now */
87 	unsigned long tagclock[16][8];	/* Tag clock for queueing */
88 	s16 bus_task;		/* The adapter TID */
/* Initiator context returned by the i2o core when our handler is installed;
 * written into msg[2] of outgoing frames so replies route back to this OSM. */
91 static int scsi_context;
/* Count of virtual SCSI hosts currently registered with the midlayer. */
93 static int i2o_scsi_hosts;
/* Ring of up to 32 congested message frames awaiting reposting by the
 * retry timer; retry_ctrl[] records which controller each belongs to. */
95 static u32 *retry[32];
96 static struct i2o_controller *retry_ctrl[32];
97 static struct timer_list retry_timer;
98 static spinlock_t retry_lock = SPIN_LOCK_UNLOCKED;	/* guards retry[], retry_ctrl[], retry_ct */
99 static int retry_ct = 0;	/* entries currently queued for retry */
/* Commands outstanding across all controllers (statistics/debug only). */
101 static atomic_t queue_depth;
104  *	SG Chain buffer support...
/* Maximum scatter-gather fragments held by one chain buffer. */
107 #define SG_MAX_FRAGS 64
110  *	FIXME: we should allocate one of these per bus we find as we
111  *	locate them not in a lump at boot.
114 typedef struct _chain_buf
116 	u32 sg_flags_cnt[SG_MAX_FRAGS];	/* per-element SG flags + byte count words */
117 	u32 sg_buf[SG_MAX_FRAGS];	/* per-element buffer bus addresses */
120 #define SG_CHAIN_BUF_SZ sizeof(chain_buf)
/* Pool sized for one chain buffer per queueable command per controller. */
122 #define SG_MAX_BUFS		(i2o_num_controllers * I2O_SCSI_CAN_QUEUE)
123 #define SG_CHAIN_POOL_SZ	(SG_MAX_BUFS * SG_CHAIN_BUF_SZ)
125 static int max_sg_len = 0;	/* largest SG list seen so far (debug) */
126 static chain_buf *sg_chain_pool = NULL;	/* NULL => SG chaining disabled */
127 static int sg_chain_tag = 0;	/* next pool slot to hand out (round-robin) */
128 static int sg_max_frags = SG_MAX_FRAGS;
131  *	i2o_retry_run	-	retry on timeout
134  *	Retry congested frames. This actually needs pushing down into
135  *	i2o core. We should only bother the OSM with this when we can't
136  *	queue and retry the frame. Or perhaps we should call the OSM
137  *	and its default handler should be this in the core, and this
138  *	call a 2nd "I give up" handler in the OSM ?
141 static void i2o_retry_run(unsigned long f)
	/* Repost every frame previously refused for congestion.  The retry
	 * lock serialises against i2o_scsi_reply() appending new entries. */
146 	spin_lock_irqsave(&retry_lock, flags);
147 	for(i=0;i<retry_ct;i++)
148 		i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
150 	spin_unlock_irqrestore(&retry_lock, flags);
154  *	flush_pending	-	empty the retry queue
156  *	Turn each of the pending commands into a NOP and post it back
157  *	to the controller to clear it.
160 static void flush_pending(void)
165 	spin_lock_irqsave(&retry_lock, flags);
166 	for(i=0;i<retry_ct;i++)
		/* Rewrite the frame so the IOP sees a UTIL_NOP and simply frees
		 * it.  NOTE(review): this assumes the command code occupies the
		 * top byte of word 0 here — confirm against the I2O message
		 * frame layout before relying on it. */
168 		retry[i][0]&=~0xFFFFFF;
169 		retry[i][0]|=I2O_CMD_UTIL_NOP<<24;
170 		i2o_post_message(retry_ctrl[i],virt_to_bus(retry[i]));
173 	spin_unlock_irqrestore(&retry_lock, flags);
177  *	i2o_scsi_reply		-	scsi message reply processor
178  *	@h: our i2o handler
179  *	@c: controller issuing the reply
180  *	@msg: the message from the controller (mapped)
182  *	Process reply messages (interrupts in normal scsi controller think).
183  *	We can get a variety of messages to process. The normal path is
184  *	scsi command completions. We must also deal with IOP failures,
185  *	the reply to a bus reset and the reply to a LUN query.
187  *	Locks: the queue lock is taken to call the completion handler
190 static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
192 	Scsi_Cmnd *current_command;
	/* --- IOP failure path (the dispatching conditional is outside this
	 *     view): decode and log the failure code from word 4. --- */
200 		printk("IOP fail.\n");
201 		printk("From %d To %d Cmd %d.\n",
205 		printk("Failure Code %d.\n", m[4]>>24);
207 			printk("Format error.\n");
209 			printk("Path error.\n");
211 			printk("Path State.\n");
213 			printk("Congestion.\n");
		/* Word 7 of the failure reply carries the bus address of the
		 * original (failed) message frame. */
215 		m=(u32 *)bus_to_virt(m[7]);
216 		printk("Failing message is %p.\n", m);
218 		/* This isn't a fast path .. */
219 		spin_lock_irqsave(&retry_lock, flags);
		/* NOTE(review): bit 18 of word 4 appears to mark the failure as
		 * retryable — confirm against the I2O failure-reply format.  If
		 * set and there is room, queue the frame for the retry timer. */
221 		if((m[4]&(1<<18)) && retry_ct < 32)
223 			retry_ctrl[retry_ct]=c;
227 				retry_timer.expires=jiffies+1;
228 				add_timer(&retry_timer);
230 			spin_unlock_irqrestore(&retry_lock, flags);
234 			spin_unlock_irqrestore(&retry_lock, flags);
235 			/* Create a scsi error for this */
			/* Word 3 carries the transaction context we stored at
			 * submit time: the Scsi_Cmnd pointer (32-bit only). */
236 			current_command = (Scsi_Cmnd *)m[3];
237 			lock = current_command->device->host->host_lock;
238 			printk("Aborted %ld\n", current_command->serial_number);
240 			spin_lock_irqsave(lock, flags);
241 			current_command->result = DID_ERROR << 16;
242 			current_command->scsi_done(current_command);
243 			spin_unlock_irqrestore(lock, flags);
245 			/* Now flush the message by making it a NOP */
247 			m[0]|=(I2O_CMD_UTIL_NOP)<<24;
248 			i2o_post_message(c,virt_to_bus(m));
	/* --- Normal completion path below --- */
253 	prefetchw(&queue_depth);
257 	 *	Low byte is device status, next is adapter status,
258 	 *	(then one byte reserved), then request status.
260 	ds=(u8)le32_to_cpu(m[4]);
261 	as=(u8)le32_to_cpu(m[4]>>8);
262 	st=(u8)le32_to_cpu(m[4]>>24);
264 	dprintk(KERN_INFO "i2o got a scsi reply %08X: ", m[0]);
265 	dprintk(KERN_INFO "m[2]=%08X: ", m[2]);
266 	dprintk(KERN_INFO "m[4]=%08X\n", m[4]);
272 		dprintk(KERN_INFO "Event.\n");
276 		printk(KERN_INFO "i2o_scsi: bus reset completed.\n");
280 	 *	FIXME: 64bit breakage
283 	current_command = (Scsi_Cmnd *)m[3];
286 	 *	Is this a control request coming back - eg an abort ?
	/* Control requests (e.g. aborts) are submitted with a NULL command
	 * context, so a NULL here means "not a normal command completion". */
289 	if(current_command==NULL)
292 		dprintk(KERN_WARNING "SCSI abort: %08X", m[4]);
293 		dprintk(KERN_INFO "SCSI abort completed.\n");
297 	dprintk(KERN_INFO "Completed %ld\n", current_command->serial_number);
299 	atomic_dec(&queue_depth);
	/* Word 5 is the transferred byte count; warn on short transfers. */
303 	if(le32_to_cpu(m[5]) < current_command->underflow)
306 		printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n",
307 			le32_to_cpu(m[5]), current_command->underflow);
310 			printk("%02X ", current_command->cmnd[i]);
318 		/* An error has occurred */
320 		dprintk(KERN_WARNING "SCSI error %08X", m[4]);
		/* Map the adapter status onto midlayer host codes (the switch
		 * framing is outside this view). */
324 			current_command->result = DID_RESET << 16;
326 			current_command->result = DID_PARITY << 16;
328 			current_command->result = DID_ERROR << 16;
		/* Success: merge the SCSI device status into the result. */
334 		current_command->result = DID_OK << 16 | ds;
	/* Undo the DMA mapping set up in queuecommand before completing. */
336 	if (current_command->use_sg)
337 		pci_unmap_sg(c->pdev, (struct scatterlist *)current_command->buffer, current_command->use_sg, scsi_to_pci_dma_dir(current_command->sc_data_direction));
338 	else if (current_command->request_bufflen)
339 		pci_unmap_single(c->pdev, (dma_addr_t)((long)current_command->SCp.ptr), current_command->request_bufflen, scsi_to_pci_dma_dir(current_command->sc_data_direction));
341 	lock = current_command->device->host->host_lock;
342 	spin_lock_irqsave(lock, flags);
343 	current_command->scsi_done(current_command);
344 	spin_unlock_irqrestore(lock, flags);
/* OSM registration record: the i2o core calls .reply for every message
 * addressed to the SCSI peripheral class context. */
348 struct i2o_handler i2o_scsi_handler = {
349 	.reply	= i2o_scsi_reply,
350 	.name	= "I2O SCSI OSM",
351 	.class	= I2O_CLASS_SCSI_PERIPHERAL,
355  *	i2o_find_lun	-	report the lun of an i2o device
356  *	@c: i2o controller owning the device
357  *	@d: i2o disk device
358  *	@target: filled in with target id
359  *	@lun: filled in with target lun
361  *	Query an I2O device to find out its SCSI lun and target numbering. We
362  *	don't currently handle some of the fancy SCSI-3 stuff although our
363  *	querying is sufficient to do so.
366 static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun)
	/* Scalar parameter group 0, field 3 => target id (4 bytes);
	 * field 4 => LUN (8 bytes).  Either failure aborts the lookup. */
370 	if(i2o_query_scalar(c, d->lct_data.tid, 0, 3, reply, 4)<0)
375 	if(i2o_query_scalar(c, d->lct_data.tid, 0, 4, reply, 8)<0)
380 	dprintk(KERN_INFO "SCSI (%d,%d)\n", *target, *lun);
385  *	i2o_scsi_init		-	initialize an i2o device for scsi
386  *	@c: i2o controller owning the device
387  *	@d: scsi controller
388  *	@shpnt: scsi device we wish it to become
390  *	Enumerate the scsi peripheral/fibre channel peripheral class
391  *	devices that are children of the controller. From that we build
392  *	a translation map for the command queue code. Since I2O works on
393  *	its own tid's we effectively have to think backwards to get what
397 static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt)
399 	struct i2o_device *unit;
400 	struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata;
	/* Remember the bus adapter's TID (used by bus reset), then mark every
	 * (target,lun) slot unmapped until we find a matching child device. */
405 	h->bus_task=d->lct_data.tid;
407 	for(target=0;target<16;target++)
408 		for(lun=0;lun<8;lun++)
409 			h->task[target][lun] = -1;
411 	for(unit=c->devices;unit!=NULL;unit=unit->next)
413 		dprintk(KERN_INFO "Class %03X, parent %d, want %d.\n",
414 			unit->lct_data.class_id, unit->lct_data.parent_tid, d->lct_data.tid);
416 		/* Only look at scsi and fc devices */
417 		if (    (unit->lct_data.class_id != I2O_CLASS_SCSI_PERIPHERAL)
418 		     && (unit->lct_data.class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL)
423 		dprintk(KERN_INFO "Found a disk (%d).\n", unit->lct_data.tid);
		/* Claim the unit only if it hangs off this bus adapter (or
		 * shares its parent). */
424 		if ((unit->lct_data.parent_tid == d->lct_data.tid)
425 		     || (unit->lct_data.parent_tid == d->lct_data.parent_tid)
429 			dprintk(KERN_INFO "Its ours.\n");
430 			if(i2o_find_lun(c, unit, &target, &lun)==-1)
432 				printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", unit->lct_data.tid);
435 			dprintk(KERN_INFO "Found disk %d %d.\n", target, lun);
			/* Record the (target,lun) -> TID mapping and start its
			 * ordered-tag clock (see queuecommand). */
436 			h->task[target][lun]=unit->lct_data.tid;
437 			h->tagclock[target][lun]=jiffies;
439 	/* Get the max fragments/request */
	/* NOTE(review): group 0xF103 field 3 is read as the adapter's SG
	 * limit — confirm against the I2O bus adapter parameter groups. */
440 	i2o_query_scalar(c, d->lct_data.tid, 0xF103, 3, &limit, 2);
445 		printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n");
449 	shpnt->sg_tablesize = limit;
451 	dprintk(KERN_INFO "i2o_scsi: set scatter-gather to %d.\n",
452 		shpnt->sg_tablesize);
458  *	i2o_scsi_detect		-	probe for I2O scsi devices
459  *	@tpnt: scsi layer template
461  *	I2O is a little odd here. The I2O core already knows what the
462  *	devices are. It also knows them by disk and tape as well as
463  *	by controller. We register each I2O scsi class object as a
464  *	scsi controller and then let the enumeration fake up the rest
467 static int i2o_scsi_detect(Scsi_Host_Template * tpnt)
469 	struct Scsi_Host *shpnt = NULL;
473 	printk(KERN_INFO "i2o_scsi.c: %s\n", VERSION_STRING);
	/* Register ourselves as an OSM first; without this no replies can
	 * ever reach us. */
475 	if(i2o_install_handler(&i2o_scsi_handler)<0)
477 		printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n");
480 	scsi_context = i2o_scsi_handler.context;
	/* SG chaining is best effort: on allocation failure we run without
	 * it rather than failing detection. */
482 	if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL)
484 		printk(KERN_INFO "i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ);
485 		printk(KERN_INFO "i2o_scsi: SG chaining DISABLED!\n");
490 		printk(KERN_INFO "  chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool);
491 		printk(KERN_INFO "  (%d byte buffers X %d can_queue X %d i2o controllers)\n",
492 			SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers);
493 		sg_max_frags = SG_MAX_FRAGS;	// 64
	/* Set up (but do not arm) the congestion retry timer; the reply
	 * handler arms it when frames need reposting. */
496 	init_timer(&retry_timer);
497 	retry_timer.data = 0UL;
498 	retry_timer.function = i2o_retry_run;
500 //	printk("SCSI OSM at %d.\n", scsi_context);
	/* Walk every possible controller and register one virtual SCSI host
	 * per bus-adapter-class device found. */
502 	for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++)
504 		struct i2o_controller *c=i2o_find_controller(i);
505 		struct i2o_device *d;
507 		 *	This controller doesn't exist.
514 		 *	Fixme - we need some altered device locking. This
515 		 *	is racing with device addition in theory. Easy to fix.
518 		for(d=c->devices;d!=NULL;d=d->next)
521 			 *	bus_adapter, SCSI (obsolete), or FibreChannel busses only
523 			if(    (d->lct_data.class_id!=I2O_CLASS_BUS_ADAPTER_PORT)	// bus_adapter
524 //			    && (d->lct_data.class_id!=I2O_CLASS_FIBRE_CHANNEL_PORT)	// FC_PORT
528 			shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host));
			/* FIXME: pointer truncated into a u32 id — 64-bit unsafe. */
531 			shpnt->unique_id = (u32)d;
533 			shpnt->n_io_port = 0;
535 			shpnt->this_id = /* Good question */15;
536 			i2o_scsi_init(c, d, shpnt);
540 	i2o_scsi_hosts = count;
	/* Nothing registered: undo the shared resources acquired above. */
544 		if(sg_chain_pool!=NULL)
546 			kfree(sg_chain_pool);
547 			sg_chain_pool = NULL;
550 		del_timer(&retry_timer);
551 		i2o_remove_handler(&i2o_scsi_handler);
/* Release one virtual host; tear down the module-wide resources (chain
 * pool, retry timer, OSM handler) only when the last host goes away. */
557 static int i2o_scsi_release(struct Scsi_Host *host)
559 	if(--i2o_scsi_hosts==0)
561 		if(sg_chain_pool!=NULL)
563 			kfree(sg_chain_pool);
564 			sg_chain_pool = NULL;
567 		del_timer(&retry_timer);
568 		i2o_remove_handler(&i2o_scsi_handler);
571 	scsi_unregister(host);
/**
 *	i2o_scsi_info		-	describe a host for the scsi layer
 *	@SChost: host being queried
 *
 *	Returns the name string of the owning I2O controller.
 */
577 static const char *i2o_scsi_info(struct Scsi_Host *SChost)
579 	struct i2o_scsi_host *hostdata;
580 	hostdata = (struct i2o_scsi_host *)SChost->hostdata;
581 	return(&hostdata->controller->name[0]);
585  *	i2o_scsi_queuecommand	-	queue a SCSI command
586  *	@SCpnt: scsi command pointer
587  *	@done: callback for completion
589  *	Issue a scsi command asynchronously. Return 0 on success or 1 if
590  *	we hit an error (normally message queue congestion). The only
591  *	minor complication here is that I2O deals with the device addressing
592  *	so we have to map the bus/dev/lun back to an I2O handle as well
593  *	as faking absent devices ourself.
595  *	Locks: takes the controller lock on error path only
598 static int i2o_scsi_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
602 	struct i2o_controller *c;
603 	Scsi_Cmnd *current_command;
604 	struct Scsi_Host *host;
605 	struct i2o_scsi_host *hostdata;
616 	static int max_qd = 1;	/* high-water mark of queue_depth (debug) */
619 	 *	Do the incoming paperwork
622 	host = SCpnt->device->host;
623 	hostdata = (struct i2o_scsi_host *)host->hostdata;
625 	c = hostdata->controller;
627 	prefetchw(&queue_depth);
629 	SCpnt->scsi_done = done;
	/* task[] is 16x8; reject targets outside the mapped range. */
631 	if(SCpnt->device->id > 15)
633 		printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->device->id);
	/* Translate (target,lun) to the I2O TID recorded by i2o_scsi_init();
	 * -1 means no such device was enumerated. */
637 	tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
639 	dprintk(KERN_INFO "qcmd: Tid = %d\n", tid);
641 	current_command = SCpnt;		/* set current command */
642 	current_command->scsi_done = done;	/* set ptr to done function */
644 	/* We don't have such a device. Pretend we did the command
645 	   and that selection timed out */
649 		SCpnt->result = DID_NO_CONNECT << 16;
650 		spin_lock_irqsave(host->host_lock, flags);
652 		spin_unlock_irqrestore(host->host_lock, flags);
656 	dprintk(KERN_INFO "Real scsi messages.\n");
659 	 *	Obtain an I2O message. If there are none free then
660 	 *	throw it back to the scsi layer
663 	m = le32_to_cpu(I2O_POST_READ32(c));
667 	msg = (u32 *)(c->mem_offset + m);
670 	 *	Put together a scsi execscb message
673 	len = SCpnt->request_bufflen;
674 	direction = 0x00000000;			// SGL IN  (osm<--iop)
	/* Derive both the SGL direction flag and the SCSI transfer
	 * direction bits from the midlayer's data direction. */
676 	if(SCpnt->sc_data_direction == SCSI_DATA_NONE)
677 		scsidir = 0x00000000;			// DATA NO XFER
678 	else if(SCpnt->sc_data_direction == SCSI_DATA_WRITE)
680 		direction=0x04000000;	// SGL OUT  (osm-->iop)
681 		scsidir  =0x80000000;	// DATA OUT (iop-->dev)
683 	else if(SCpnt->sc_data_direction == SCSI_DATA_READ)
685 		scsidir  =0x40000000;	// DATA IN  (iop<--dev)
689 		/* Unknown - kill the command */
690 		SCpnt->result = DID_NO_CONNECT << 16;
692 		/* We must lock the request queue while completing */
693 		spin_lock_irqsave(host->host_lock, flags);
695 		spin_unlock_irqrestore(host->host_lock, flags);
	/* Build the EXEC SCB request header in the (iomapped) frame. */
700 	i2o_raw_writel(I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid, &msg[1]);
701 	i2o_raw_writel(scsi_context, &msg[2]);	/* So the I2O layer passes to us */
702 	/* Sorry 64bit folks. FIXME */
703 	i2o_raw_writel((u32)SCpnt, &msg[3]);	/* We want the SCSI control block back */
707 	 * Intermittent observations of msg frame word data corruption
708 	 * observed on msg[4] after:
709 	 *   WRITE, READ-MODIFY-WRITE
710 	 * operations.  19990606 -sralston
712 	 * (Hence we build this word via tag. It's good practice anyway
713 	 *  we don't want fetches over PCI needlessly)
719 	 *	Attach tags to the devices
721 	if(SCpnt->device->tagged_supported)
724 		 *	Some drives are too stupid to handle fairness issues
725 		 *	with tagged queueing. We throw in the odd ordered
726 		 *	tag to stop them starving themselves.
		/* Every 5 seconds per device force an ordered tag. */
728 		if((jiffies - hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]) > (5*HZ))
730 			tag=0x01800000;		/* ORDERED! */
731 			hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]=jiffies;
735 			/* Hmmm...  I always see value of 0 here,
736 			 *  of which {HEAD_OF, ORDERED, SIMPLE} are NOT!  -sralston
738 			if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
740 			else if(SCpnt->tag == ORDERED_QUEUE_TAG)
745 	/* Direction, disconnect ok, tag, CDBLen */
746 	i2o_raw_writel(scsidir|0x20000000|SCpnt->cmd_len|tag, &msg[4]);
751 	 *	Write SCSI command into the message - always 16 byte block
754 	memcpy_toio(mptr, SCpnt->cmnd, 16);
756 	lenptr=mptr++;		/* Remember me - fill in when we know */
758 	reqlen = 12;		// SINGLE SGE
761 	 *	Now fill in the SGList and command
763 	 *	FIXME: we need to set the sglist limits according to the
764 	 *	message size of the I2O controller. We might only have room
765 	 *	for 6 or so worst case
770 		struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
		/* Map the whole SG list for DMA in one go. */
776 		sg_count = pci_map_sg(c->pdev, sg, SCpnt->use_sg,
777 				      scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
779 		/* FIXME: handle fail */
		/* Long SG lists go out-of-line: point the frame at a chain
		 * buffer from the pool and write the elements there instead. */
783 		if((sg_max_frags > 11) && (SCpnt->use_sg > 11))
789 			i2o_raw_writel(direction|0xB0000000|(SCpnt->use_sg*2*4), mptr++);
790 			i2o_raw_writel(virt_to_bus(sg_chain_pool + sg_chain_tag), mptr);
791 			mptr = (u32*)(sg_chain_pool + sg_chain_tag);
792 			if (SCpnt->use_sg > max_sg_len)
794 				max_sg_len = SCpnt->use_sg;
795 				printk("i2o_scsi: Chain SG! SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n",
796 					SCpnt, SCpnt->use_sg, sg_chain_tag);
			/* Round-robin the pool slots.  NOTE(review): nothing
			 * visible here prevents reuse of a slot while its
			 * command is still outstanding — see the resource
			 * management FIXME in the file header. */
798 			if ( ++sg_chain_tag == SG_MAX_BUFS )
			/* Chain buffer lives in host RAM, so plain stores. */
800 			for(i = 0 ; i < SCpnt->use_sg; i++)
802 				*mptr++=cpu_to_le32(direction|0x10000000|sg_dma_len(sg));
804 				*mptr++=cpu_to_le32(sg_dma_address(sg));
			/* Patch the final element's flags to END_OF_LIST. */
807 			mptr[-2]=cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1));
		/* Short SG lists are written inline into the message frame. */
811 			for(i = 0 ; i < SCpnt->use_sg; i++)
813 				i2o_raw_writel(direction|0x10000000|sg_dma_len(sg), mptr++);
815 				i2o_raw_writel(sg_dma_address(sg), mptr++);
819 			/* Make this an end of list. Again evade the 920 bug and
820 			   unwanted PCI read traffic */
822 			i2o_raw_writel(direction|0xD0000000|sg_dma_len(sg-1), &mptr[-2]);
828 		i2o_raw_writel(len, lenptr);
830 		if(len != SCpnt->underflow)
831 			printk("Cmd len %08X Cmd underflow %08X\n",
832 				len, SCpnt->underflow);
	/* Non-SG path: a single flat buffer, mapped and described by one
	 * END_OF_LIST SG element. */
836 		dprintk(KERN_INFO "non sg for %p, %d\n", SCpnt->request_buffer,
837 				SCpnt->request_bufflen);
838 		i2o_raw_writel(len = SCpnt->request_bufflen, lenptr);
846 			dma_addr = pci_map_single(c->pdev,
847 						  SCpnt->request_buffer,
848 						  SCpnt->request_bufflen,
849 						  scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
851 				BUG();	/* How to handle ?? */
			/* Stash the mapping so the reply path can unmap it. */
852 			SCpnt->SCp.ptr = (char *)(unsigned long) dma_addr;
853 			i2o_raw_writel(0xD0000000|direction|SCpnt->request_bufflen, mptr++);
854 			i2o_raw_writel(dma_addr, mptr++);
859 	 *	Stick the headers on
862 	i2o_raw_writel(reqlen<<16 | SGL_OFFSET_10, msg);
864 	/* Queue the message */
865 	i2o_post_message(c,m);
867 	atomic_inc(&queue_depth);
869 	if(atomic_read(&queue_depth)> max_qd)
871 		max_qd=atomic_read(&queue_depth);
872 		printk("Queue depth now %d.\n", max_qd);
876 	dprintk(KERN_INFO "Issued %ld\n", current_command->serial_number);
882  *	i2o_scsi_abort	-	abort a running command
883  *	@SCpnt: command to abort
885  *	Ask the I2O controller to abort a command. This is an asynchronous
886  *	process and our callback handler will see the command complete
887  *	with an aborted message if it succeeds.
889  *	Locks: no locks are held or needed
892 int i2o_scsi_abort(Scsi_Cmnd * SCpnt)
894 	struct i2o_controller *c;
895 	struct Scsi_Host *host;
896 	struct i2o_scsi_host *hostdata;
900 	unsigned long timeout;
902 	printk(KERN_WARNING "i2o_scsi: Aborting command block.\n");
904 	host = SCpnt->device->host;
905 	hostdata = (struct i2o_scsi_host *)host->hostdata;
	/* A command for an unmapped (target,lun) cannot be in flight. */
906 	tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
909 		printk(KERN_ERR "i2o_scsi: Impossible command to abort!\n");
912 	c = hostdata->controller;
	/* Drop the host lock (held by the EH caller) while we busy-wait up
	 * to 2 seconds for a free message frame. */
914 	spin_unlock_irq(host->host_lock);
916 	timeout = jiffies+2*HZ;
919 		m = le32_to_cpu(I2O_POST_READ32(c));
922 		set_current_state(TASK_UNINTERRUPTIBLE);
926 	while(time_before(jiffies, timeout));
928 	msg = c->mem_offset + m;
	/* SCSI_ABORT request: the command context in word 4 (offset 16)
	 * identifies which Scsi_Cmnd the IOP should abort. */
930 	i2o_raw_writel(FIVE_WORD_MSG_SIZE, msg);
931 	i2o_raw_writel(I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid, msg+4);
932 	i2o_raw_writel(scsi_context, msg+8);
933 	i2o_raw_writel(0, msg+12);	/* Not needed for an abort */
934 	i2o_raw_writel((u32)SCpnt, msg+16);
936 	i2o_post_message(c,m);
	/* Reacquire the lock the EH caller expects us to hold on return. */
939 	spin_lock_irq(host->host_lock);
944  *	i2o_scsi_bus_reset		-	Issue a SCSI reset
945  *	@SCpnt: the command that caused the reset
947  *	Perform a SCSI bus reset operation. In I2O this is just a message
948  *	we pass. I2O can do clever multi-initiator and shared reset stuff
949  *	but we don't support this.
951  *	Locks: called with no lock held, requires no locks.
954 static int i2o_scsi_bus_reset(Scsi_Cmnd * SCpnt)
957 	struct i2o_controller *c;
958 	struct Scsi_Host *host;
959 	struct i2o_scsi_host *hostdata;
962 	unsigned long timeout;
966 	 *	Find the TID for the bus
970 	host = SCpnt->device->host;
	/* Drop the host lock while we busy-wait for a free message frame. */
972 	spin_unlock_irq(host->host_lock);
974 	printk(KERN_WARNING "i2o_scsi: Attempting to reset the bus.\n");
976 	hostdata = (struct i2o_scsi_host *)host->hostdata;
	/* The reset is addressed to the bus adapter TID, not a unit TID. */
977 	tid = hostdata->bus_task;
978 	c = hostdata->controller;
981 	 *	Now send a SCSI reset request. Any remaining commands
982 	 *	will be aborted by the IOP. We need to catch the reply
	/* Up to 2 seconds waiting for a frame, as in i2o_scsi_abort(). */
986 	timeout = jiffies+2*HZ;
989 		m = le32_to_cpu(I2O_POST_READ32(c));
992 		set_current_state(TASK_UNINTERRUPTIBLE);
996 	while(time_before(jiffies, timeout));
999 	msg = c->mem_offset + m;
1000 	i2o_raw_writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, msg);
1001 	i2o_raw_writel(I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid, msg+4);
1002 	i2o_raw_writel(scsi_context|0x80000000, msg+8);
1003 	/* We use the top bit to split controller and unit transactions */
1004 	/* Now store unit,tid so we can tie the completion back to a specific device */
1005 	__raw_writel(c->unit << 16 | tid, msg+12);
1008 	/* We want the command to complete after we return */
1009 	spin_lock_irq(host->host_lock);
1010 	i2o_post_message(c,m);
1012 	/* Should we wait for the reset to complete ? */
1017  *	i2o_scsi_host_reset	-	host reset callback
1018  *	@SCpnt: command causing the reset
1020  *	An I2O controller can be many things at once. While we can
1021  *	reset a controller the potential mess from doing so is vast, and
1022  *	it's better to simply hold on and pray
1025 static int i2o_scsi_host_reset(Scsi_Cmnd * SCpnt)
1031  *	i2o_scsi_device_reset	-	device reset callback
1032  *	@SCpnt: command causing the reset
1034  *	I2O does not (AFAIK) support doing a device reset
1037 static int i2o_scsi_device_reset(Scsi_Cmnd * SCpnt)
1043  *	i2o_scsi_bios_param	-	Invent disk geometry
1044  *	@sdev: scsi device
1045  *	@dev: block layer device
1046  *	@capacity: size in sectors
1047  *	@ip: geometry array
1049  *	This is anyones guess quite frankly. We use the same rules everyone
1050  *	else appears to and hope. It seems to work.
1053 static int i2o_scsi_bios_param(struct scsi_device * sdev,
1054 		struct block_device *dev, sector_t capacity, int *ip)
	/* Standard fake geometry: 64 heads x 32 sectors, switching to
	 * 255 x 63 when that would give more than 1024 cylinders. */
1059 	ip[0] = 64;		/* heads */
1060 	ip[1] = 32;		/* sectors */
1061 	if((ip[2] = size >> 11) > 1024) {	/* cylinders, test for big disk */
1062 		ip[0] = 255;	/* heads */
1063 		ip[1] = 63;	/* sectors */
1064 		ip[2] = size / (255 * 63);	/* cylinders */
1069 MODULE_AUTHOR("Red Hat Software");
1070 MODULE_LICENSE("GPL");
/* Host template handed to the SCSI midlayer by scsi_module.c; wires up the
 * entry points defined above. */
1073 static Scsi_Host_Template driver_template = {
1074 	.proc_name		= "i2o_scsi",
1075 	.name			= "I2O SCSI Layer",
1076 	.detect			= i2o_scsi_detect,
1077 	.release		= i2o_scsi_release,
1078 	.info			= i2o_scsi_info,
1079 	.queuecommand		= i2o_scsi_queuecommand,
1080 	.eh_abort_handler	= i2o_scsi_abort,
1081 	.eh_bus_reset_handler	= i2o_scsi_bus_reset,
1082 	.eh_device_reset_handler= i2o_scsi_device_reset,
1083 	.eh_host_reset_handler	= i2o_scsi_host_reset,
1084 	.bios_param		= i2o_scsi_bios_param,
1085 	.can_queue		= I2O_SCSI_CAN_QUEUE,
1089 	.use_clustering		= ENABLE_CLUSTERING,
/* scsi_module.c supplies module init/exit that register this template. */
1092 #include "../../scsi/scsi_module.c"