/* drivers/message/i2o/i2o_scsi.c (linux-2.6, VServer 1.9.2 patch-2.6.8.1-vs1.9.2.diff) */
/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 *
 *  Complications for I2O scsi
 *
 *      o       Each (bus,lun) is a logical device in I2O. We keep a map
 *              table. We spoof failed selection for unmapped units.
 *      o       Request sense buffers can come back for free.
 *      o       Scatter gather is a bit dynamic. We have to investigate at
 *              setup time.
 *      o       Some of our resources are dynamically shared. The i2o core
 *              needs a message reservation protocol to avoid swap v net
 *              deadlocking. We need to back off queue requests.
 *
 *      In general the firmware wants to help. Where its help isn't useful
 *      for performance we just ignore the aid; it's not worth the code,
 *      in truth.
 *
 * Fixes/additions:
 *      Steve Ralston:
 *              Scatter gather now works
 *      Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *              Minor fixes for 2.6.
 *
 * To Do:
 *      64-bit cleanups
 *      Fix the resource management problems.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/i2o.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>


#define VERSION_STRING        "Version 0.1.2"

//#define DRIVERDEBUG

#ifdef DRIVERDEBUG
#define dprintk(s, args...) printk(s, ## args)
#else
#define dprintk(s, args...)
#endif

#define I2O_SCSI_CAN_QUEUE      4
#define MAXHOSTS                32

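/*
 *      Per-host device map: task[target][lun] holds the I2O TID for each
 *      (target,lun) pair, or -1 if nothing is mapped at that address.
 *      Unmapped units are completed with DID_NO_CONNECT in queuecommand,
 *      which spoofs the failed selection described at the top of this file.
 */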
struct i2o_scsi_host
{
        struct i2o_controller *controller;
        s16 task[16][8];                /* Allow 16 devices for now */
        unsigned long tagclock[16][8];  /* Tag clock for queueing */
        s16 bus_task;           /* The adapter TID */
};

static int scsi_context;
static int lun_done;
static int i2o_scsi_hosts;

static u32 *retry[32];
static struct i2o_controller *retry_ctrl[32];
static struct timer_list retry_timer;
static spinlock_t retry_lock = SPIN_LOCK_UNLOCKED;
static int retry_ct = 0;

static atomic_t queue_depth;

/*
 *      SG Chain buffer support...
 */

#define SG_MAX_FRAGS            64

/*
 *      FIXME: we should allocate one of these per bus we find as we
 *      locate them, not in a lump at boot.
 */

typedef struct _chain_buf
{
        u32 sg_flags_cnt[SG_MAX_FRAGS];
        u32 sg_buf[SG_MAX_FRAGS];
} chain_buf;

#define SG_CHAIN_BUF_SZ sizeof(chain_buf)

#define SG_MAX_BUFS             (i2o_num_controllers * I2O_SCSI_CAN_QUEUE)
#define SG_CHAIN_POOL_SZ        (SG_MAX_BUFS * SG_CHAIN_BUF_SZ)

static int max_sg_len = 0;
static chain_buf *sg_chain_pool = NULL;
static int sg_chain_tag = 0;
static int sg_max_frags = SG_MAX_FRAGS;

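/*
 *      Sizing note (illustrative): each chain_buf is SG_MAX_FRAGS * 2 * 4 =
 *      512 bytes, and SG_MAX_BUFS allows I2O_SCSI_CAN_QUEUE (4) buffers per
 *      controller, so a system with two controllers would reserve
 *      2 * 4 * 512 = 4096 bytes of chain pool at init time.
 */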
/**
 *      i2o_retry_run           -       retry on timeout
 *      @f: unused
 *
 *      Retry congested frames. This actually needs pushing down into
 *      i2o core. We should only bother the OSM with this when we can't
 *      queue and retry the frame. Or perhaps we should call the OSM,
 *      let its default handler be this code in the core, and have that
 *      call a second "I give up" handler in the OSM?
 */

static void i2o_retry_run(unsigned long f)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&retry_lock, flags);
        for(i=0;i<retry_ct;i++)
                i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
        retry_ct=0;
        spin_unlock_irqrestore(&retry_lock, flags);
}

/**
 *      flush_pending           -       empty the retry queue
 *
 *      Turn each of the pending commands into a NOP and post it back
 *      to the controller to clear it.
 */

static void flush_pending(void)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&retry_lock, flags);
        for(i=0;i<retry_ct;i++)
        {
                retry[i][0]&=~0xFFFFFF;
                retry[i][0]|=I2O_CMD_UTIL_NOP<<24;
                i2o_post_message(retry_ctrl[i],virt_to_bus(retry[i]));
        }
        retry_ct=0;
        spin_unlock_irqrestore(&retry_lock, flags);
}

/**
 *      i2o_scsi_reply          -       scsi message reply processor
 *      @h: our i2o handler
 *      @c: controller issuing the reply
 *      @msg: the message from the controller (mapped)
 *
 *      Process reply messages (interrupts in normal scsi controller think).
 *      We can get a variety of messages to process. The normal path is
 *      scsi command completions. We must also deal with IOP failures,
 *      the reply to a bus reset and the reply to a LUN query.
 *
 *      Locks: the queue lock is taken to call the completion handler
 */

static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
{
        struct scsi_cmnd *current_command;
        spinlock_t *lock;
        u32 *m = (u32 *)msg;
        u8 as,ds,st;
        unsigned long flags;

        if(m[0] & (1<<13))
        {
                printk("IOP fail.\n");
                printk("From %d To %d Cmd %d.\n",
                        (m[1]>>12)&0xFFF,
                        m[1]&0xFFF,
                        m[1]>>24);
                printk("Failure Code %d.\n", m[4]>>24);
                if(m[4]&(1<<16))
                        printk("Format error.\n");
                if(m[4]&(1<<17))
                        printk("Path error.\n");
                if(m[4]&(1<<18))
                        printk("Path State.\n");
                if(m[4]&(1<<18))        /* FIXME: same bit tested twice; one of these is presumably wrong */
                        printk("Congestion.\n");

                m=(u32 *)bus_to_virt(m[7]);
                printk("Failing message is %p.\n", m);

                /* This isn't a fast path .. */
                spin_lock_irqsave(&retry_lock, flags);

                if((m[4]&(1<<18)) && retry_ct < 32)
                {
                        retry_ctrl[retry_ct]=c;
                        retry[retry_ct]=m;
                        if(!retry_ct++)
                        {
                                retry_timer.expires=jiffies+1;
                                add_timer(&retry_timer);
                        }
                        spin_unlock_irqrestore(&retry_lock, flags);
                }
                else
                {
                        spin_unlock_irqrestore(&retry_lock, flags);
                        /* Create a scsi error for this */
                        current_command = (struct scsi_cmnd *)i2o_context_list_get(m[3], c);
                        if(!current_command)
                                return;

                        lock = current_command->device->host->host_lock;
                        printk("Aborted %ld\n", current_command->serial_number);

                        spin_lock_irqsave(lock, flags);
                        current_command->result = DID_ERROR << 16;
                        current_command->scsi_done(current_command);
                        spin_unlock_irqrestore(lock, flags);

                        /* Now flush the message by making it a NOP */
                        m[0]&=0x00FFFFFF;
                        m[0]|=(I2O_CMD_UTIL_NOP)<<24;
                        i2o_post_message(c,virt_to_bus(m));
                }
                return;
        }

        prefetchw(&queue_depth);


        /*
         *      Low byte is device status, next is adapter status,
         *      (then one byte reserved), then request status.
         */
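        /*
         *      Illustrative decode (assuming the layout above): a reply word
         *      m[4] of 0x08000102 would give ds = 0x02 (CHECK CONDITION),
         *      as = 0x01 and st = 0x08.
         */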
        ds=(u8)le32_to_cpu(m[4]);
        as=(u8)(le32_to_cpu(m[4])>>8);
        st=(u8)(le32_to_cpu(m[4])>>24);

        dprintk(KERN_INFO "i2o got a scsi reply %08X: ", m[0]);
        dprintk(KERN_INFO "m[2]=%08X: ", m[2]);
        dprintk(KERN_INFO "m[4]=%08X\n", m[4]);

        if(m[2]&0x80000000)
        {
                if(m[2]&0x40000000)
                {
                        dprintk(KERN_INFO "Event.\n");
                        lun_done=1;
                        return;
                }
                printk(KERN_INFO "i2o_scsi: bus reset completed.\n");
                return;
        }

        current_command = (struct scsi_cmnd *)i2o_context_list_get(m[3], c);

        /*
         *      Is this a control request coming back - eg an abort ?
         */

        atomic_dec(&queue_depth);

        if(current_command==NULL)
        {
                if(st)
                        dprintk(KERN_WARNING "SCSI abort: %08X", m[4]);
                dprintk(KERN_INFO "SCSI abort completed.\n");
                return;
        }

        dprintk(KERN_INFO "Completed %ld\n", current_command->serial_number);

        if(st == 0x06)
        {
                if(le32_to_cpu(m[5]) < current_command->underflow)
                {
                        int i;
                        printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n",
                                le32_to_cpu(m[5]), current_command->underflow);
                        printk("Cmd: ");
                        for(i=0;i<15;i++)
                                printk("%02X ", current_command->cmnd[i]);
                        printk(".\n");
                }
                else st=0;
        }

        if(st)
        {
                /* An error has occurred */

                dprintk(KERN_WARNING "SCSI error %08X", m[4]);

                if (as == 0x0E)
                        /* SCSI Reset */
                        current_command->result = DID_RESET << 16;
                else if (as == 0x0F)
                        current_command->result = DID_PARITY << 16;
                else
                        current_command->result = DID_ERROR << 16;
        }
        else
                /*
                 *      It worked, maybe?
                 */
                current_command->result = DID_OK << 16 | ds;

        if (current_command->use_sg) {
                pci_unmap_sg(c->pdev,
                        (struct scatterlist *)current_command->buffer,
                        current_command->use_sg,
                        current_command->sc_data_direction);
        } else if (current_command->request_bufflen) {
                pci_unmap_single(c->pdev,
                        (dma_addr_t)((long)current_command->SCp.ptr),
                        current_command->request_bufflen,
                        current_command->sc_data_direction);
        }

        lock = current_command->device->host->host_lock;
        spin_lock_irqsave(lock, flags);
        current_command->scsi_done(current_command);
        spin_unlock_irqrestore(lock, flags);
        return;
}

struct i2o_handler i2o_scsi_handler = {
        .reply  = i2o_scsi_reply,
        .name   = "I2O SCSI OSM",
        .class  = I2O_CLASS_SCSI_PERIPHERAL,
};

/**
 *      i2o_find_lun            -       report the lun of an i2o device
 *      @c: i2o controller owning the device
 *      @d: i2o disk device
 *      @target: filled in with target id
 *      @lun: filled in with target lun
 *
 *      Query an I2O device to find out its SCSI lun and target numbering. We
 *      don't currently handle some of the fancy SCSI-3 stuff although our
 *      querying is sufficient to do so.
 */

static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun)
{
        u8 reply[8];

        if(i2o_query_scalar(c, d->lct_data.tid, 0, 3, reply, 4)<0)
                return -1;

        *target=reply[0];

        if(i2o_query_scalar(c, d->lct_data.tid, 0, 4, reply, 8)<0)
                return -1;

        *lun=reply[1];

        dprintk(KERN_INFO "SCSI (%d,%d)\n", *target, *lun);
        return 0;
}

/**
 *      i2o_scsi_init           -       initialize an i2o device for scsi
 *      @c: i2o controller owning the device
 *      @d: scsi controller
 *      @shpnt: scsi device we wish it to become
 *
 *      Enumerate the scsi peripheral/fibre channel peripheral class
 *      devices that are children of the controller. From that we build
 *      a translation map for the command queue code. Since I2O works on
 *      its own TIDs we effectively have to think backwards to get what
 *      the midlayer wants.
 */

static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt)
{
        struct i2o_device *unit;
        struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata;
        int lun;
        int target;

        h->controller=c;
        h->bus_task=d->lct_data.tid;

        for(target=0;target<16;target++)
                for(lun=0;lun<8;lun++)
                        h->task[target][lun] = -1;

        for(unit=c->devices;unit!=NULL;unit=unit->next)
        {
                dprintk(KERN_INFO "Class %03X, parent %d, want %d.\n",
                        unit->lct_data.class_id, unit->lct_data.parent_tid, d->lct_data.tid);

                /* Only look at scsi and fc devices */
                if (    (unit->lct_data.class_id != I2O_CLASS_SCSI_PERIPHERAL)
                     && (unit->lct_data.class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL)
                   )
                        continue;

                /* On our bus ? */
                dprintk(KERN_INFO "Found a disk (%d).\n", unit->lct_data.tid);
                if ((unit->lct_data.parent_tid == d->lct_data.tid)
                     || (unit->lct_data.parent_tid == d->lct_data.parent_tid)
                   )
                {
                        u16 limit;
                        dprintk(KERN_INFO "It's ours.\n");
                        if(i2o_find_lun(c, unit, &target, &lun)==-1)
                        {
                                printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", unit->lct_data.tid);
                                continue;
                        }
                        dprintk(KERN_INFO "Found disk %d %d.\n", target, lun);
                        h->task[target][lun]=unit->lct_data.tid;
                        h->tagclock[target][lun]=jiffies;

                        /* Get the max fragments/request */
                        i2o_query_scalar(c, d->lct_data.tid, 0xF103, 3, &limit, 2);

                        /* sanity */
                        if ( limit == 0 )
                        {
                                printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n");
                                limit = 1;
                        }

                        shpnt->sg_tablesize = limit;

                        dprintk(KERN_INFO "i2o_scsi: set scatter-gather to %d.\n",
                                shpnt->sg_tablesize);
                }
        }
}

/**
 *      i2o_scsi_detect         -       probe for I2O scsi devices
 *      @tpnt: scsi layer template
 *
 *      I2O is a little odd here. The I2O core already knows what the
 *      devices are. It also knows them by disk and tape as well as
 *      by controller. We register each I2O scsi class object as a
 *      scsi controller and then let the enumeration fake up the rest
 */

static int i2o_scsi_detect(struct scsi_host_template * tpnt)
{
        struct Scsi_Host *shpnt = NULL;
        int i;
        int count;

        printk(KERN_INFO "i2o_scsi.c: %s\n", VERSION_STRING);

        if(i2o_install_handler(&i2o_scsi_handler)<0)
        {
                printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n");
                return 0;
        }
        scsi_context = i2o_scsi_handler.context;

        if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL)
        {
                printk(KERN_INFO "i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ);
                printk(KERN_INFO "i2o_scsi: SG chaining DISABLED!\n");
                sg_max_frags = 11;
        }
        else
        {
                printk(KERN_INFO "  chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool);
                printk(KERN_INFO "  (%d byte buffers X %d can_queue X %d i2o controllers)\n",
                                SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers);
                sg_max_frags = SG_MAX_FRAGS;    // 64
        }

        init_timer(&retry_timer);
        retry_timer.data = 0UL;
        retry_timer.function = i2o_retry_run;

//      printk("SCSI OSM at %d.\n", scsi_context);

        for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++)
        {
                struct i2o_controller *c=i2o_find_controller(i);
                struct i2o_device *d;
                /*
                 *      This controller doesn't exist.
                 */

                if(c==NULL)
                        continue;

                /*
                 *      Fixme - we need some altered device locking. This
                 *      is racing with device addition in theory. Easy to fix.
                 */

                for(d=c->devices;d!=NULL;d=d->next)
                {
                        /*
                         *      bus_adapter, SCSI (obsolete), or FibreChannel busses only
                         */
                        if(    (d->lct_data.class_id!=I2O_CLASS_BUS_ADAPTER_PORT)       // bus_adapter
//                          && (d->lct_data.class_id!=I2O_CLASS_FIBRE_CHANNEL_PORT)     // FC_PORT
                          )
                                continue;

                        shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host));
                        if(shpnt==NULL)
                                continue;
                        shpnt->unique_id = (u32)d;
                        shpnt->io_port = 0;
                        shpnt->n_io_port = 0;
                        shpnt->irq = 0;
                        shpnt->this_id = /* Good question */15;
                        i2o_scsi_init(c, d, shpnt);
                        count++;
                }
        }
        i2o_scsi_hosts = count;

        if(count==0)
        {
                if(sg_chain_pool!=NULL)
                {
                        kfree(sg_chain_pool);
                        sg_chain_pool = NULL;
                }
                flush_pending();
                del_timer(&retry_timer);
                i2o_remove_handler(&i2o_scsi_handler);
        }

        return count;
}

static int i2o_scsi_release(struct Scsi_Host *host)
{
        if(--i2o_scsi_hosts==0)
        {
                if(sg_chain_pool!=NULL)
                {
                        kfree(sg_chain_pool);
                        sg_chain_pool = NULL;
                }
                flush_pending();
                del_timer(&retry_timer);
                i2o_remove_handler(&i2o_scsi_handler);
        }

        scsi_unregister(host);

        return 0;
}


static const char *i2o_scsi_info(struct Scsi_Host *SChost)
{
        struct i2o_scsi_host *hostdata;
        hostdata = (struct i2o_scsi_host *)SChost->hostdata;
        return(&hostdata->controller->name[0]);
}

/**
 *      i2o_scsi_queuecommand   -       queue a SCSI command
 *      @SCpnt: scsi command pointer
 *      @done: callback for completion
 *
 *      Issue a scsi command asynchronously. Return 0 on success or 1 if
 *      we hit an error (normally message queue congestion). The only
 *      minor complication here is that I2O deals with the device addressing
 *      so we have to map the bus/dev/lun back to an I2O handle as well
 *      as faking absent devices ourselves.
 *
 *      Locks: takes the controller lock on error path only
 */

static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
                                 void (*done) (struct scsi_cmnd *))
{
        int i;
        int tid;
        struct i2o_controller *c;
        struct scsi_cmnd *current_command;
        struct Scsi_Host *host;
        struct i2o_scsi_host *hostdata;
        u32 *msg, *mptr;
        u32 m;
        u32 *lenptr;
        int direction;
        int scsidir;
        u32 len;
        u32 reqlen;
        u32 tag;
        unsigned long flags;

        static int max_qd = 1;

        /*
         *      Do the incoming paperwork
         */

        host = SCpnt->device->host;
        hostdata = (struct i2o_scsi_host *)host->hostdata;

        c = hostdata->controller;
        prefetch(c);
        prefetchw(&queue_depth);

        SCpnt->scsi_done = done;

        if(SCpnt->device->id > 15)
        {
                printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->device->id);
                return -1;
        }

        tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];

        dprintk(KERN_INFO "qcmd: Tid = %d\n", tid);

        current_command = SCpnt;                /* set current command                */
        current_command->scsi_done = done;      /* set ptr to done function           */

        /* We don't have such a device. Pretend we did the command
           and that selection timed out */

        if(tid == -1)
        {
                SCpnt->result = DID_NO_CONNECT << 16;
                done(SCpnt);
                return 0;
        }

        dprintk(KERN_INFO "Real scsi messages.\n");

        /*
         *      Obtain an I2O message. If there are none free then
         *      throw it back to the scsi layer
         */

        m = le32_to_cpu(I2O_POST_READ32(c));
        if(m==0xFFFFFFFF)
                return 1;

        msg = (u32 *)(c->msg_virt + m);

        /*
         *      Put together a scsi execscb message
         */
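        /*
         *      Frame layout as built below (sketch, for reference only):
         *        word 0   message size (reqlen) and SGL_OFFSET_10
         *        word 1   I2O_CMD_SCSI_EXEC, initiator HOST_TID, target tid
         *        word 2   scsi_context, so the reply is routed back to us
         *        word 3   transaction context for this scsi_cmnd
         *        word 4   direction, disconnect flag, queue tag and CDB length
         *        words 5-8  the 16 byte CDB
         *        word 9   total byte count (filled in via lenptr)
         *        word 10+ the SG list, hence reqlen is 12 for a single SGE
         */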

        len = SCpnt->request_bufflen;
        direction = 0x00000000;                 // SGL IN  (osm<--iop)

        if (SCpnt->sc_data_direction == DMA_NONE) {
                scsidir = 0x00000000;                   // DATA NO XFER
        } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
                direction = 0x04000000; // SGL OUT  (osm-->iop)
                scsidir = 0x80000000;   // DATA OUT (iop-->dev)
        } else if(SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
                scsidir = 0x40000000;   // DATA IN  (iop<--dev)
        } else {
                /* Unknown - kill the command */
                SCpnt->result = DID_NO_CONNECT << 16;

                /* We must lock the request queue while completing */
                spin_lock_irqsave(host->host_lock, flags);
                done(SCpnt);
                spin_unlock_irqrestore(host->host_lock, flags);
                return 0;
        }

        i2o_raw_writel(I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid, &msg[1]);
        i2o_raw_writel(scsi_context, &msg[2]);  /* So the I2O layer passes to us */
        i2o_raw_writel(i2o_context_list_add(SCpnt, c), &msg[3]);        /* We want the SCSI control block back */

        /* LSI_920_PCI_QUIRK
         *
         *      Intermittent msg frame word data corruption observed on
         *      msg[4] after WRITE and READ-MODIFY-WRITE
         *      operations.  19990606 -sralston
         *
         *      (Hence we build this word via tag. It's good practice anyway;
         *       we don't want fetches over PCI needlessly.)
         */

        tag=0;

        /*
         *      Attach tags to the devices
         */
        if(SCpnt->device->tagged_supported)
        {
                /*
                 *      Some drives are too stupid to handle fairness issues
                 *      with tagged queueing. We throw in the odd ordered
                 *      tag to stop them starving themselves.
                 */
                if((jiffies - hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]) > (5*HZ))
                {
                        tag=0x01800000;         /* ORDERED! */
                        hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]=jiffies;
                }
                else
                {
                        /* Hmmm...  I always see value of 0 here,
                         *  of which {HEAD_OF, ORDERED, SIMPLE} are NOT!  -sralston
                         */
                        if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
                                tag=0x01000000;
                        else if(SCpnt->tag == ORDERED_QUEUE_TAG)
                                tag=0x01800000;
                }
        }

        /* Direction, disconnect ok, tag, CDBLen */
        i2o_raw_writel(scsidir|0x20000000|SCpnt->cmd_len|tag, &msg[4]);

        mptr=msg+5;

        /*
         *      Write SCSI command into the message - always 16 byte block
         */

        memcpy_toio(mptr, SCpnt->cmnd, 16);
        mptr+=4;
        lenptr=mptr++;          /* Remember me - fill in when we know */

        reqlen = 12;            // SINGLE SGE

        /*
         *      Now fill in the SGList and command
         *
         *      FIXME: we need to set the sglist limits according to the
         *      message size of the I2O controller. We might only have room
         *      for 6 or so worst case
         */

        if(SCpnt->use_sg)
        {
                struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
                int sg_count;
                int chain = 0;

                len = 0;

                sg_count = pci_map_sg(c->pdev, sg, SCpnt->use_sg,
                                SCpnt->sc_data_direction);

                /* FIXME: handle fail */
                if(!sg_count)
                        BUG();

                if((sg_max_frags > 11) && (SCpnt->use_sg > 11))
                {
                        chain = 1;
                        /*
                         *      Need to chain!
                         */
                        i2o_raw_writel(direction|0xB0000000|(SCpnt->use_sg*2*4), mptr++);
                        i2o_raw_writel(virt_to_bus(sg_chain_pool + sg_chain_tag), mptr);
                        mptr = (u32*)(sg_chain_pool + sg_chain_tag);
                        if (SCpnt->use_sg > max_sg_len)
                        {
                                max_sg_len = SCpnt->use_sg;
                                printk("i2o_scsi: Chain SG! SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n",
                                        SCpnt, SCpnt->use_sg, sg_chain_tag);
                        }
                        if ( ++sg_chain_tag == SG_MAX_BUFS )
                                sg_chain_tag = 0;
                        for(i = 0 ; i < SCpnt->use_sg; i++)
                        {
                                *mptr++=cpu_to_le32(direction|0x10000000|sg_dma_len(sg));
                                len+=sg_dma_len(sg);
                                *mptr++=cpu_to_le32(sg_dma_address(sg));
                                sg++;
                        }
                        mptr[-2]=cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1));
                }
                else
                {
                        for(i = 0 ; i < SCpnt->use_sg; i++)
                        {
                                i2o_raw_writel(direction|0x10000000|sg_dma_len(sg), mptr++);
                                len+=sg->length;
                                i2o_raw_writel(sg_dma_address(sg), mptr++);
                                sg++;
                        }

                        /* Make this an end of list. Again evade the 920 bug and
                           unwanted PCI read traffic */

                        i2o_raw_writel(direction|0xD0000000|sg_dma_len(sg-1), &mptr[-2]);
                }

                if(!chain)
                        reqlen = mptr - msg;

                i2o_raw_writel(len, lenptr);

                if(len != SCpnt->underflow)
                        printk("Cmd len %08X Cmd underflow %08X\n",
                                len, SCpnt->underflow);
        }
        else
        {
                dprintk(KERN_INFO "non sg for %p, %d\n", SCpnt->request_buffer,
                                SCpnt->request_bufflen);
                i2o_raw_writel(len = SCpnt->request_bufflen, lenptr);
                if(len == 0)
                {
                        reqlen = 9;
                }
                else
                {
                        dma_addr_t dma_addr;
                        dma_addr = pci_map_single(c->pdev,
                                               SCpnt->request_buffer,
                                               SCpnt->request_bufflen,
                                               SCpnt->sc_data_direction);
                        if(dma_addr == 0)
                                BUG();  /* How to handle ?? */
                        SCpnt->SCp.ptr = (char *)(unsigned long) dma_addr;
                        i2o_raw_writel(0xD0000000|direction|SCpnt->request_bufflen, mptr++);
                        i2o_raw_writel(dma_addr, mptr++);
                }
        }

        /*
         *      Stick the headers on
         */

        i2o_raw_writel(reqlen<<16 | SGL_OFFSET_10, msg);

        /* Queue the message */
        i2o_post_message(c,m);

        atomic_inc(&queue_depth);

        if(atomic_read(&queue_depth)> max_qd)
        {
                max_qd=atomic_read(&queue_depth);
                printk("Queue depth now %d.\n", max_qd);
        }

        mb();
        dprintk(KERN_INFO "Issued %ld\n", current_command->serial_number);

        return 0;
}

/**
 *      i2o_scsi_abort  -       abort a running command
 *      @SCpnt: command to abort
 *
 *      Ask the I2O controller to abort a command. This is an asynchronous
 *      process and our callback handler will see the command complete
 *      with an aborted message if it succeeds.
 *
 *      Locks: no locks are held or needed
 */

static int i2o_scsi_abort(struct scsi_cmnd * SCpnt)
{
        struct i2o_controller *c;
        struct Scsi_Host *host;
        struct i2o_scsi_host *hostdata;
        u32 msg[5];
        int tid;
        int status = FAILED;

        printk(KERN_WARNING "i2o_scsi: Aborting command block.\n");

        host = SCpnt->device->host;
        hostdata = (struct i2o_scsi_host *)host->hostdata;
        tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
        if(tid==-1)
        {
                printk(KERN_ERR "i2o_scsi: Impossible command to abort!\n");
                return status;
        }
        c = hostdata->controller;

        spin_unlock_irq(host->host_lock);

        msg[0] = FIVE_WORD_MSG_SIZE;
        msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid;
        msg[2] = scsi_context;
        msg[3] = 0;
        msg[4] = i2o_context_list_remove(SCpnt, c);
        if(i2o_post_wait(c, msg, sizeof(msg), 240))
                status = SUCCESS;

        spin_lock_irq(host->host_lock);
        return status;
}

/**
 *      i2o_scsi_bus_reset              -       Issue a SCSI reset
 *      @SCpnt: the command that caused the reset
 *
 *      Perform a SCSI bus reset operation. In I2O this is just a message
 *      we pass. I2O can do clever multi-initiator and shared reset stuff
 *      but we don't support this.
 *
 *      Locks: called with no lock held, requires no locks.
 */

static int i2o_scsi_bus_reset(struct scsi_cmnd * SCpnt)
{
        int tid;
        struct i2o_controller *c;
        struct Scsi_Host *host;
        struct i2o_scsi_host *hostdata;
        u32 m;
        void *msg;
        unsigned long timeout;

        /*
         *      Find the TID for the bus
         */

        host = SCpnt->device->host;

        spin_unlock_irq(host->host_lock);

        printk(KERN_WARNING "i2o_scsi: Attempting to reset the bus.\n");

        hostdata = (struct i2o_scsi_host *)host->hostdata;
        tid = hostdata->bus_task;
        c = hostdata->controller;

        /*
         *      Now send a SCSI reset request. Any remaining commands
         *      will be aborted by the IOP. We need to catch the reply
         *      possibly ?
         */

        timeout = jiffies+2*HZ;
        do
        {
                m = le32_to_cpu(I2O_POST_READ32(c));
                if(m != 0xFFFFFFFF)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(1);
                mb();
        }
        while(time_before(jiffies, timeout));

        /* FIXME: if the loop above timed out, m is still 0xFFFFFFFF here */
        msg = c->msg_virt + m;
        i2o_raw_writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, msg);
        i2o_raw_writel(I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid, msg+4);
        i2o_raw_writel(scsi_context|0x80000000, msg+8);
        /* We use the top bit to split controller and unit transactions */
        /* Now store unit,tid so we can tie the completion back to a specific device */
        __raw_writel(c->unit << 16 | tid, msg+12);
        wmb();

        /* We want the command to complete after we return */
        spin_lock_irq(host->host_lock);
        i2o_post_message(c,m);

        /* Should we wait for the reset to complete ? */
        return SUCCESS;
}

/**
 *      i2o_scsi_bios_param     -       Invent disk geometry
 *      @sdev: scsi device
 *      @dev: block layer device
 *      @capacity: size in sectors
 *      @ip: geometry array
 *
 *      This is anyone's guess quite frankly. We use the same rules everyone
 *      else appears to and hope. It seems to work.
 */

static int i2o_scsi_bios_param(struct scsi_device * sdev,
                struct block_device *dev, sector_t capacity, int *ip)
{
        int size;

        size = capacity;
        ip[0] = 64;             /* heads                        */
        ip[1] = 32;             /* sectors                      */
        if ((ip[2] = size >> 11) > 1024) {      /* cylinders, test for big disk */
                ip[0] = 255;    /* heads                        */
                ip[1] = 63;     /* sectors                      */
                ip[2] = size / (255 * 63);      /* cylinders                    */
        }
        return 0;
}
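
/*
 *      Worked example (illustrative): an 8 GB disk of 16777216 sectors gives
 *      size >> 11 = 8192 cylinders at 64 heads / 32 sectors, which exceeds
 *      1024, so the big-disk geometry is used instead:
 *      255 heads, 63 sectors, 16777216 / (255 * 63) = 1044 cylinders.
 */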

MODULE_AUTHOR("Red Hat Software");
MODULE_LICENSE("GPL");


static struct scsi_host_template driver_template = {
        .proc_name              = "i2o_scsi",
        .name                   = "I2O SCSI Layer",
        .detect                 = i2o_scsi_detect,
        .release                = i2o_scsi_release,
        .info                   = i2o_scsi_info,
        .queuecommand           = i2o_scsi_queuecommand,
        .eh_abort_handler       = i2o_scsi_abort,
        .eh_bus_reset_handler   = i2o_scsi_bus_reset,
        .bios_param             = i2o_scsi_bios_param,
        .can_queue              = I2O_SCSI_CAN_QUEUE,
        .this_id                = 15,
        .sg_tablesize           = 8,
        .cmd_per_lun            = 6,
        .use_clustering         = ENABLE_CLUSTERING,
};

#include "../../scsi/scsi_module.c"
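
/*
 *      Note: scsi_module.c (included above) supplies the old-style module
 *      init/exit glue, which is expected to call our detect()/release()
 *      methods through driver_template rather than this file registering
 *      the host itself.
 */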