drivers/message/i2o/i2o_scsi.c
/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 *
 *  Complications for I2O scsi
 *
 *      o       Each (bus,lun) is a logical device in I2O. We keep a map
 *              table. We spoof failed selection for unmapped units
 *      o       Request sense buffers can come back for free.
 *      o       Scatter gather is a bit dynamic. We have to investigate at
 *              setup time.
 *      o       Some of our resources are dynamically shared. The i2o core
 *              needs a message reservation protocol to avoid swap v net
 *              deadlocking. We need to back off queue requests.
 *
 *      In general the firmware wants to help. Where its help isn't useful
 *      for performance we just ignore the aid. It's not worth the code, in truth.
 *
 * Fixes/additions:
 *      Steve Ralston:
 *              Scatter gather now works
 *      Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *              Minor fixes for 2.6.
 *
 * To Do:
 *      64bit cleanups
 *      Fix the resource management problems.
 */


#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/i2o.h>
#include "../../scsi/scsi.h"
#include "../../scsi/hosts.h"


#define VERSION_STRING        "Version 0.1.2"

//#define DRIVERDEBUG

#ifdef DRIVERDEBUG
#define dprintk(s, args...) printk(s, ## args)
#else
#define dprintk(s, args...)
#endif

#define I2O_SCSI_CAN_QUEUE      4
#define MAXHOSTS                32

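/*
 * Per-host private data. task[][] maps a (target,lun) pair to the I2O TID
 * that owns that unit, or -1 if there is no such device (queuecommand then
 * fakes a selection timeout). tagclock[][] remembers when we last issued an
 * ordered tag so one can be injected roughly every five seconds on devices
 * doing tagged queueing.
 */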
struct i2o_scsi_host
{
        struct i2o_controller *controller;
        s16 task[16][8];                /* Allow 16 targets, 8 LUNs each for now */
        unsigned long tagclock[16][8];  /* Tag clock for queueing */
        s16 bus_task;           /* The adapter TID */
};

static int scsi_context;
static int lun_done;
static int i2o_scsi_hosts;

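/*
 * Congested message frames are parked in retry[]/retry_ctrl[] (at most 32
 * of them) under retry_lock and re-posted one jiffy later by
 * i2o_retry_run(). queue_depth simply counts the commands currently
 * outstanding so queuecommand can report the high-water mark.
 */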
static u32 *retry[32];
static struct i2o_controller *retry_ctrl[32];
static struct timer_list retry_timer;
static spinlock_t retry_lock = SPIN_LOCK_UNLOCKED;
static int retry_ct = 0;

static atomic_t queue_depth;

/*
 *      SG Chain buffer support...
 */

#define SG_MAX_FRAGS            64

/*
 *      FIXME: we should allocate one of these per bus we find as we
 *      locate them not in a lump at boot.
 */

typedef struct _chain_buf
{
        u32 sg_flags_cnt[SG_MAX_FRAGS];
        u32 sg_buf[SG_MAX_FRAGS];
} chain_buf;

#define SG_CHAIN_BUF_SZ sizeof(chain_buf)

#define SG_MAX_BUFS             (i2o_num_controllers * I2O_SCSI_CAN_QUEUE)
#define SG_CHAIN_POOL_SZ        (SG_MAX_BUFS * SG_CHAIN_BUF_SZ)

static int max_sg_len = 0;
static chain_buf *sg_chain_pool = NULL;
static int sg_chain_tag = 0;
static int sg_max_frags = SG_MAX_FRAGS;
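/*
 * The chain pool is a single kmalloc of SG_MAX_BUFS chain_buf frames.
 * queuecommand hands them out round robin via sg_chain_tag; there is no
 * per-frame reference count, we simply rely on the pool being at least as
 * deep as the number of commands that can be in flight. If the pool could
 * not be allocated, sg_max_frags drops to 11 and we never chain.
 */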

/**
 *      i2o_retry_run           -       retry on timeout
 *      @f: unused
 *
 *      Retry congested frames. This actually needs pushing down into
 *      i2o core. We should only bother the OSM with this when we can't
 *      queue and retry the frame. Or perhaps we should call the OSM
 *      and its default handler should be this in the core, and this
 *      call a 2nd "I give up" handler in the OSM ?
 */

static void i2o_retry_run(unsigned long f)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&retry_lock, flags);
        for(i=0;i<retry_ct;i++)
                i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
        retry_ct=0;
        spin_unlock_irqrestore(&retry_lock, flags);
}

/**
 *      flush_pending           -       empty the retry queue
 *
 *      Turn each of the pending commands into a NOP and post it back
 *      to the controller to clear it.
 */

static void flush_pending(void)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&retry_lock, flags);
        for(i=0;i<retry_ct;i++)
        {
                retry[i][0]&=~0xFFFFFF;
                retry[i][0]|=I2O_CMD_UTIL_NOP<<24;
                i2o_post_message(retry_ctrl[i],virt_to_bus(retry[i]));
        }
        retry_ct=0;
        spin_unlock_irqrestore(&retry_lock, flags);
}

/**
 *      i2o_scsi_reply          -       scsi message reply processor
 *      @h: our i2o handler
 *      @c: controller issuing the reply
 *      @msg: the message from the controller (mapped)
 *
 *      Process reply messages (the equivalent of interrupts on a normal
 *      scsi controller). We can get a variety of messages to process. The
 *      normal path is scsi command completions. We must also deal with IOP
 *      failures, the reply to a bus reset and the reply to a LUN query.
 *
 *      Locks: the queue lock is taken to call the completion handler
 */

static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
{
        Scsi_Cmnd *current_command;
        spinlock_t *lock;
        u32 *m = (u32 *)msg;
        u8 as,ds,st;
        unsigned long flags;

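        /*
         *      Reply frame words as this handler reads them:
         *        m[0]  message flags; bit 13 set marks a transport failure
         *              frame rather than a normal reply
         *        m[1]  function code and initiator/target TIDs
         *        m[2]  initiator context flags; bit 31 marks our own control
         *              transactions (reset), bit 30 an event/LUN query reply
         *        m[3]  transaction context - the Scsi_Cmnd stashed via
         *              i2o_context_list_add()
         *        m[4]  device, adapter and request status bytes
         *        m[5]  bytes actually transferred
         *        m[7]  (failure frames only) bus address of the failed message
         */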
        if(m[0] & (1<<13))
        {
                printk("IOP fail.\n");
                printk("From %d To %d Cmd %d.\n",
                        (m[1]>>12)&0xFFF,
                        m[1]&0xFFF,
                        m[1]>>24);
                printk("Failure Code %d.\n", m[4]>>24);
                if(m[4]&(1<<16))
                        printk("Format error.\n");
                if(m[4]&(1<<17))
                        printk("Path error.\n");
                if(m[4]&(1<<18))
                        printk("Path State.\n");
                if(m[4]&(1<<19))
                        printk("Congestion.\n");

                m=(u32 *)bus_to_virt(m[7]);
                printk("Failing message is %p.\n", m);

                /* This isn't a fast path .. */
                spin_lock_irqsave(&retry_lock, flags);

                if((m[4]&(1<<18)) && retry_ct < 32)
                {
                        retry_ctrl[retry_ct]=c;
                        retry[retry_ct]=m;
                        if(!retry_ct++)
                        {
                                retry_timer.expires=jiffies+1;
                                add_timer(&retry_timer);
                        }
                        spin_unlock_irqrestore(&retry_lock, flags);
                }
                else
                {
                        spin_unlock_irqrestore(&retry_lock, flags);
                        /* Create a scsi error for this */
                        current_command = (Scsi_Cmnd *)i2o_context_list_get(m[3], c);
                        if(!current_command)
                                return;

                        lock = current_command->device->host->host_lock;
                        printk("Aborted %ld\n", current_command->serial_number);

                        spin_lock_irqsave(lock, flags);
                        current_command->result = DID_ERROR << 16;
                        current_command->scsi_done(current_command);
                        spin_unlock_irqrestore(lock, flags);

                        /* Now flush the message by making it a NOP */
                        m[0]&=0x00FFFFFF;
                        m[0]|=(I2O_CMD_UTIL_NOP)<<24;
                        i2o_post_message(c,virt_to_bus(m));
                }
                return;
        }

        prefetchw(&queue_depth);


        /*
         *      Low byte is device status, next is adapter status,
         *      (then one byte reserved), then request status.
         */
        ds = (u8)le32_to_cpu(m[4]);
        as = (u8)(le32_to_cpu(m[4]) >> 8);
        st = (u8)(le32_to_cpu(m[4]) >> 24);

        dprintk(KERN_INFO "i2o got a scsi reply %08X: ", m[0]);
        dprintk(KERN_INFO "m[2]=%08X: ", m[2]);
        dprintk(KERN_INFO "m[4]=%08X\n", m[4]);

        if(m[2]&0x80000000)
        {
                if(m[2]&0x40000000)
                {
                        dprintk(KERN_INFO "Event.\n");
                        lun_done=1;
                        return;
                }
                printk(KERN_INFO "i2o_scsi: bus reset completed.\n");
                return;
        }

        current_command = (Scsi_Cmnd *)i2o_context_list_get(m[3], c);

        /*
         *      Is this a control request coming back - eg an abort ?
         */

        atomic_dec(&queue_depth);

        if(current_command==NULL)
        {
                if(st)
                        dprintk(KERN_WARNING "SCSI abort: %08X", m[4]);
                dprintk(KERN_INFO "SCSI abort completed.\n");
                return;
        }

        dprintk(KERN_INFO "Completed %ld\n", current_command->serial_number);

        if(st == 0x06)
        {
                if(le32_to_cpu(m[5]) < current_command->underflow)
                {
                        int i;
                        printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n",
                                le32_to_cpu(m[5]), current_command->underflow);
                        printk("Cmd: ");
                        for(i=0;i<15;i++)
                                printk("%02X ", current_command->cmnd[i]);
                        printk(".\n");
                }
                else st=0;
        }

        if(st)
        {
                /* An error has occurred */

                dprintk(KERN_WARNING "SCSI error %08X", m[4]);

                if (as == 0x0E)
                        /* SCSI Reset */
                        current_command->result = DID_RESET << 16;
                else if (as == 0x0F)
                        current_command->result = DID_PARITY << 16;
                else
                        current_command->result = DID_ERROR << 16;
        }
        else
                /*
                 *      It worked maybe ?
                 */
                current_command->result = DID_OK << 16 | ds;

        if (current_command->use_sg)
                pci_unmap_sg(c->pdev, (struct scatterlist *)current_command->buffer, current_command->use_sg, scsi_to_pci_dma_dir(current_command->sc_data_direction));
        else if (current_command->request_bufflen)
                pci_unmap_single(c->pdev, (dma_addr_t)((long)current_command->SCp.ptr), current_command->request_bufflen, scsi_to_pci_dma_dir(current_command->sc_data_direction));

        lock = current_command->device->host->host_lock;
        spin_lock_irqsave(lock, flags);
        current_command->scsi_done(current_command);
        spin_unlock_irqrestore(lock, flags);
        return;
}

struct i2o_handler i2o_scsi_handler = {
        .reply  = i2o_scsi_reply,
        .name   = "I2O SCSI OSM",
        .class  = I2O_CLASS_SCSI_PERIPHERAL,
};

/**
 *      i2o_find_lun            -       report the lun of an i2o device
 *      @c: i2o controller owning the device
 *      @d: i2o disk device
 *      @target: filled in with target id
 *      @lun: filled in with target lun
 *
 *      Query an I2O device to find out its SCSI lun and target numbering. We
 *      don't currently handle some of the fancy SCSI-3 stuff although our
 *      querying is sufficient to do so.
 */

static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun)
{
        u8 reply[8];

        if(i2o_query_scalar(c, d->lct_data.tid, 0, 3, reply, 4)<0)
                return -1;

        *target=reply[0];

        if(i2o_query_scalar(c, d->lct_data.tid, 0, 4, reply, 8)<0)
                return -1;

        *lun=reply[1];

        dprintk(KERN_INFO "SCSI (%d,%d)\n", *target, *lun);
        return 0;
}

/**
 *      i2o_scsi_init           -       initialize an i2o device for scsi
 *      @c: i2o controller owning the device
 *      @d: scsi controller
 *      @shpnt: scsi device we wish it to become
 *
 *      Enumerate the scsi peripheral/fibre channel peripheral class
 *      devices that are children of the controller. From that we build
 *      a translation map for the command queue code. Since I2O works on
 *      its own TIDs we effectively have to think backwards to get what
 *      the midlayer wants
 */

static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt)
{
        struct i2o_device *unit;
        struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata;
        int lun;
        int target;

        h->controller=c;
        h->bus_task=d->lct_data.tid;

        for(target=0;target<16;target++)
                for(lun=0;lun<8;lun++)
                        h->task[target][lun] = -1;

        for(unit=c->devices;unit!=NULL;unit=unit->next)
        {
                dprintk(KERN_INFO "Class %03X, parent %d, want %d.\n",
                        unit->lct_data.class_id, unit->lct_data.parent_tid, d->lct_data.tid);

                /* Only look at scsi and fc devices */
                if (    (unit->lct_data.class_id != I2O_CLASS_SCSI_PERIPHERAL)
                     && (unit->lct_data.class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL)
                   )
                        continue;

                /* On our bus ? */
                dprintk(KERN_INFO "Found a disk (%d).\n", unit->lct_data.tid);
                if ((unit->lct_data.parent_tid == d->lct_data.tid)
                     || (unit->lct_data.parent_tid == d->lct_data.parent_tid)
                   )
                {
                        u16 limit;
                        dprintk(KERN_INFO "It's ours.\n");
                        if(i2o_find_lun(c, unit, &target, &lun)==-1)
                        {
                                printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", unit->lct_data.tid);
                                continue;
                        }
                        dprintk(KERN_INFO "Found disk %d %d.\n", target, lun);
                        h->task[target][lun]=unit->lct_data.tid;
                        h->tagclock[target][lun]=jiffies;

                        /* Get the max fragments/request */
                        i2o_query_scalar(c, d->lct_data.tid, 0xF103, 3, &limit, 2);

                        /* sanity */
                        if ( limit == 0 )
                        {
                                printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n");
                                limit = 1;
                        }

                        shpnt->sg_tablesize = limit;

                        dprintk(KERN_INFO "i2o_scsi: set scatter-gather to %d.\n",
                                shpnt->sg_tablesize);
                }
        }
}

/**
 *      i2o_scsi_detect         -       probe for I2O scsi devices
 *      @tpnt: scsi layer template
 *
 *      I2O is a little odd here. The I2O core already knows what the
 *      devices are. It also knows them by disk and tape as well as
 *      by controller. We register each I2O scsi class object as a
 *      scsi controller and then let the enumeration fake up the rest
 */

static int i2o_scsi_detect(Scsi_Host_Template * tpnt)
{
        struct Scsi_Host *shpnt = NULL;
        int i;
        int count;

        printk(KERN_INFO "i2o_scsi.c: %s\n", VERSION_STRING);

        if(i2o_install_handler(&i2o_scsi_handler)<0)
        {
                printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n");
                return 0;
        }
        scsi_context = i2o_scsi_handler.context;

        if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL)
        {
                printk(KERN_INFO "i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ);
                printk(KERN_INFO "i2o_scsi: SG chaining DISABLED!\n");
                sg_max_frags = 11;
        }
        else
        {
                printk(KERN_INFO "  chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool);
                printk(KERN_INFO "  (%d byte buffers X %d can_queue X %d i2o controllers)\n",
                                SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers);
                sg_max_frags = SG_MAX_FRAGS;    // 64
        }

        init_timer(&retry_timer);
        retry_timer.data = 0UL;
        retry_timer.function = i2o_retry_run;

//      printk("SCSI OSM at %d.\n", scsi_context);

        for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++)
        {
                struct i2o_controller *c=i2o_find_controller(i);
                struct i2o_device *d;
                /*
                 *      This controller doesn't exist.
                 */

                if(c==NULL)
                        continue;

                /*
                 *      Fixme - we need some altered device locking. This
                 *      is racing with device addition in theory. Easy to fix.
                 */

                for(d=c->devices;d!=NULL;d=d->next)
                {
                        /*
                         *      bus_adapter, SCSI (obsolete), or FibreChannel busses only
                         */
                        if(    (d->lct_data.class_id!=I2O_CLASS_BUS_ADAPTER_PORT)       // bus_adapter
//                          && (d->lct_data.class_id!=I2O_CLASS_FIBRE_CHANNEL_PORT)     // FC_PORT
                          )
                                continue;

                        shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host));
                        if(shpnt==NULL)
                                continue;
                        shpnt->unique_id = (u32)d;
                        shpnt->io_port = 0;
                        shpnt->n_io_port = 0;
                        shpnt->irq = 0;
                        shpnt->this_id = /* Good question */15;
                        i2o_scsi_init(c, d, shpnt);
                        count++;
                }
        }
        i2o_scsi_hosts = count;

        if(count==0)
        {
                if(sg_chain_pool!=NULL)
                {
                        kfree(sg_chain_pool);
                        sg_chain_pool = NULL;
                }
                flush_pending();
                del_timer(&retry_timer);
                i2o_remove_handler(&i2o_scsi_handler);
        }

        return count;
}

static int i2o_scsi_release(struct Scsi_Host *host)
{
        if(--i2o_scsi_hosts==0)
        {
                if(sg_chain_pool!=NULL)
                {
                        kfree(sg_chain_pool);
                        sg_chain_pool = NULL;
                }
                flush_pending();
                del_timer(&retry_timer);
                i2o_remove_handler(&i2o_scsi_handler);
        }

        scsi_unregister(host);

        return 0;
}


static const char *i2o_scsi_info(struct Scsi_Host *SChost)
{
        struct i2o_scsi_host *hostdata;
        hostdata = (struct i2o_scsi_host *)SChost->hostdata;
        return(&hostdata->controller->name[0]);
}

/**
 *      i2o_scsi_queuecommand   -       queue a SCSI command
 *      @SCpnt: scsi command pointer
 *      @done: callback for completion
 *
 *      Issue a scsi command asynchronously. Return 0 on success or 1 if
 *      we hit an error (normally message queue congestion). The only
 *      minor complication here is that I2O deals with the device addressing
 *      so we have to map the bus/dev/lun back to an I2O handle as well
 *      as fake absent devices ourselves.
 *
 *      Locks: takes the controller lock on error path only
 */

static int i2o_scsi_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
        int i;
        int tid;
        struct i2o_controller *c;
        Scsi_Cmnd *current_command;
        struct Scsi_Host *host;
        struct i2o_scsi_host *hostdata;
        u32 *msg, *mptr;
        u32 m;
        u32 *lenptr;
        int direction;
        int scsidir;
        u32 len;
        u32 reqlen;
        u32 tag;
        unsigned long flags;

        static int max_qd = 1;

        /*
         *      Do the incoming paperwork
         */

        host = SCpnt->device->host;
        hostdata = (struct i2o_scsi_host *)host->hostdata;

        c = hostdata->controller;
        prefetch(c);
        prefetchw(&queue_depth);

        SCpnt->scsi_done = done;

        if(SCpnt->device->id > 15)
        {
                printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->device->id);
                return -1;
        }

        tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];

        dprintk(KERN_INFO "qcmd: Tid = %d\n", tid);

        current_command = SCpnt;                /* set current command                */
        current_command->scsi_done = done;      /* set ptr to done function           */

        /* We don't have such a device. Pretend we did the command
           and that selection timed out */

        if(tid == -1)
        {
                SCpnt->result = DID_NO_CONNECT << 16;
                done(SCpnt);
                return 0;
        }

        dprintk(KERN_INFO "Real scsi messages.\n");

        /*
         *      Obtain an I2O message. If there are none free then
         *      throw it back to the scsi layer
         */

        m = le32_to_cpu(I2O_POST_READ32(c));
        if(m==0xFFFFFFFF)
                return 1;

        msg = (u32 *)(c->mem_offset + m);

        /*
         *      Put together a scsi execscb message
         */
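        /*
         *      Frame layout built below (32-bit words):
         *        msg[0]    length in words << 16 | SGL_OFFSET_10
         *        msg[1]    I2O_CMD_SCSI_EXEC | HOST_TID << 12 | target tid
         *        msg[2]    our initiator context (scsi_context)
         *        msg[3]    transaction context handle for this Scsi_Cmnd
         *        msg[4]    data direction | disconnect ok | tag | CDB length
         *        msg[5-8]  the CDB, always copied as a 16 byte block
         *        msg[9]    total byte count, filled in once the SG list is known
         *        msg[10+]  the scatter gather list, possibly chained
         */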

        len = SCpnt->request_bufflen;
        direction = 0x00000000;                 // SGL IN  (osm<--iop)

        if(SCpnt->sc_data_direction == SCSI_DATA_NONE)
                scsidir = 0x00000000;                   // DATA NO XFER
        else if(SCpnt->sc_data_direction == SCSI_DATA_WRITE)
        {
                direction=0x04000000;   // SGL OUT  (osm-->iop)
                scsidir  =0x80000000;   // DATA OUT (iop-->dev)
        }
        else if(SCpnt->sc_data_direction == SCSI_DATA_READ)
        {
                scsidir  =0x40000000;   // DATA IN  (iop<--dev)
        }
        else
        {
                /* Unknown - kill the command */
                SCpnt->result = DID_NO_CONNECT << 16;

                /* We must lock the request queue while completing */
                spin_lock_irqsave(host->host_lock, flags);
                done(SCpnt);
                spin_unlock_irqrestore(host->host_lock, flags);
                return 0;
        }


        i2o_raw_writel(I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid, &msg[1]);
        i2o_raw_writel(scsi_context, &msg[2]);  /* So the I2O layer passes to us */
        i2o_raw_writel(i2o_context_list_add(SCpnt, c), &msg[3]);        /* We want the SCSI control block back */

        /* LSI_920_PCI_QUIRK
         *
         *      Intermittent msg frame word data corruption has been observed
         *      on msg[4] after WRITE and READ-MODIFY-WRITE
         *      operations.  19990606 -sralston
         *
         *      (Hence we build this word via tag. It's good practice anyway;
         *       we don't want needless fetches over PCI)
         */

        tag=0;

        /*
         *      Attach tags to the devices
         */
        if(SCpnt->device->tagged_supported)
        {
                /*
                 *      Some drives are too stupid to handle fairness issues
                 *      with tagged queueing. We throw in the odd ordered
                 *      tag to stop them starving themselves.
                 */
                if((jiffies - hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]) > (5*HZ))
                {
                        tag=0x01800000;         /* ORDERED! */
                        hostdata->tagclock[SCpnt->device->id][SCpnt->device->lun]=jiffies;
                }
                else
                {
                        /* Hmmm...  I always see value of 0 here,
                         *  of which {HEAD_OF, ORDERED, SIMPLE} are NOT!  -sralston
                         */
                        if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
                                tag=0x01000000;
                        else if(SCpnt->tag == ORDERED_QUEUE_TAG)
                                tag=0x01800000;
                }
        }

        /* Direction, disconnect ok, tag, CDBLen */
        i2o_raw_writel(scsidir|0x20000000|SCpnt->cmd_len|tag, &msg[4]);

        mptr=msg+5;

        /*
         *      Write SCSI command into the message - always 16 byte block
         */

        memcpy_toio(mptr, SCpnt->cmnd, 16);
        mptr+=4;
        lenptr=mptr++;          /* Remember me - fill in when we know */

        reqlen = 12;            // SINGLE SGE

        /*
         *      Now fill in the SGList and command
         *
         *      FIXME: we need to set the sglist limits according to the
         *      message size of the I2O controller. We might only have room
         *      for 6 or so worst case
         */

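        /*
         *      SG elements as this driver writes them: each simple element
         *      is a flags/length word (0x10000000, plus the 0x04000000
         *      direction bit for writes) followed by a bus address, and the
         *      final element is re-flagged 0xD0000000 to mark the end of the
         *      list. With more than 11 fragments a single 0xB0000000 chain
         *      pointer element goes into the frame and the real list is
         *      built in one of the sg_chain_pool buffers.
         */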
        if(SCpnt->use_sg)
        {
                struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
                int sg_count;
                int chain = 0;

                len = 0;

                sg_count = pci_map_sg(c->pdev, sg, SCpnt->use_sg,
                                      scsi_to_pci_dma_dir(SCpnt->sc_data_direction));

                /* FIXME: handle fail */
                if(!sg_count)
                        BUG();

                if((sg_max_frags > 11) && (SCpnt->use_sg > 11))
                {
                        chain = 1;
                        /*
                         *      Need to chain!
                         */
                        i2o_raw_writel(direction|0xB0000000|(SCpnt->use_sg*2*4), mptr++);
                        i2o_raw_writel(virt_to_bus(sg_chain_pool + sg_chain_tag), mptr);
                        mptr = (u32*)(sg_chain_pool + sg_chain_tag);
                        if (SCpnt->use_sg > max_sg_len)
                        {
                                max_sg_len = SCpnt->use_sg;
                                printk("i2o_scsi: Chain SG! SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n",
                                        SCpnt, SCpnt->use_sg, sg_chain_tag);
                        }
                        if ( ++sg_chain_tag == SG_MAX_BUFS )
                                sg_chain_tag = 0;
                        for(i = 0 ; i < SCpnt->use_sg; i++)
                        {
                                *mptr++=cpu_to_le32(direction|0x10000000|sg_dma_len(sg));
                                len+=sg_dma_len(sg);
                                *mptr++=cpu_to_le32(sg_dma_address(sg));
                                sg++;
                        }
                        mptr[-2]=cpu_to_le32(direction|0xD0000000|sg_dma_len(sg-1));
                }
                else
                {
                        for(i = 0 ; i < SCpnt->use_sg; i++)
                        {
                                i2o_raw_writel(direction|0x10000000|sg_dma_len(sg), mptr++);
                                len+=sg_dma_len(sg);
                                i2o_raw_writel(sg_dma_address(sg), mptr++);
                                sg++;
                        }

                        /* Make this an end of list. Again evade the 920 bug and
                           unwanted PCI read traffic */

                        i2o_raw_writel(direction|0xD0000000|sg_dma_len(sg-1), &mptr[-2]);
                }

                if(!chain)
                        reqlen = mptr - msg;

                i2o_raw_writel(len, lenptr);

                if(len != SCpnt->underflow)
                        printk("Cmd len %08X Cmd underflow %08X\n",
                                len, SCpnt->underflow);
        }
        else
        {
                dprintk(KERN_INFO "non sg for %p, %d\n", SCpnt->request_buffer,
                                SCpnt->request_bufflen);
                i2o_raw_writel(len = SCpnt->request_bufflen, lenptr);
                if(len == 0)
                {
                        reqlen = 9;
                }
                else
                {
                        dma_addr_t dma_addr;
                        dma_addr = pci_map_single(c->pdev,
                                               SCpnt->request_buffer,
                                               SCpnt->request_bufflen,
                                               scsi_to_pci_dma_dir(SCpnt->sc_data_direction));
                        if(dma_addr == 0)
                                BUG();  /* How to handle ?? */
                        SCpnt->SCp.ptr = (char *)(unsigned long) dma_addr;
                        i2o_raw_writel(0xD0000000|direction|SCpnt->request_bufflen, mptr++);
                        i2o_raw_writel(dma_addr, mptr++);
                }
        }

        /*
         *      Stick the headers on
         */

        i2o_raw_writel(reqlen<<16 | SGL_OFFSET_10, msg);

        /* Queue the message */
        i2o_post_message(c,m);

        atomic_inc(&queue_depth);

        if(atomic_read(&queue_depth)> max_qd)
        {
                max_qd=atomic_read(&queue_depth);
                printk("Queue depth now %d.\n", max_qd);
        }

        mb();
        dprintk(KERN_INFO "Issued %ld\n", current_command->serial_number);

        return 0;
}

/**
 *      i2o_scsi_abort  -       abort a running command
 *      @SCpnt: command to abort
 *
 *      Ask the I2O controller to abort a command. This is an asynchronous
 *      process and our callback handler will see the command complete
 *      with an aborted message if it succeeds.
 *
 *      Locks: no locks are held or needed
 */

int i2o_scsi_abort(Scsi_Cmnd * SCpnt)
{
        struct i2o_controller *c;
        struct Scsi_Host *host;
        struct i2o_scsi_host *hostdata;
        u32 msg[5];
        int tid;
        int status = FAILED;

        printk(KERN_WARNING "i2o_scsi: Aborting command block.\n");

        host = SCpnt->device->host;
        hostdata = (struct i2o_scsi_host *)host->hostdata;
        tid = hostdata->task[SCpnt->device->id][SCpnt->device->lun];
        if(tid==-1)
        {
                printk(KERN_ERR "i2o_scsi: Impossible command to abort!\n");
                return status;
        }
        c = hostdata->controller;

        spin_unlock_irq(host->host_lock);

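        /*
         *      Five word SCSI_ABORT frame: the payload is the transaction
         *      context of the command being aborted, which we also remove
         *      from the context list so the normal completion path can no
         *      longer look it up.
         */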
        msg[0] = FIVE_WORD_MSG_SIZE;
        msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid;
        msg[2] = scsi_context;
        msg[3] = 0;
        msg[4] = i2o_context_list_remove(SCpnt, c);
        if(i2o_post_wait(c, msg, sizeof(msg), 240) == 0)
                status = SUCCESS;

        spin_lock_irq(host->host_lock);
        return status;
}

/**
 *      i2o_scsi_bus_reset              -       Issue a SCSI reset
 *      @SCpnt: the command that caused the reset
 *
 *      Perform a SCSI bus reset operation. In I2O this is just a message
 *      we pass. I2O can do clever multi-initiator and shared reset stuff
 *      but we don't support this.
 *
 *      Locks: called with no lock held, requires no locks.
 */

static int i2o_scsi_bus_reset(Scsi_Cmnd * SCpnt)
{
        int tid;
        struct i2o_controller *c;
        struct Scsi_Host *host;
        struct i2o_scsi_host *hostdata;
        u32 m;
        unsigned long msg;
        unsigned long timeout;


        /*
         *      Find the TID for the bus
         */

        host = SCpnt->device->host;

        spin_unlock_irq(host->host_lock);

        printk(KERN_WARNING "i2o_scsi: Attempting to reset the bus.\n");

        hostdata = (struct i2o_scsi_host *)host->hostdata;
        tid = hostdata->bus_task;
        c = hostdata->controller;

        /*
         *      Now send a SCSI reset request. Any remaining commands
         *      will be aborted by the IOP. We need to catch the reply
         *      possibly ?
         */

        timeout = jiffies+2*HZ;
        do
        {
                m = le32_to_cpu(I2O_POST_READ32(c));
                if(m != 0xFFFFFFFF)
                        break;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(1);
                mb();
        }
        while(time_before(jiffies, timeout));

        if(m == 0xFFFFFFFF)
        {
                /* Never got a frame - give up rather than use a bogus offset */
                printk(KERN_ERR "i2o_scsi: Unable to obtain a frame for bus reset.\n");
                spin_lock_irq(host->host_lock);
                return FAILED;
        }

        msg = c->mem_offset + m;
        i2o_raw_writel(FOUR_WORD_MSG_SIZE|SGL_OFFSET_0, msg);
        i2o_raw_writel(I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid, msg+4);
        i2o_raw_writel(scsi_context|0x80000000, msg+8);
        /* We use the top bit to split controller and unit transactions */
        /* Now store unit,tid so we can tie the completion back to a specific device */
        __raw_writel(c->unit << 16 | tid, msg+12);
        wmb();

        /* We want the command to complete after we return */
        spin_lock_irq(host->host_lock);
        i2o_post_message(c,m);

        /* Should we wait for the reset to complete ? */
        return SUCCESS;
}

/**
 *      i2o_scsi_host_reset     -       host reset callback
 *      @SCpnt: command causing the reset
 *
 *      An I2O controller can be many things at once. While we can
 *      reset a controller, the potential mess from doing so is vast, and
 *      it's better to simply hold on and pray
 */

static int i2o_scsi_host_reset(Scsi_Cmnd * SCpnt)
{
        return FAILED;
}

/**
 *      i2o_scsi_device_reset   -       device reset callback
 *      @SCpnt: command causing the reset
 *
 *      I2O does not (AFAIK) support doing a device reset
 */

static int i2o_scsi_device_reset(Scsi_Cmnd * SCpnt)
{
        return FAILED;
}

/**
 *      i2o_scsi_bios_param     -       Invent disk geometry
 *      @sdev: scsi device
 *      @dev: block layer device
 *      @capacity: size in sectors
 *      @ip: geometry array
 *
 *      This is anyone's guess, quite frankly. We use the same rules everyone
 *      else appears to and hope. It seems to work.
 */

static int i2o_scsi_bios_param(struct scsi_device * sdev,
                struct block_device *dev, sector_t capacity, int *ip)
{
        int size;

        size = capacity;
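        /*
         *      Worked example (hypothetical numbers): a disk of roughly
         *      70,000,000 512-byte sectors (about 36GB) gives
         *      70000000 >> 11 = 34179 cylinders at 64 heads x 32 sectors,
         *      which is over 1024, so we fall back to the big-disk mapping:
         *      255 heads, 63 sectors, 70000000 / (255 * 63) = 4357 cylinders.
         */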
        ip[0] = 64;             /* heads                        */
        ip[1] = 32;             /* sectors                      */
        if ((ip[2] = size >> 11) > 1024) {      /* cylinders, test for big disk */
                ip[0] = 255;    /* heads                        */
                ip[1] = 63;     /* sectors                      */
                ip[2] = size / (255 * 63);      /* cylinders                    */
        }
        return 0;
}

MODULE_AUTHOR("Red Hat Software");
MODULE_LICENSE("GPL");


static Scsi_Host_Template driver_template = {
        .proc_name              = "i2o_scsi",
        .name                   = "I2O SCSI Layer",
        .detect                 = i2o_scsi_detect,
        .release                = i2o_scsi_release,
        .info                   = i2o_scsi_info,
        .queuecommand           = i2o_scsi_queuecommand,
        .eh_abort_handler       = i2o_scsi_abort,
        .eh_bus_reset_handler   = i2o_scsi_bus_reset,
        .eh_device_reset_handler= i2o_scsi_device_reset,
        .eh_host_reset_handler  = i2o_scsi_host_reset,
        .bios_param             = i2o_scsi_bios_param,
        .can_queue              = I2O_SCSI_CAN_QUEUE,
        .this_id                = 15,
        .sg_tablesize           = 8,
        .cmd_per_lun            = 6,
        .use_clustering         = ENABLE_CLUSTERING,
};

#include "../../scsi/scsi_module.c"