linux-2.6.git: drivers/scsi/aacraid/aachba.c
1 /*
2  *      Adaptec AAC series RAID controller driver
3  *      (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4  *
5  * based on the old aacraid driver that is..
6  * Adaptec aacraid device driver for Linux.
7  *
8  * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2, or (at your option)
13  * any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; see the file COPYING.  If not, write to
22  * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23  *
24  */
25
26 #include <linux/kernel.h>
27 #include <linux/init.h>
28 #include <linux/types.h>
29 #include <linux/sched.h>
30 #include <linux/pci.h>
31 #include <linux/spinlock.h>
32 #include <linux/slab.h>
33 #include <linux/completion.h>
34 #include <linux/blkdev.h>
35 #include <asm/semaphore.h>
36 #include <asm/uaccess.h>
37
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
42
43 #include "aacraid.h"
44
45 /* values for inqd_pdt: Peripheral device type in plain English */
46 #define INQD_PDT_DA     0x00    /* Direct-access (DISK) device */
47 #define INQD_PDT_PROC   0x03    /* Processor device */
48 #define INQD_PDT_CHNGR  0x08    /* Changer (jukebox, scsi2) */
49 #define INQD_PDT_COMM   0x09    /* Communication device (scsi2) */
50 #define INQD_PDT_NOLUN2 0x1f    /* Unknown Device (scsi2) */
51 #define INQD_PDT_NOLUN  0x7f    /* Logical Unit Not Present */
52
53 #define INQD_PDT_DMASK  0x1F    /* Peripheral Device Type Mask */
54 #define INQD_PDT_QMASK  0xE0    /* Peripheral Device Qualifier Mask */
55
56 #define MAX_FIB_DATA (sizeof(struct hw_fib) - sizeof(FIB_HEADER))
57
58 #define MAX_DRIVER_SG_SEGMENT_COUNT 17
59
60 /*
61  *      Sense codes
62  */
63  
64 #define SENCODE_NO_SENSE                        0x00
65 #define SENCODE_END_OF_DATA                     0x00
66 #define SENCODE_BECOMING_READY                  0x04
67 #define SENCODE_INIT_CMD_REQUIRED               0x04
68 #define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
69 #define SENCODE_INVALID_COMMAND                 0x20
70 #define SENCODE_LBA_OUT_OF_RANGE                0x21
71 #define SENCODE_INVALID_CDB_FIELD               0x24
72 #define SENCODE_LUN_NOT_SUPPORTED               0x25
73 #define SENCODE_INVALID_PARAM_FIELD             0x26
74 #define SENCODE_PARAM_NOT_SUPPORTED             0x26
75 #define SENCODE_PARAM_VALUE_INVALID             0x26
76 #define SENCODE_RESET_OCCURRED                  0x29
77 #define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
78 #define SENCODE_INQUIRY_DATA_CHANGED            0x3F
79 #define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
80 #define SENCODE_DIAGNOSTIC_FAILURE              0x40
81 #define SENCODE_INTERNAL_TARGET_FAILURE         0x44
82 #define SENCODE_INVALID_MESSAGE_ERROR           0x49
83 #define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
84 #define SENCODE_OVERLAPPED_COMMAND              0x4E
85
86 /*
87  *      Additional sense codes
88  */
89  
90 #define ASENCODE_NO_SENSE                       0x00
91 #define ASENCODE_END_OF_DATA                    0x05
92 #define ASENCODE_BECOMING_READY                 0x01
93 #define ASENCODE_INIT_CMD_REQUIRED              0x02
94 #define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
95 #define ASENCODE_INVALID_COMMAND                0x00
96 #define ASENCODE_LBA_OUT_OF_RANGE               0x00
97 #define ASENCODE_INVALID_CDB_FIELD              0x00
98 #define ASENCODE_LUN_NOT_SUPPORTED              0x00
99 #define ASENCODE_INVALID_PARAM_FIELD            0x00
100 #define ASENCODE_PARAM_NOT_SUPPORTED            0x01
101 #define ASENCODE_PARAM_VALUE_INVALID            0x02
102 #define ASENCODE_RESET_OCCURRED                 0x00
103 #define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
104 #define ASENCODE_INQUIRY_DATA_CHANGED           0x03
105 #define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
106 #define ASENCODE_DIAGNOSTIC_FAILURE             0x80
107 #define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
108 #define ASENCODE_INVALID_MESSAGE_ERROR          0x00
109 #define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
110 #define ASENCODE_OVERLAPPED_COMMAND             0x00
111
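/*
 *      Byte extraction helpers: BYTEn(x) yields byte n (n = 0 is the least
 *      significant) of a 32-bit value.  Used below to place values such as
 *      the residue into big-endian SCSI sense-data fields.
 */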
112 #define BYTE0(x) (unsigned char)(x)
113 #define BYTE1(x) (unsigned char)((x) >> 8)
114 #define BYTE2(x) (unsigned char)((x) >> 16)
115 #define BYTE3(x) (unsigned char)((x) >> 24)
116
117 /*------------------------------------------------------------------------------
118  *              S T R U C T S / T Y P E D E F S
119  *----------------------------------------------------------------------------*/
120 /* SCSI inquiry data */
121 struct inquiry_data {
122         u8 inqd_pdt;    /* Peripheral qualifier | Peripheral Device Type  */
123         u8 inqd_dtq;    /* RMB | Device Type Qualifier  */
124         u8 inqd_ver;    /* ISO version | ECMA version | ANSI-approved version */
125         u8 inqd_rdf;    /* AENC | TrmIOP | Response data format */
126         u8 inqd_len;    /* Additional length (n-4) */
127         u8 inqd_pad1[2];/* Reserved - must be zero */
128         u8 inqd_pad2;   /* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
129         u8 inqd_vid[8]; /* Vendor ID */
130         u8 inqd_pid[16];/* Product ID */
131         u8 inqd_prl[4]; /* Product Revision Level */
132 };
133
134 /*
135  *              M O D U L E   G L O B A L S
136  */
137  
138 static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
139 static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
140 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
141 #ifdef AAC_DETAILED_STATUS_INFO
142 static char *aac_get_status_string(u32 status);
143 #endif
144
145 /*
146  *      Non-DASD selection is handled entirely in aachba now
147  */     
148  
149 static int nondasd = -1;
150 static int dacmode = -1;
151
152 static int commit = -1;
153
154 module_param(nondasd, int, 0);
155 MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices. 0=off, 1=on");
156 module_param(dacmode, int, 0);
157 MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC. 0=off, 1=on");
158 module_param(commit, int, 0);
159 MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the adapter for foreign arrays.\nThis is typically needed in systems that do not have a BIOS. 0=off, 1=on");
160
161 /**
162  *      aac_get_config_status   -       check the adapter configuration
163  *      @dev: adapter to query
164  *
165  *      Query config status, and commit the configuration if needed.
166  */
167 int aac_get_config_status(struct aac_dev *dev)
168 {
169         int status = 0;
170         struct fib * fibptr;
171
172         if (!(fibptr = fib_alloc(dev)))
173                 return -ENOMEM;
174
175         fib_init(fibptr);
176         {
177                 struct aac_get_config_status *dinfo;
178                 dinfo = (struct aac_get_config_status *) fib_data(fibptr);
179
180                 dinfo->command = cpu_to_le32(VM_ContainerConfig);
181                 dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
182                 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
183         }
184
185         status = fib_send(ContainerCommand,
186                             fibptr,
187                             sizeof (struct aac_get_config_status),
188                             FsaNormal,
189                             1, 1,
190                             NULL, NULL);
191         if (status < 0 ) {
192                 printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
193         } else {
194                 struct aac_get_config_status_resp *reply
195                   = (struct aac_get_config_status_resp *) fib_data(fibptr);
196                 dprintk((KERN_WARNING
197                   "aac_get_config_status: response=%d status=%d action=%d\n",
198                   reply->response, reply->status, reply->data.action));
199                 if ((reply->response != ST_OK)
200                  || (reply->status != CT_OK)
201                  || (reply->data.action > CFACT_PAUSE)) {
202                         printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
203                         status = -EINVAL;
204                 }
205         }
206         fib_complete(fibptr);
207         /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
208         if (status >= 0) {
209                 if (commit == 1) {
210                         struct aac_commit_config * dinfo;
211                         fib_init(fibptr);
212                         dinfo = (struct aac_commit_config *) fib_data(fibptr);
213         
214                         dinfo->command = cpu_to_le32(VM_ContainerConfig);
215                         dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
216         
217                         status = fib_send(ContainerCommand,
218                                     fibptr,
219                                     sizeof (struct aac_commit_config),
220                                     FsaNormal,
221                                     1, 1,
222                                     NULL, NULL);
223                         fib_complete(fibptr);
224                 } else if (commit == 0) {
225                         printk(KERN_WARNING
226                           "aac_get_config_status: Foreign device configurations are being ignored\n");
227                 }
228         }
229         fib_free(fibptr);
230         return status;
231 }
232
233 /**
234  *      aac_get_containers      -       list containers
235  *      @dev: adapter to probe
236  *
237  *      Make a list of all containers on this controller
238  */
239 int aac_get_containers(struct aac_dev *dev)
240 {
241         struct fsa_dev_info *fsa_dev_ptr;
242         u32 index; 
243         int status = 0;
244         struct fib * fibptr;
245         unsigned instance;
246         struct aac_get_container_count *dinfo;
247         struct aac_get_container_count_resp *dresp;
248         int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
249
250         instance = dev->scsi_host_ptr->unique_id;
251
252         if (!(fibptr = fib_alloc(dev)))
253                 return -ENOMEM;
254
255         fib_init(fibptr);
256         dinfo = (struct aac_get_container_count *) fib_data(fibptr);
257         dinfo->command = cpu_to_le32(VM_ContainerConfig);
258         dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
259
260         status = fib_send(ContainerCommand,
261                     fibptr,
262                     sizeof (struct aac_get_container_count),
263                     FsaNormal,
264                     1, 1,
265                     NULL, NULL);
266         if (status >= 0) {
267                 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
268                 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
269                 fib_complete(fibptr);
270         }
271
272         if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
273                 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
274
275         fsa_dev_ptr = (struct fsa_dev_info *) kmalloc(
276           sizeof(*fsa_dev_ptr) * maximum_num_containers, GFP_KERNEL);
277         if (!fsa_dev_ptr) {
278                 fib_free(fibptr);
279                 return -ENOMEM;
280         }
281         memset(fsa_dev_ptr, 0, sizeof(*fsa_dev_ptr) * maximum_num_containers);
282
283         dev->fsa_dev = fsa_dev_ptr;
284         dev->maximum_num_containers = maximum_num_containers;
285
286         for (index = 0; index < dev->maximum_num_containers; index++) {
287                 struct aac_query_mount *dinfo;
288                 struct aac_mount *dresp;
289
290                 fsa_dev_ptr[index].devname[0] = '\0';
291
292                 fib_init(fibptr);
293                 dinfo = (struct aac_query_mount *) fib_data(fibptr);
294
295                 dinfo->command = cpu_to_le32(VM_NameServe);
296                 dinfo->count = cpu_to_le32(index);
297                 dinfo->type = cpu_to_le32(FT_FILESYS);
298
299                 status = fib_send(ContainerCommand,
300                                     fibptr,
301                                     sizeof (struct aac_query_mount),
302                                     FsaNormal,
303                                     1, 1,
304                                     NULL, NULL);
305                 if (status < 0 ) {
306                         printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
307                         break;
308                 }
309                 dresp = (struct aac_mount *)fib_data(fibptr);
310
311                 dprintk ((KERN_DEBUG
312                   "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n",
313                   (int)index, (int)le32_to_cpu(dresp->status),
314                   (int)le32_to_cpu(dresp->mnt[0].vol),
315                   (int)le32_to_cpu(dresp->mnt[0].state),
316                   (unsigned)le32_to_cpu(dresp->mnt[0].capacity)));
317                 if ((le32_to_cpu(dresp->status) == ST_OK) &&
318                     (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
319                     (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
320                         fsa_dev_ptr[index].valid = 1;
321                         fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol);
322                         fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity);
323                         if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
324                                     fsa_dev_ptr[index].ro = 1;
325                 }
326                 fib_complete(fibptr);
327                 /*
328                  *      If there are no more containers, then stop asking.
329                  */
330                 if ((index + 1) >= le32_to_cpu(dresp->count)){
331                         break;
332                 }
333         }
334         fib_free(fibptr);
335         return status;
336 }
337
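/*
 *      Hand a finished command back to the SCSI midlayer with the host lock
 *      held; used by the FIB completion callbacks and error paths below,
 *      which run without the lock.
 */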
338 static void aac_io_done(struct scsi_cmnd * scsicmd)
339 {
340         unsigned long cpu_flags;
341         struct Scsi_Host *host = scsicmd->device->host;
342         spin_lock_irqsave(host->host_lock, cpu_flags);
343         scsicmd->scsi_done(scsicmd);
344         spin_unlock_irqrestore(host->host_lock, cpu_flags);
345 }
346
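/*
 *      Completion handler for the CT_READ_NAME request issued by
 *      aac_get_container_name(): copy the returned container name (leading
 *      spaces stripped, space padded) into the product-id field of the
 *      INQUIRY data in the request buffer, then complete the command.
 */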
347 static void get_container_name_callback(void *context, struct fib * fibptr)
348 {
349         struct aac_get_name_resp * get_name_reply;
350         struct scsi_cmnd * scsicmd;
351
352         scsicmd = (struct scsi_cmnd *) context;
353
354         dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
355         if (fibptr == NULL)
356                 BUG();
357
358         get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
359         /* Failure is irrelevant, using default value instead */
360         if ((le32_to_cpu(get_name_reply->status) == CT_OK)
361          && (get_name_reply->data[0] != '\0')) {
362                 int    count;
363                 char * dp;
364                 char * sp = get_name_reply->data;
365                 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0';
366                 while (*sp == ' ')
367                         ++sp;
368                 count = sizeof(((struct inquiry_data *)NULL)->inqd_pid);
369                 dp = ((struct inquiry_data *)scsicmd->request_buffer)->inqd_pid;
370                 if (*sp) do {
371                         *dp++ = (*sp) ? *sp++ : ' ';
372                 } while (--count > 0);
373         }
374         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
375
376         fib_complete(fibptr);
377         fib_free(fibptr);
378         aac_io_done(scsicmd);
379 }
380
381 /**
382  *      aac_get_container_name  -       get container name, non-blocking.
383  */
384 static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid)
385 {
386         int status;
387         struct aac_get_name *dinfo;
388         struct fib * cmd_fibcontext;
389         struct aac_dev * dev;
390
391         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
392
393         if (!(cmd_fibcontext = fib_alloc(dev)))
394                 return -ENOMEM;
395
396         fib_init(cmd_fibcontext);
397         dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
398
399         dinfo->command = cpu_to_le32(VM_ContainerConfig);
400         dinfo->type = cpu_to_le32(CT_READ_NAME);
401         dinfo->cid = cpu_to_le32(cid);
402         dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
403
404         status = fib_send(ContainerCommand, 
405                   cmd_fibcontext, 
406                   sizeof (struct aac_get_name),
407                   FsaNormal, 
408                   0, 1, 
409                   (fib_callback) get_container_name_callback, 
410                   (void *) scsicmd);
411         
412         /*
413          *      Check that the command was queued to the controller
414          */
415         if (status == -EINPROGRESS) 
416                 return 0;
417                 
418         printk(KERN_WARNING "aac_get_container_name: fib_send failed with status: %d.\n", status);
419         fib_complete(cmd_fibcontext);
420         fib_free(cmd_fibcontext);
421         return -1;
422 }
423
424 /**
425  *      probe_container         -       query a logical volume
426  *      @dev: device to query
427  *      @cid: container identifier
428  *
429  *      Queries the controller about the given volume. The volume information
430  *      is updated in the struct fsa_dev_info structure rather than returned.
431  */
432  
433 static int probe_container(struct aac_dev *dev, int cid)
434 {
435         struct fsa_dev_info *fsa_dev_ptr;
436         int status;
437         struct aac_query_mount *dinfo;
438         struct aac_mount *dresp;
439         struct fib * fibptr;
440         unsigned instance;
441
442         fsa_dev_ptr = dev->fsa_dev;
443         instance = dev->scsi_host_ptr->unique_id;
444
445         if (!(fibptr = fib_alloc(dev)))
446                 return -ENOMEM;
447
448         fib_init(fibptr);
449
450         dinfo = (struct aac_query_mount *)fib_data(fibptr);
451
452         dinfo->command = cpu_to_le32(VM_NameServe);
453         dinfo->count = cpu_to_le32(cid);
454         dinfo->type = cpu_to_le32(FT_FILESYS);
455
456         status = fib_send(ContainerCommand,
457                             fibptr,
458                             sizeof(struct aac_query_mount),
459                             FsaNormal,
460                             1, 1,
461                             NULL, NULL);
462         if (status < 0) {
463                 printk(KERN_WARNING "aacraid: probe_containers query failed.\n");
464                 goto error;
465         }
466
467         dresp = (struct aac_mount *) fib_data(fibptr);
468
469         if ((le32_to_cpu(dresp->status) == ST_OK) &&
470             (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
471             (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
472                 fsa_dev_ptr[cid].valid = 1;
473                 fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol);
474                 fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity);
475                 if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY)
476                         fsa_dev_ptr[cid].ro = 1;
477         }
478
479 error:
480         fib_complete(fibptr);
481         fib_free(fibptr);
482
483         return status;
484 }
485
486 /* Local Structure to set SCSI inquiry data strings */
487 struct scsi_inq {
488         char vid[8];         /* Vendor ID */
489         char pid[16];        /* Product ID */
490         char prl[4];         /* Product Revision Level */
491 };
492
493 /**
494  *      inqstrcpy       -       string copy
495  *      @a:     string to copy from
496  *      @b:     string to copy to
497  *
498  *      Copy a string from one location to another
499  *      without copying the trailing \0
500  */
501
502 static void inqstrcpy(char *a, char *b)
503 {
504
505         while(*a != (char)0) 
506                 *b++ = *a++;
507 }
508
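/*
 *      Container type names, indexed by the volume type reported in
 *      VM_NameServe replies; setinqstr() appends the matching name to the
 *      INQUIRY product string.
 */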
509 static char *container_types[] = {
510         "None",
511         "Volume",
512         "Mirror",
513         "Stripe",
514         "RAID5",
515         "SSRW",
516         "SSRO",
517         "Morph",
518         "Legacy",
519         "RAID4",
520         "RAID10",             
521         "RAID00",             
522         "V-MIRRORS",          
523         "PSEUDO R4",          
524         "RAID50",
525         "Unknown"
526 };
527
528
529
530 /* Function: setinqstr
531  *
532  * Arguments: [1] int devtype [2] void *data [3] int tindex
533  *
534  * Purpose: Sets SCSI inquiry data strings for vendor, product
535  * and revision level. Allows strings to be set in platform dependent
536  * files instead of in OS dependent driver source.
537  */
538
539 static void setinqstr(int devtype, void *data, int tindex)
540 {
541         struct scsi_inq *str;
542         struct aac_driver_ident *mp;
543
544         mp = aac_get_driver_ident(devtype);
545    
546         str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
547
548         inqstrcpy (mp->vname, str->vid); 
549         inqstrcpy (mp->model, str->pid); /* last six chars reserved for vol type */
550
551         if (tindex < (sizeof(container_types)/sizeof(char *))){
552                 char *findit = str->pid;
553
554                 for ( ; *findit != ' '; findit++); /* walk till we find a space */
555                 /* RAID is superfluous in the context of a RAID device */
556                 if (memcmp(findit-4, "RAID", 4) == 0)
557                         *(findit -= 4) = ' ';
558                 inqstrcpy (container_types[tindex], findit + 1);
559         }
560         inqstrcpy ("V1.0", str->prl);
561 }
562
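/*
 *      set_sense() builds fixed-format (0x70) sense data: sense_key,
 *      sense_code and a_sense_code land in bytes 2, 12 and 13.  When
 *      incorrect_length is set the ILI bit and the residue are recorded;
 *      for ILLEGAL_REQUEST the sense-key-specific bytes 15-17 identify the
 *      offending bit and field in the CDB or parameter list.
 */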
563 void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
564                     u8 a_sense_code, u8 incorrect_length,
565                     u8 bit_pointer, u16 field_pointer,
566                     u32 residue)
567 {
568         sense_buf[0] = 0xF0;    /* Sense data valid, err code 70h (current error) */
569         sense_buf[1] = 0;       /* Segment number, always zero */
570
571         if (incorrect_length) {
572                 sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
573                 sense_buf[3] = BYTE3(residue);
574                 sense_buf[4] = BYTE2(residue);
575                 sense_buf[5] = BYTE1(residue);
576                 sense_buf[6] = BYTE0(residue);
577         } else
578                 sense_buf[2] = sense_key;       /* Sense key */
579
580         if (sense_key == ILLEGAL_REQUEST)
581                 sense_buf[7] = 10;      /* Additional sense length */
582         else
583                 sense_buf[7] = 6;       /* Additional sense length */
584
585         sense_buf[12] = sense_code;     /* Additional sense code */
586         sense_buf[13] = a_sense_code;   /* Additional sense code qualifier */
587         if (sense_key == ILLEGAL_REQUEST) {
588                 sense_buf[15] = 0;
589
590                 if (sense_code == SENCODE_INVALID_PARAM_FIELD)
591                         sense_buf[15] = 0x80;/* Std sense key specific field */
592                 /* Illegal parameter is in the parameter block */
593
594                 if (sense_code == SENCODE_INVALID_CDB_FIELD)
595                         sense_buf[15] = 0xc0;/* Std sense key specific field */
596                 /* Illegal parameter is in the CDB block */
597                 sense_buf[15] |= bit_pointer;
598                 sense_buf[16] = field_pointer >> 8;     /* MSB */
599                 sense_buf[17] = field_pointer;          /* LSB */
600         }
601 }
602
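/*
 *      aac_get_adapter_info() issues RequestAdapterInfo, logs the kernel,
 *      monitor and BIOS revisions, then derives non-DASD and 64-bit DMA (DAC)
 *      support from the reported adapter options, optionally overridden by
 *      the nondasd and dacmode module parameters.
 */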
603 int aac_get_adapter_info(struct aac_dev* dev)
604 {
605         struct fib* fibptr;
606         struct aac_adapter_info* info;
607         int rcode;
608         u32 tmp;
609         if (!(fibptr = fib_alloc(dev)))
610                 return -ENOMEM;
611
612         fib_init(fibptr);
613         info = (struct aac_adapter_info*) fib_data(fibptr);
614
615         memset(info,0,sizeof(struct aac_adapter_info));
616
617         rcode = fib_send(RequestAdapterInfo,
618                         fibptr, 
619                         sizeof(struct aac_adapter_info),
620                         FsaNormal, 
621                         1, 1, 
622                         NULL, 
623                         NULL);
624
625         memcpy(&dev->adapter_info, info, sizeof(struct aac_adapter_info));
626
627         tmp = dev->adapter_info.kernelrev;
628         printk(KERN_INFO"%s%d: kernel %d.%d.%d build %d\n", 
629                         dev->name, dev->id,
630                         tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
631                         dev->adapter_info.kernelbuild);
632         tmp = dev->adapter_info.monitorrev;
633         printk(KERN_INFO"%s%d: monitor %d.%d.%d build %d\n", 
634                         dev->name, dev->id,
635                         tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
636                         dev->adapter_info.monitorbuild);
637         tmp = dev->adapter_info.biosrev;
638         printk(KERN_INFO"%s%d: bios %d.%d.%d build %d\n", 
639                         dev->name, dev->id,
640                         tmp>>24,(tmp>>16)&0xff,(tmp>>8)&0xff,
641                         dev->adapter_info.biosbuild);
642         printk(KERN_INFO"%s%d: serial %x%x\n",
643                         dev->name, dev->id,
644                         dev->adapter_info.serial[0],
645                         dev->adapter_info.serial[1]);
646
647         dev->nondasd_support = 0;
648         dev->raid_scsi_mode = 0;
649         if(dev->adapter_info.options & AAC_OPT_NONDASD){
650                 dev->nondasd_support = 1;
651         }
652
653         /*
654          * If the firmware supports ROMB RAID/SCSI mode and we are currently
655          * in RAID/SCSI mode, set the flag. For now if in this mode we will
656          * force nondasd support on. If we decide to allow the non-dasd flag
657  * additional changes will have to be made to support
658  * RAID/SCSI.  The function aac_scsi_cmd in this module will have to be
659          * changed to support the new dev->raid_scsi_mode flag instead of
660          * leaching off of the dev->nondasd_support flag. Also in linit.c the
661          * function aac_detect will have to be modified where it sets up the
662          * max number of channels based on the aac->nondasd_support flag only.
663          */
664         if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
665             (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
666                 dev->nondasd_support = 1;
667                 dev->raid_scsi_mode = 1;
668         }
669         if (dev->raid_scsi_mode != 0)
670                 printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
671                                 dev->name, dev->id);
672                 
673         if(nondasd != -1) {  
674                 dev->nondasd_support = (nondasd!=0);
675         }
676         if(dev->nondasd_support != 0){
677                 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
678         }
679
680         dev->dac_support = 0;
681         if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
682                 printk(KERN_INFO "%s%d: 64bit support enabled.\n", dev->name, dev->id);
683                 dev->dac_support = 1;
684         }
685
686         if(dacmode != -1) {
687                 dev->dac_support = (dacmode!=0);
688         }
689         if(dev->dac_support != 0) {
690                 if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL) &&
691                         !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFFFFFFFFFULL)) {
692                         printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
693                                 dev->name, dev->id);
694                 } else if (!pci_set_dma_mask(dev->pdev, 0xFFFFFFFFULL) &&
695                         !pci_set_consistent_dma_mask(dev->pdev, 0xFFFFFFFFULL)) {
696                         printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
697                                 dev->name, dev->id);
698                         dev->dac_support = 0;
699                 } else {
700                         printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
701                                 dev->name, dev->id);
702                         rcode = -ENOMEM;
703                 }
704         }
705
706         fib_complete(fibptr);
707         fib_free(fibptr);
708
709         return rcode;
710 }
711
712
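/*
 *      Completion handler for container reads: unmap the data buffer,
 *      translate the adapter status into a SCSI result (building sense data
 *      on failure) and complete the command.
 */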
713 static void read_callback(void *context, struct fib * fibptr)
714 {
715         struct aac_dev *dev;
716         struct aac_read_reply *readreply;
717         struct scsi_cmnd *scsicmd;
718         u32 lba;
719         u32 cid;
720
721         scsicmd = (struct scsi_cmnd *) context;
722
723         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
724         cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
725
726         lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
727         dprintk((KERN_DEBUG "read_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
728
729         if (fibptr == NULL)
730                 BUG();
731                 
732         if(scsicmd->use_sg)
733                 pci_unmap_sg(dev->pdev, 
734                         (struct scatterlist *)scsicmd->buffer,
735                         scsicmd->use_sg,
736                         scsicmd->sc_data_direction);
737         else if(scsicmd->request_bufflen)
738                 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
739                                  scsicmd->request_bufflen,
740                                  scsicmd->sc_data_direction);
741         readreply = (struct aac_read_reply *)fib_data(fibptr);
742         if (le32_to_cpu(readreply->status) == ST_OK)
743                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
744         else {
745                 printk(KERN_WARNING "read_callback: read failed, status = %d\n", readreply->status);
746                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
747                 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
748                                     HARDWARE_ERROR,
749                                     SENCODE_INTERNAL_TARGET_FAILURE,
750                                     ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
751                                     0, 0);
752                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
753                   (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
754                     ? sizeof(scsicmd->sense_buffer)
755                     : sizeof(dev->fsa_dev[cid].sense_data));
756         }
757         fib_complete(fibptr);
758         fib_free(fibptr);
759
760         aac_io_done(scsicmd);
761 }
762
763 static void write_callback(void *context, struct fib * fibptr)
764 {
765         struct aac_dev *dev;
766         struct aac_write_reply *writereply;
767         struct scsi_cmnd *scsicmd;
768         u32 lba;
769         u32 cid;
770
771         scsicmd = (struct scsi_cmnd *) context;
772         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
773         cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
774
775         lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
776         dprintk((KERN_DEBUG "write_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
777         if (fibptr == NULL)
778                 BUG();
779
780         if(scsicmd->use_sg)
781                 pci_unmap_sg(dev->pdev, 
782                         (struct scatterlist *)scsicmd->buffer,
783                         scsicmd->use_sg,
784                         scsicmd->sc_data_direction);
785         else if(scsicmd->request_bufflen)
786                 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
787                                  scsicmd->request_bufflen,
788                                  scsicmd->sc_data_direction);
789
790         writereply = (struct aac_write_reply *) fib_data(fibptr);
791         if (le32_to_cpu(writereply->status) == ST_OK)
792                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
793         else {
794                 printk(KERN_WARNING "write_callback: write failed, status = %d\n", writereply->status);
795                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
796                 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
797                                     HARDWARE_ERROR,
798                                     SENCODE_INTERNAL_TARGET_FAILURE,
799                                     ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
800                                     0, 0);
801                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 
802                                 sizeof(struct sense_data));
803         }
804
805         fib_complete(fibptr);
806         fib_free(fibptr);
807         aac_io_done(scsicmd);
808 }
809
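/*
 *      aac_read() decodes a READ(6)/READ(10) CDB into an LBA and block count
 *      and issues either a 64-bit (VM_CtHostRead64) or 32-bit (VM_CtBlockRead)
 *      container read FIB, depending on dev->dac_support.
 */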
810 int aac_read(struct scsi_cmnd * scsicmd, int cid)
811 {
812         u32 lba;
813         u32 count;
814         int status;
815
816         u16 fibsize;
817         struct aac_dev *dev;
818         struct fib * cmd_fibcontext;
819
820         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
821         /*
822          *      Get block address and transfer length
823          */
824         if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */
825         {
826                 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid));
827
828                 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
829                 count = scsicmd->cmnd[4];
830
831                 if (count == 0)
832                         count = 256;
833         } else {
834                 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid));
835
836                 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
837                 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
838         }
839         dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
840         /*
841          *      Allocate and initialize a Fib
842          */
843         if (!(cmd_fibcontext = fib_alloc(dev))) {
844                 return -1;
845         }
846
847         fib_init(cmd_fibcontext);
848
849         if(dev->dac_support == 1) {
850                 struct aac_read64 *readcmd;
851                 readcmd = (struct aac_read64 *) fib_data(cmd_fibcontext);
852                 readcmd->command = cpu_to_le32(VM_CtHostRead64);
853                 readcmd->cid = cpu_to_le16(cid);
854                 readcmd->sector_count = cpu_to_le16(count);
855                 readcmd->block = cpu_to_le32(lba);
856                 readcmd->pad   = cpu_to_le16(0);
857                 readcmd->flags = cpu_to_le16(0); 
858
859                 aac_build_sg64(scsicmd, &readcmd->sg);
860                 if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
861                         BUG();
862                 fibsize = sizeof(struct aac_read64) + ((readcmd->sg.count - 1) * sizeof (struct sgentry64));
863                 /*
864                  *      Now send the Fib to the adapter
865                  */
866                 status = fib_send(ContainerCommand64, 
867                           cmd_fibcontext, 
868                           fibsize, 
869                           FsaNormal, 
870                           0, 1, 
871                           (fib_callback) read_callback, 
872                           (void *) scsicmd);
873         } else {
874                 struct aac_read *readcmd;
875                 readcmd = (struct aac_read *) fib_data(cmd_fibcontext);
876                 readcmd->command = cpu_to_le32(VM_CtBlockRead);
877                 readcmd->cid = cpu_to_le32(cid);
878                 readcmd->block = cpu_to_le32(lba);
879                 readcmd->count = cpu_to_le32(count * 512);
880
881                 if (count * 512 > (64 * 1024))
882                         BUG();
883
884                 aac_build_sg(scsicmd, &readcmd->sg);
885                 if(readcmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
886                         BUG();
887                 fibsize = sizeof(struct aac_read) + ((readcmd->sg.count - 1) * sizeof (struct sgentry));
888                 /*
889                  *      Now send the Fib to the adapter
890                  */
891                 status = fib_send(ContainerCommand, 
892                           cmd_fibcontext, 
893                           fibsize, 
894                           FsaNormal, 
895                           0, 1, 
896                           (fib_callback) read_callback, 
897                           (void *) scsicmd);
898         }
899
900         
901
902         /*
903          *      Check that the command was queued to the controller
904          */
905         if (status == -EINPROGRESS) 
906         {
907                 dprintk(("read queued.\n"));
908                 return 0;
909         }
910                 
911         printk(KERN_WARNING "aac_read: fib_send failed with status: %d.\n", status);
912         /*
913          *      For some reason, the Fib didn't queue, return QUEUE_FULL
914          */
915         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
916         aac_io_done(scsicmd);
917         fib_complete(cmd_fibcontext);
918         fib_free(cmd_fibcontext);
919         return 0;
920 }
921
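/*
 *      aac_write() mirrors aac_read() for WRITE(6)/WRITE(10): build a
 *      VM_CtHostWrite64 or VM_CtBlockWrite FIB and send it with
 *      write_callback() as the completion handler.
 */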
922 static int aac_write(struct scsi_cmnd * scsicmd, int cid)
923 {
924         u32 lba;
925         u32 count;
926         int status;
927         u16 fibsize;
928         struct aac_dev *dev;
929         struct fib * cmd_fibcontext;
930
931         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
932         /*
933          *      Get block address and transfer length
934          */
935         if (scsicmd->cmnd[0] == WRITE_6)        /* 6 byte command */
936         {
937                 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
938                 count = scsicmd->cmnd[4];
939                 if (count == 0)
940                         count = 256;
941         } else {
942                 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid));
943                 lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
944                 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
945         }
946         dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), lba, jiffies));
947         /*
948          *      Allocate and initialize a Fib then setup a BlockWrite command
949          */
950         if (!(cmd_fibcontext = fib_alloc(dev))) {
951                 scsicmd->result = DID_ERROR << 16;
952                 aac_io_done(scsicmd);
953                 return 0;
954         }
955         fib_init(cmd_fibcontext);
956
957         if(dev->dac_support == 1) {
958                 struct aac_write64 *writecmd;
959                 writecmd = (struct aac_write64 *) fib_data(cmd_fibcontext);
960                 writecmd->command = cpu_to_le32(VM_CtHostWrite64);
961                 writecmd->cid = cpu_to_le16(cid);
962                 writecmd->sector_count = cpu_to_le16(count); 
963                 writecmd->block = cpu_to_le32(lba);
964                 writecmd->pad   = cpu_to_le16(0);
965                 writecmd->flags = cpu_to_le16(0);
966
967                 aac_build_sg64(scsicmd, &writecmd->sg);
968                 if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
969                         BUG();
970                 fibsize = sizeof(struct aac_write64) + ((writecmd->sg.count - 1) * sizeof (struct sgentry64));
971                 /*
972                  *      Now send the Fib to the adapter
973                  */
974                 status = fib_send(ContainerCommand64, 
975                           cmd_fibcontext, 
976                           fibsize, 
977                           FsaNormal, 
978                           0, 1, 
979                           (fib_callback) write_callback, 
980                           (void *) scsicmd);
981         } else {
982                 struct aac_write *writecmd;
983                 writecmd = (struct aac_write *) fib_data(cmd_fibcontext);
984                 writecmd->command = cpu_to_le32(VM_CtBlockWrite);
985                 writecmd->cid = cpu_to_le32(cid);
986                 writecmd->block = cpu_to_le32(lba);
987                 writecmd->count = cpu_to_le32(count * 512);
988                 writecmd->sg.count = cpu_to_le32(1);
989                 /* ->stable is not used - it did mean which type of write */
990
991                 if (count * 512 > (64 * 1024)) {
992                         BUG();
993                 }
994
995                 aac_build_sg(scsicmd, &writecmd->sg);
996                 if(writecmd->sg.count > MAX_DRIVER_SG_SEGMENT_COUNT)
997                         BUG();
998                 fibsize = sizeof(struct aac_write) + ((writecmd->sg.count - 1) * sizeof (struct sgentry));
999                 /*
1000                  *      Now send the Fib to the adapter
1001                  */
1002                 status = fib_send(ContainerCommand, 
1003                           cmd_fibcontext, 
1004                           fibsize, 
1005                           FsaNormal, 
1006                           0, 1, 
1007                           (fib_callback) write_callback, 
1008                           (void *) scsicmd);
1009         }
1010
1011         /*
1012          *      Check that the command was queued to the controller
1013          */
1014         if (status == -EINPROGRESS)
1015         {
1016                 dprintk(("write queued.\n"));
1017                 return 0;
1018         }
1019
1020         printk(KERN_WARNING "aac_write: fib_send failed with status: %d\n", status);
1021         /*
1022          *      For some reason, the Fib didn't queue, return QUEUE_FULL
1023          */
1024         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
1025         aac_io_done(scsicmd);
1026
1027         fib_complete(cmd_fibcontext);
1028         fib_free(cmd_fibcontext);
1029         return 0;
1030 }
1031
1032 static void synchronize_callback(void *context, struct fib *fibptr)
1033 {
1034         struct aac_synchronize_reply *synchronizereply;
1035         struct scsi_cmnd *cmd;
1036
1037         cmd = context;
1038
1039         dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", 
1040                                 smp_processor_id(), jiffies));
1041         BUG_ON(fibptr == NULL);
1042
1043
1044         synchronizereply = fib_data(fibptr);
1045         if (le32_to_cpu(synchronizereply->status) == CT_OK)
1046                 cmd->result = DID_OK << 16 | 
1047                         COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1048         else {
1049                 struct scsi_device *sdev = cmd->device;
1050                 struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
1051                 u32 cid = ID_LUN_TO_CONTAINER(sdev->id, sdev->lun);
1052                 printk(KERN_WARNING 
1053                      "synchronize_callback: synchronize failed, status = %d\n",
1054                      synchronizereply->status);
1055                 cmd->result = DID_OK << 16 | 
1056                         COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1057                 set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
1058                                     HARDWARE_ERROR,
1059                                     SENCODE_INTERNAL_TARGET_FAILURE,
1060                                     ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
1061                                     0, 0);
1062                 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1063                   min(sizeof(dev->fsa_dev[cid].sense_data), 
1064                           sizeof(cmd->sense_buffer)));
1065         }
1066
1067         fib_complete(fibptr);
1068         fib_free(fibptr);
1069         aac_io_done(cmd);
1070 }
1071
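/*
 *      Flush the adapter cache for a container: wait until no other commands
 *      are outstanding to the device (otherwise requeue as busy), then send a
 *      CT_FLUSH_CACHE request; the command completes from
 *      synchronize_callback().
 */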
1072 static int aac_synchronize(struct scsi_cmnd *scsicmd, int cid)
1073 {
1074         int status;
1075         struct fib *cmd_fibcontext;
1076         struct aac_synchronize *synchronizecmd;
1077         struct scsi_cmnd *cmd;
1078         struct scsi_device *sdev = scsicmd->device;
1079         int active = 0;
1080         unsigned long flags;
1081
1082         /*
1083          * Wait for all commands to complete to this specific
1084          * target (block).
1085          */
1086         spin_lock_irqsave(&sdev->list_lock, flags);
1087         list_for_each_entry(cmd, &sdev->cmd_list, list)
1088                 if (cmd != scsicmd && cmd->serial_number != 0) {
1089                         ++active;
1090                         break;
1091                 }
1092
1093         spin_unlock_irqrestore(&sdev->list_lock, flags);
1094
1095         /*
1096          *      Yield the processor (requeue for later)
1097          */
1098         if (active)
1099                 return SCSI_MLQUEUE_DEVICE_BUSY;
1100
1101         /*
1102          *      Allocate and initialize a Fib
1103          */
1104         if (!(cmd_fibcontext = 
1105             fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) 
1106                 return SCSI_MLQUEUE_HOST_BUSY;
1107
1108         fib_init(cmd_fibcontext);
1109
1110         synchronizecmd = fib_data(cmd_fibcontext);
1111         synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
1112         synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
1113         synchronizecmd->cid = cpu_to_le32(cid);
1114         synchronizecmd->count = 
1115              cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
1116
1117         /*
1118          *      Now send the Fib to the adapter
1119          */
1120         status = fib_send(ContainerCommand,
1121                   cmd_fibcontext,
1122                   sizeof(struct aac_synchronize),
1123                   FsaNormal,
1124                   0, 1,
1125                   (fib_callback)synchronize_callback,
1126                   (void *)scsicmd);
1127
1128         /*
1129          *      Check that the command was queued to the controller
1130          */
1131         if (status == -EINPROGRESS)
1132                 return 0;
1133
1134         printk(KERN_WARNING 
1135                 "aac_synchronize: fib_send failed with status: %d.\n", status);
1136         fib_complete(cmd_fibcontext);
1137         fib_free(cmd_fibcontext);
1138         return SCSI_MLQUEUE_HOST_BUSY;
1139 }
1140
1141 /**
1142  *      aac_scsi_cmd()          -       Process SCSI command
1143  *      @scsicmd:               SCSI command block
1144  *
1145  *      Emulate a SCSI command and queue the required request for the
1146  *      aacraid firmware.
1147  */
1148  
1149 int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1150 {
1151         u32 cid = 0;
1152         struct Scsi_Host *host = scsicmd->device->host;
1153         struct aac_dev *dev = (struct aac_dev *)host->hostdata;
1154         struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
1155         int cardtype = dev->cardtype;
1156         int ret;
1157         
1158         /*
1159          *      If the bus, id or lun is out of range, return fail
1160          *      Test does not apply to ID 16, the pseudo id for the controller
1161          *      itself.
1162          */
1163         if (scsicmd->device->id != host->this_id) {
1164                 if ((scsicmd->device->channel == 0) ){
1165                         if( (scsicmd->device->id >= dev->maximum_num_containers) || (scsicmd->device->lun != 0)){ 
1166                                 scsicmd->result = DID_NO_CONNECT << 16;
1167                                 scsicmd->scsi_done(scsicmd);
1168                                 return 0;
1169                         }
1170                         cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun);
1171
1172                         /*
1173                          *      If the target container doesn't exist, it may have
1174                          *      been newly created
1175                          */
1176                         if ((fsa_dev_ptr[cid].valid & 1) == 0) {
1177                                 switch (scsicmd->cmnd[0]) {
1178                                 case INQUIRY:
1179                                 case READ_CAPACITY:
1180                                 case TEST_UNIT_READY:
1181                                         spin_unlock_irq(host->host_lock);
1182                                         probe_container(dev, cid);
1183                                         spin_lock_irq(host->host_lock);
1184                                         if (fsa_dev_ptr[cid].valid == 0) {
1185                                                 scsicmd->result = DID_NO_CONNECT << 16;
1186                                                 scsicmd->scsi_done(scsicmd);
1187                                                 return 0;
1188                                         }
1189                                 default:
1190                                         break;
1191                                 }
1192                         }
1193                         /*
1194                          *      If the target container still doesn't exist, 
1195                          *      return failure
1196                          */
1197                         if (fsa_dev_ptr[cid].valid == 0) {
1198                                 scsicmd->result = DID_BAD_TARGET << 16;
1199                                 scsicmd->scsi_done(scsicmd);
1200                                 return 0;
1201                         }
1202                 } else {  /* check for physical non-dasd devices */
1203                         if(dev->nondasd_support == 1){
1204                                 return aac_send_srb_fib(scsicmd);
1205                         } else {
1206                                 scsicmd->result = DID_NO_CONNECT << 16;
1207                                 scsicmd->scsi_done(scsicmd);
1208                                 return 0;
1209                         }
1210                 }
1211         }
1212         /*
1213          * else Command for the controller itself
1214          */
1215         else if ((scsicmd->cmnd[0] != INQUIRY) &&       /* only INQUIRY & TUR cmnd supported for controller */
1216                 (scsicmd->cmnd[0] != TEST_UNIT_READY)) 
1217         {
1218                 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
1219                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1220                 set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1221                             ILLEGAL_REQUEST,
1222                             SENCODE_INVALID_COMMAND,
1223                             ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1224                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1225                   (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1226                     ? sizeof(scsicmd->sense_buffer)
1227                     : sizeof(dev->fsa_dev[cid].sense_data));
1228                 scsicmd->scsi_done(scsicmd);
1229                 return 0;
1230         }
1231
1232
1233         /* Handle commands here that don't really require going out to the adapter */
1234         switch (scsicmd->cmnd[0]) {
1235         case INQUIRY:
1236         {
1237                 struct inquiry_data *inq_data_ptr;
1238
1239                 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", scsicmd->device->id));
1240                 inq_data_ptr = (struct inquiry_data *)scsicmd->request_buffer;
1241                 memset(inq_data_ptr, 0, sizeof (struct inquiry_data));
1242
1243                 inq_data_ptr->inqd_ver = 2;     /* claim compliance to SCSI-2 */
1244                 inq_data_ptr->inqd_dtq = 0x80;  /* set RMB bit to one indicating that the medium is removable */
1245                 inq_data_ptr->inqd_rdf = 2;     /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
1246                 inq_data_ptr->inqd_len = 31;
1247                 /*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
1248                 inq_data_ptr->inqd_pad2= 0x32 ;  /*WBus16|Sync|CmdQue */
1249                 /*
1250                  *      Set the Vendor, Product, and Revision Level
1251                  *      see: <vendor>.c i.e. aac.c
1252                  */
1253                 if (scsicmd->device->id == host->this_id) {
1254                         setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), (sizeof(container_types)/sizeof(char *)));
1255                         inq_data_ptr->inqd_pdt = INQD_PDT_PROC; /* Processor device */
1256                         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1257                         scsicmd->scsi_done(scsicmd);
1258                         return 0;
1259                 }
1260                 setinqstr(cardtype, (void *) (inq_data_ptr->inqd_vid), fsa_dev_ptr[cid].type);
1261                 inq_data_ptr->inqd_pdt = INQD_PDT_DA;   /* Direct/random access device */
1262                 return aac_get_container_name(scsicmd, cid);
1263         }
1264         case READ_CAPACITY:
1265         {
1266                 u32 capacity;
1267                 char *cp;
1268
1269                 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
1270                 if (fsa_dev_ptr[cid].size <= 0x100000000LL)
1271                         capacity = fsa_dev_ptr[cid].size - 1;
1272                 else
1273                         capacity = (u32)-1;
1274                 cp = scsicmd->request_buffer;
1275                 cp[0] = (capacity >> 24) & 0xff;
1276                 cp[1] = (capacity >> 16) & 0xff;
1277                 cp[2] = (capacity >> 8) & 0xff;
1278                 cp[3] = (capacity >> 0) & 0xff;
1279                 cp[4] = 0;
1280                 cp[5] = 0;
1281                 cp[6] = 2;
1282                 cp[7] = 0;
1283
1284                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1285                 scsicmd->scsi_done(scsicmd);
1286
1287                 return 0;
1288         }
1289
1290         case MODE_SENSE:
1291         {
1292                 char *mode_buf;
1293
1294                 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1295                 mode_buf = scsicmd->request_buffer;
1296                 mode_buf[0] = 3;        /* Mode data length */
1297                 mode_buf[1] = 0;        /* Medium type - default */
1298                 mode_buf[2] = 0;        /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1299                 mode_buf[3] = 0;        /* Block descriptor length */
1300
1301                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1302                 scsicmd->scsi_done(scsicmd);
1303
1304                 return 0;
1305         }
1306         case MODE_SENSE_10:
1307         {
1308                 char *mode_buf;
1309
1310                 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1311                 mode_buf = scsicmd->request_buffer;
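                /*
                 *      MODE SENSE(10) uses an 8-byte header; as above nothing
                 *      follows it, so the mode data length is 6 (the header
                 *      minus its two length bytes).
                 */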
1312                 mode_buf[0] = 0;        /* Mode data length (MSB) */
1313                 mode_buf[1] = 6;        /* Mode data length (LSB) */
1314                 mode_buf[2] = 0;        /* Medium type - default */
1315                 mode_buf[3] = 0;        /* Device-specific param, bit 8: 0/1 = write enabled/protected */
1316                 mode_buf[4] = 0;        /* reserved */
1317                 mode_buf[5] = 0;        /* reserved */
1318                 mode_buf[6] = 0;        /* Block descriptor length (MSB) */
1319                 mode_buf[7] = 0;        /* Block descriptor length (LSB) */
1320
1321                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1322                 scsicmd->scsi_done(scsicmd);
1323
1324                 return 0;
1325         }
1326         case REQUEST_SENSE:
1327                 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
1328                 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
1329                 memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
1330                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1331                 scsicmd->scsi_done(scsicmd);
1332                 return 0;
1333
1334         case ALLOW_MEDIUM_REMOVAL:
1335                 dprintk((KERN_DEBUG "LOCK command.\n"));
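                /*
                 *      cmnd[4] carries the PREVENT field of PREVENT/ALLOW
                 *      MEDIUM REMOVAL: a non-zero value locks the container
                 *      (delete_disk() will then refuse to remove it), zero
                 *      unlocks it.
                 */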
1336                 if (scsicmd->cmnd[4])
1337                         fsa_dev_ptr[cid].locked = 1;
1338                 else
1339                         fsa_dev_ptr[cid].locked = 0;
1340
1341                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1342                 scsicmd->scsi_done(scsicmd);
1343                 return 0;
1344         /*
1345          *      These commands are all No-Ops
1346          */
1347         case TEST_UNIT_READY:
1348         case RESERVE:
1349         case RELEASE:
1350         case REZERO_UNIT:
1351         case REASSIGN_BLOCKS:
1352         case SEEK_10:
1353         case START_STOP:
1354                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1355                 scsicmd->scsi_done(scsicmd);
1356                 return 0;
1357         }
1358
1359         switch (scsicmd->cmnd[0]) 
1360         {
1361                 case READ_6:
1362                 case READ_10:
1363                         /*
1364                          *      Hack to keep track of ordinal number of the device that
1365                          *      corresponds to a container. Needed to convert
1366                          *      containers to /dev/sd device names
1367                          */
1368                          
1369                         spin_unlock_irq(host->host_lock);
1370                         if  (scsicmd->request->rq_disk)
1371                                 memcpy(fsa_dev_ptr[cid].devname,
1372                                         scsicmd->request->rq_disk->disk_name,
1373                                         8);
1374
1375                         ret = aac_read(scsicmd, cid);
1376                         spin_lock_irq(host->host_lock);
1377                         return ret;
1378
1379                 case WRITE_6:
1380                 case WRITE_10:
1381                         spin_unlock_irq(host->host_lock);
1382                         ret = aac_write(scsicmd, cid);
1383                         spin_lock_irq(host->host_lock);
1384                         return ret;
1385
1386                 case SYNCHRONIZE_CACHE:
1387                         /* Issue FIB to tell firmware to flush its cache */
1388                         return aac_synchronize(scsicmd, cid);
1389                         
1390                 default:
1391                         /*
1392                          *      Unhandled commands
1393                          */
1394                         printk(KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]);
1395                         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1396                         set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
1397                                 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
1398                                 ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
1399                         memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
1400                           (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer))
1401                             ? sizeof(scsicmd->sense_buffer)
1402                             : sizeof(dev->fsa_dev[cid].sense_data));
1403                         scsicmd->scsi_done(scsicmd);
1404                         return 0;
1405         }
1406 }
1407
1408 static int query_disk(struct aac_dev *dev, void __user *arg)
1409 {
1410         struct aac_query_disk qd;
1411         struct fsa_dev_info *fsa_dev_ptr;
1412
1413         fsa_dev_ptr = dev->fsa_dev;
1414         if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
1415                 return -EFAULT;
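        /*
         *      Two lookup modes: a caller passing cnum == -1 has the container
         *      number derived from the id/lun pair; a caller passing bus, id
         *      and lun all as -1 supplies the container number and gets the
         *      host/bus/id/lun mapping filled in.  Anything else is invalid.
         */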
1416         if (qd.cnum == -1)
1417                 qd.cnum = ID_LUN_TO_CONTAINER(qd.id, qd.lun);
1418         else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) 
1419         {
1420                 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
1421                         return -EINVAL;
1422                 qd.instance = dev->scsi_host_ptr->host_no;
1423                 qd.bus = 0;
1424                 qd.id = CONTAINER_TO_ID(qd.cnum);
1425                 qd.lun = CONTAINER_TO_LUN(qd.cnum);
1426         }
1427         else return -EINVAL;
1428
1429         qd.valid = fsa_dev_ptr[qd.cnum].valid;
1430         qd.locked = fsa_dev_ptr[qd.cnum].locked;
1431         qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
1432
1433         if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
1434                 qd.unmapped = 1;
1435         else
1436                 qd.unmapped = 0;
1437
1438         strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
1439           min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
1440
1441         if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
1442                 return -EFAULT;
1443         return 0;
1444 }
1445
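/*
 *      force_delete_disk - unlike delete_disk() below, this variant ignores
 *      the locked flag and unconditionally marks the container deleted and
 *      no longer valid.
 */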
1446 static int force_delete_disk(struct aac_dev *dev, void __user *arg)
1447 {
1448         struct aac_delete_disk dd;
1449         struct fsa_dev_info *fsa_dev_ptr;
1450
1451         fsa_dev_ptr = dev->fsa_dev;
1452
1453         if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1454                 return -EFAULT;
1455
1456         if (dd.cnum >= dev->maximum_num_containers)
1457                 return -EINVAL;
1458         /*
1459          *      Mark this container as being deleted.
1460          */
1461         fsa_dev_ptr[dd.cnum].deleted = 1;
1462         /*
1463          *      Mark the container as no longer valid
1464          */
1465         fsa_dev_ptr[dd.cnum].valid = 0;
1466         return 0;
1467 }
1468
1469 static int delete_disk(struct aac_dev *dev, void __user *arg)
1470 {
1471         struct aac_delete_disk dd;
1472         struct fsa_dev_info *fsa_dev_ptr;
1473
1474         fsa_dev_ptr = dev->fsa_dev;
1475
1476         if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
1477                 return -EFAULT;
1478
1479         if (dd.cnum >= dev->maximum_num_containers)
1480                 return -EINVAL;
1481         /*
1482          *      If the container is locked, it can not be deleted by the API.
1483          */
1484         if (fsa_dev_ptr[dd.cnum].locked)
1485                 return -EBUSY;
1486         else {
1487                 /*
1488                  *      Mark the container as no longer being valid.
1489                  */
1490                 fsa_dev_ptr[dd.cnum].valid = 0;
1491                 fsa_dev_ptr[dd.cnum].devname[0] = '\0';
1492                 return 0;
1493         }
1494 }
1495
1496 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
1497 {
1498         switch (cmd) {
1499         case FSACTL_QUERY_DISK:
1500                 return query_disk(dev, arg);
1501         case FSACTL_DELETE_DISK:
1502                 return delete_disk(dev, arg);
1503         case FSACTL_FORCE_DELETE_DISK:
1504                 return force_delete_disk(dev, arg);
1505         case FSACTL_GET_CONTAINERS:
1506                 return aac_get_containers(dev);
1507         default:
1508                 return -ENOTTY;
1509         }
1510 }
1511
1512 /**
1513  *
1514  * aac_srb_callback
1515  * @context: the context set in the fib - here it is scsi cmd
1516  * @fibptr: pointer to the fib
1517  *
1518  * Handles the completion of a scsi command to a non-dasd device
1519  *
1520  */
1521
1522 static void aac_srb_callback(void *context, struct fib * fibptr)
1523 {
1524         struct aac_dev *dev;
1525         struct aac_srb_reply *srbreply;
1526         struct scsi_cmnd *scsicmd;
1527
1528         scsicmd = (struct scsi_cmnd *) context;
1529         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1530
1531         if (fibptr == NULL)
1532                 BUG();
1533
1534         srbreply = (struct aac_srb_reply *) fib_data(fibptr);
1535
1536         scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
1537         /*
1538          *      Calculate resid for sg 
1539          */
1540          
1541         scsicmd->resid = scsicmd->request_bufflen - le32_to_cpu(srbreply->data_xfer_length);
1542
1543         if(scsicmd->use_sg)
1544                 pci_unmap_sg(dev->pdev, 
1545                         (struct scatterlist *)scsicmd->buffer,
1546                         scsicmd->use_sg,
1547                         scsicmd->sc_data_direction);
1548         else if(scsicmd->request_bufflen)
1549                 pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
1550                         scsicmd->sc_data_direction);
1551
1552         /*
1553          * First check the fib status
1554          */
1555
1556         if (le32_to_cpu(srbreply->status) != ST_OK){
1557                 int len;
1558                 printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
1559                 len = (le32_to_cpu(srbreply->sense_data_size) > sizeof(scsicmd->sense_buffer)) ?
1560                                 sizeof(scsicmd->sense_buffer) : le32_to_cpu(srbreply->sense_data_size);
1561                 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
1562                 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1563         }
1564
1565         /*
1566          * Next check the srb status
1567          */
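        /*
         *      Only the low six bits of srb_status select the status code;
         *      the upper bits appear to be reserved for per-request flags
         *      and are masked off here.
         */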
1568         switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
1569         case SRB_STATUS_ERROR_RECOVERY:
1570         case SRB_STATUS_PENDING:
1571         case SRB_STATUS_SUCCESS:
1572                 if(scsicmd->cmnd[0] == INQUIRY ){
1573                         u8 b;
1574                         u8 b1;
1575                         /* We can't expose disk devices because we can't tell whether they
1576                          * are the raw container drives or stand-alone drives.  If they have
1577                          * the removable bit set then we should expose them though.
1578                          */
1579                         b = (*(u8*)scsicmd->buffer)&0x1f;
1580                         b1 = ((u8*)scsicmd->buffer)[1];
1581                         if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER 
1582                                         || (b==TYPE_DISK && (b1&0x80)) ){
1583                                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1584                         /*
1585                          * We will allow disk devices if in RAID/SCSI mode and
1586                          * the channel is 2
1587                          */
1588                         } else if ((dev->raid_scsi_mode) &&
1589                                         (scsicmd->device->channel == 2)) {
1590                                 scsicmd->result = DID_OK << 16 | 
1591                                                 COMMAND_COMPLETE << 8;
1592                         } else {
1593                                 scsicmd->result = DID_NO_CONNECT << 16 | 
1594                                                 COMMAND_COMPLETE << 8;
1595                         }
1596                 } else {
1597                         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1598                 }
1599                 break;
1600         case SRB_STATUS_DATA_OVERRUN:
1601                 switch(scsicmd->cmnd[0]){
1602                 case  READ_6:
1603                 case  WRITE_6:
1604                 case  READ_10:
1605                 case  WRITE_10:
1606                 case  READ_12:
1607                 case  WRITE_12:
1608                         if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) {
1609                                 printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
1610                         } else {
1611                                 printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
1612                         }
1613                         scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1614                         break;
1615                 case INQUIRY: {
1616                         u8 b;
1617                         u8 b1;
1618                         /* We can't expose disk devices because we can't tell whether they
1619                          * are the raw container drives or stand-alone drives
1620                          */
1621                         b = (*(u8*)scsicmd->buffer)&0x1f;
1622                         b1 = ((u8*)scsicmd->buffer)[1];
1623                         if( b==TYPE_TAPE || b==TYPE_WORM || b==TYPE_ROM || b==TYPE_MOD|| b==TYPE_MEDIUM_CHANGER
1624                                         || (b==TYPE_DISK && (b1&0x80)) ){
1625                                 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1626                         /*
1627                          * We will allow disk devices if in RAID/SCSI mode and
1628                          * the channel is 2
1629                          */
1630                         } else if ((dev->raid_scsi_mode) &&
1631                                         (scsicmd->device->channel == 2)) {
1632                                 scsicmd->result = DID_OK << 16 | 
1633                                                 COMMAND_COMPLETE << 8;
1634                         } else {
1635                                 scsicmd->result = DID_NO_CONNECT << 16 | 
1636                                                 COMMAND_COMPLETE << 8;
1637                         }
1638                         break;
1639                 }
1640                 default:
1641                         scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
1642                         break;
1643                 }
1644                 break;
1645         case SRB_STATUS_ABORTED:
1646                 scsicmd->result = DID_ABORT << 16 | ABORT << 8;
1647                 break;
1648         case SRB_STATUS_ABORT_FAILED:
1649                 // Not sure about this one - but assuming the hba was trying to abort for some reason
1650                 scsicmd->result = DID_ERROR << 16 | ABORT << 8;
1651                 break;
1652         case SRB_STATUS_PARITY_ERROR:
1653                 scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
1654                 break;
1655         case SRB_STATUS_NO_DEVICE:
1656         case SRB_STATUS_INVALID_PATH_ID:
1657         case SRB_STATUS_INVALID_TARGET_ID:
1658         case SRB_STATUS_INVALID_LUN:
1659         case SRB_STATUS_SELECTION_TIMEOUT:
1660                 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1661                 break;
1662
1663         case SRB_STATUS_COMMAND_TIMEOUT:
1664         case SRB_STATUS_TIMEOUT:
1665                 scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
1666                 break;
1667
1668         case SRB_STATUS_BUSY:
1669                 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
1670                 break;
1671
1672         case SRB_STATUS_BUS_RESET:
1673                 scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
1674                 break;
1675
1676         case SRB_STATUS_MESSAGE_REJECTED:
1677                 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
1678                 break;
1679         case SRB_STATUS_REQUEST_FLUSHED:
1680         case SRB_STATUS_ERROR:
1681         case SRB_STATUS_INVALID_REQUEST:
1682         case SRB_STATUS_REQUEST_SENSE_FAILED:
1683         case SRB_STATUS_NO_HBA:
1684         case SRB_STATUS_UNEXPECTED_BUS_FREE:
1685         case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
1686         case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
1687         case SRB_STATUS_DELAYED_RETRY:
1688         case SRB_STATUS_BAD_FUNCTION:
1689         case SRB_STATUS_NOT_STARTED:
1690         case SRB_STATUS_NOT_IN_USE:
1691         case SRB_STATUS_FORCE_ABORT:
1692         case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
1693         default:
1694 #ifdef AAC_DETAILED_STATUS_INFO
1695                 printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
1696                         le32_to_cpu(srbreply->srb_status) & 0x3F,
1697                         aac_get_status_string(
1698                                 le32_to_cpu(srbreply->srb_status) & 0x3F), 
1699                         scsicmd->cmnd[0], 
1700                         le32_to_cpu(srbreply->scsi_status));
1701 #endif
1702                 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
1703                 break;
1704         }
1705         if (le32_to_cpu(srbreply->scsi_status) == 0x02 ){  // Check Condition
1706                 int len;
1707                 scsicmd->result |= SAM_STAT_CHECK_CONDITION;
1708                 len = (le32_to_cpu(srbreply->sense_data_size) > sizeof(scsicmd->sense_buffer)) ?
1709                                 sizeof(scsicmd->sense_buffer) : le32_to_cpu(srbreply->sense_data_size);
1710 #ifdef AAC_DETAILED_STATUS_INFO
1711                 dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", 
1712                                         le32_to_cpu(srbreply->status), len));
1713 #endif
1714                 memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
1715                 
1716         }
1717         /*
1718          * OR in the scsi status (already shifted up a bit)
1719          */
1720         scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
1721
1722         fib_complete(fibptr);
1723         fib_free(fibptr);
1724         aac_io_done(scsicmd);
1725 }
1726
1727 /**
1728  *
1729  * aac_send_srb_fib
1730  * @scsicmd: the scsi command block
1731  *
1732  * This routine will form a FIB and fill in the aac_srb from the 
1733  * scsicmd passed in.
1734  */
1735
1736 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
1737 {
1738         struct fib* cmd_fibcontext;
1739         struct aac_dev* dev;
1740         int status;
1741         struct aac_srb *srbcmd;
1742         u16 fibsize;
1743         u32 flag;
1744         u32 timeout;
1745
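        /*
         *      Physical pass-through is limited here to targets 0-15 and
         *      LUNs 0-7; anything beyond that range is completed at once
         *      as DID_NO_CONNECT.
         */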
1746         if( scsicmd->device->id > 15 || scsicmd->device->lun > 7) {
1747                 scsicmd->result = DID_NO_CONNECT << 16;
1748                 scsicmd->scsi_done(scsicmd);
1749                 return 0;
1750         }
1751
1752         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1753         switch(scsicmd->sc_data_direction){
1754         case DMA_TO_DEVICE:
1755                 flag = SRB_DataOut;
1756                 break;
1757         case DMA_BIDIRECTIONAL:
1758                 flag = SRB_DataIn | SRB_DataOut;
1759                 break;
1760         case DMA_FROM_DEVICE:
1761                 flag = SRB_DataIn;
1762                 break;
1763         case DMA_NONE:
1764         default:        /* shuts up some versions of gcc */
1765                 flag = SRB_NoDataXfer;
1766                 break;
1767         }
1768
1769
1770         /*
1771          *      Allocate and initialize a Fib then setup a BlockWrite command
1772          */
1773         if (!(cmd_fibcontext = fib_alloc(dev))) {
1774                 return -1;
1775         }
1776         fib_init(cmd_fibcontext);
1777
1778         srbcmd = (struct aac_srb*) fib_data(cmd_fibcontext);
1779         srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
1780         srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scsicmd->device->channel));
1781         srbcmd->id   = cpu_to_le32(scsicmd->device->id);
1782         srbcmd->lun      = cpu_to_le32(scsicmd->device->lun);
1783         srbcmd->flags    = cpu_to_le32(flag);
1784         timeout = (scsicmd->timeout-jiffies)/HZ;
1785         if(timeout == 0){
1786                 timeout = 1;
1787         }
1788         srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
1789         srbcmd->retry_limit =cpu_to_le32(0); // Obsolete parameter
1790         srbcmd->cdb_size = cpu_to_le32(scsicmd->cmd_len);
1791         
1792         if( dev->dac_support == 1 ) {
1793                 aac_build_sg64(scsicmd, (struct sgmap64*) &srbcmd->sg);
1794                 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1795
1796                 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1797                 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1798                 /*
1799                  *      Build Scatter/Gather list
1800                  */
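                /*
                 *      fibsize covers the SRB plus (count - 1) additional
                 *      64-bit sg entries - count - 1 presumably because
                 *      struct aac_srb already embeds the first entry.
                 */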
1801                 fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry64));
1802
1803                 /*
1804                  *      Now send the Fib to the adapter
1805                  */
1806                 status = fib_send(ScsiPortCommand64, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1807                                   (fib_callback) aac_srb_callback, (void *) scsicmd);
1808         } else {
1809                 aac_build_sg(scsicmd, (struct sgmap*)&srbcmd->sg);
1810                 srbcmd->count = cpu_to_le32(scsicmd->request_bufflen);
1811
1812                 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
1813                 memcpy(srbcmd->cdb, scsicmd->cmnd, scsicmd->cmd_len);
1814                 /*
1815                  *      Build Scatter/Gather list
1816                  */
1817                 fibsize = sizeof (struct aac_srb) + (((srbcmd->sg.count & 0xff) - 1) * sizeof (struct sgentry));
1818
1819                 /*
1820                  *      Now send the Fib to the adapter
1821                  */
1822                 status = fib_send(ScsiPortCommand, cmd_fibcontext, fibsize, FsaNormal, 0, 1,
1823                                   (fib_callback) aac_srb_callback, (void *) scsicmd);
1824         }
1825         /*
1826          *      Check that the command queued to the controller
1827          */
1828         if (status == -EINPROGRESS){
1829                 return 0;
1830         }
1831
1832         printk(KERN_WARNING "aac_srb: fib_send failed with status: %d\n", status);
1833         fib_complete(cmd_fibcontext);
1834         fib_free(cmd_fibcontext);
1835
1836         return -1;
1837 }
1838
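/*
 *      aac_build_sg - fill a 32-bit sgmap from the command's data buffer.
 *      Scatter/gather requests are DMA-mapped entry by entry; single-buffer
 *      requests get a one-entry map.  Returns the number of bytes mapped.
 */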
1839 static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
1840 {
1841         struct aac_dev *dev;
1842         unsigned long byte_count = 0;
1843
1844         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1845         // Get rid of old data
1846         psg->count = cpu_to_le32(0);
1847         psg->sg[0].addr = cpu_to_le32(0);
1848         psg->sg[0].count = cpu_to_le32(0);  
1849         if (scsicmd->use_sg) {
1850                 struct scatterlist *sg;
1851                 int i;
1852                 int sg_count;
1853                 sg = (struct scatterlist *) scsicmd->request_buffer;
1854
1855                 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1856                         scsicmd->sc_data_direction);
1857                 psg->count = cpu_to_le32(sg_count);
1858
1859                 byte_count = 0;
1860
1861                 for (i = 0; i < sg_count; i++) {
1862                         psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
1863                         psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1864                         byte_count += sg_dma_len(sg);
1865                         sg++;
1866                 }
1867                 /* hba wants the size to be exact */
1868                 if(byte_count > scsicmd->request_bufflen){
1869                         psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1870                         byte_count = scsicmd->request_bufflen;
1871                 }
1872                 /* Check for command underflow */
1873                 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1874                         printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1875                                         byte_count, scsicmd->underflow);
1876                 }
1877         }
1878         else if(scsicmd->request_bufflen) {
1879                 dma_addr_t addr; 
1880                 addr = pci_map_single(dev->pdev,
1881                                 scsicmd->request_buffer,
1882                                 scsicmd->request_bufflen,
1883                                 scsicmd->sc_data_direction);
1884                 psg->count = cpu_to_le32(1);
1885                 psg->sg[0].addr = cpu_to_le32(addr);
1886                 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
1887                 scsicmd->SCp.dma_handle = addr;
1888                 byte_count = scsicmd->request_bufflen;
1889         }
1890         return byte_count;
1891 }
1892
1893
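/*
 *      aac_build_sg64 - 64-bit variant of aac_build_sg: each mapped DMA
 *      address is split into a low (addr[0]) and high (addr[1]) 32-bit half
 *      for adapters run with 64-bit DAC support.
 */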
1894 static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
1895 {
1896         struct aac_dev *dev;
1897         unsigned long byte_count = 0;
1898         u64 le_addr;
1899
1900         dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1901         // Get rid of old data
1902         psg->count = cpu_to_le32(0);
1903         psg->sg[0].addr[0] = cpu_to_le32(0);
1904         psg->sg[0].addr[1] = cpu_to_le32(0);
1905         psg->sg[0].count = cpu_to_le32(0);  
1906         if (scsicmd->use_sg) {
1907                 struct scatterlist *sg;
1908                 int i;
1909                 int sg_count;
1910                 sg = (struct scatterlist *) scsicmd->request_buffer;
1911
1912                 sg_count = pci_map_sg(dev->pdev, sg, scsicmd->use_sg,
1913                         scsicmd->sc_data_direction);
1914                 psg->count = cpu_to_le32(sg_count);
1915
1916                 byte_count = 0;
1917
1918                 for (i = 0; i < sg_count; i++) {
1919                         le_addr = cpu_to_le64(sg_dma_address(sg));
1920                         psg->sg[i].addr[1] = (u32)(le_addr>>32);
1921                         psg->sg[i].addr[0] = (u32)(le_addr & 0xffffffff);
1922                         psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
1923                         byte_count += sg_dma_len(sg);
1924                         sg++;
1925                 }
1926                 /* hba wants the size to be exact */
1927                 if(byte_count > scsicmd->request_bufflen){
1928                         psg->sg[i-1].count -= (byte_count - scsicmd->request_bufflen);
1929                         byte_count = scsicmd->request_bufflen;
1930                 }
1931                 /* Check for command underflow */
1932                 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
1933                         printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
1934                                         byte_count, scsicmd->underflow);
1935                 }
1936         }
1937         else if(scsicmd->request_bufflen) {
1938                 dma_addr_t addr; 
1939                 addr = pci_map_single(dev->pdev,
1940                                 scsicmd->request_buffer,
1941                                 scsicmd->request_bufflen,
1942                                 scsicmd->sc_data_direction);
1943                 psg->count = cpu_to_le32(1);
1944                 le_addr = cpu_to_le64(addr);
1945                 psg->sg[0].addr[1] = (u32)(le_addr>>32);
1946                 psg->sg[0].addr[0] = (u32)(le_addr & 0xffffffff);
1947                 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);  
1948                 scsicmd->SCp.dma_handle = addr;
1949                 byte_count = scsicmd->request_bufflen;
1950         }
1951         return byte_count;
1952 }
1953
1954 #ifdef AAC_DETAILED_STATUS_INFO
1955
1956 struct aac_srb_status_info {
1957         u32     status;
1958         char    *str;
1959 };
1960
1961
1962 static struct aac_srb_status_info srb_status_info[] = {
1963         { SRB_STATUS_PENDING,           "Pending Status"},
1964         { SRB_STATUS_SUCCESS,           "Success"},
1965         { SRB_STATUS_ABORTED,           "Aborted Command"},
1966         { SRB_STATUS_ABORT_FAILED,      "Abort Failed"},
1967         { SRB_STATUS_ERROR,             "Error Event"}, 
1968         { SRB_STATUS_BUSY,              "Device Busy"},
1969         { SRB_STATUS_INVALID_REQUEST,   "Invalid Request"},
1970         { SRB_STATUS_INVALID_PATH_ID,   "Invalid Path ID"},
1971         { SRB_STATUS_NO_DEVICE,         "No Device"},
1972         { SRB_STATUS_TIMEOUT,           "Timeout"},
1973         { SRB_STATUS_SELECTION_TIMEOUT, "Selection Timeout"},
1974         { SRB_STATUS_COMMAND_TIMEOUT,   "Command Timeout"},
1975         { SRB_STATUS_MESSAGE_REJECTED,  "Message Rejected"},
1976         { SRB_STATUS_BUS_RESET,         "Bus Reset"},
1977         { SRB_STATUS_PARITY_ERROR,      "Parity Error"},
1978         { SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
1979         { SRB_STATUS_NO_HBA,            "No HBA"},
1980         { SRB_STATUS_DATA_OVERRUN,      "Data Overrun/Data Underrun"},
1981         { SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
1982         { SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
1983         { SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
1984         { SRB_STATUS_REQUEST_FLUSHED,   "Request Flushed"},
1985         { SRB_STATUS_DELAYED_RETRY,     "Delayed Retry"},
1986         { SRB_STATUS_INVALID_LUN,       "Invalid LUN"}, 
1987         { SRB_STATUS_INVALID_TARGET_ID, "Invalid TARGET ID"},
1988         { SRB_STATUS_BAD_FUNCTION,      "Bad Function"},
1989         { SRB_STATUS_ERROR_RECOVERY,    "Error Recovery"},
1990         { SRB_STATUS_NOT_STARTED,       "Not Started"},
1991         { SRB_STATUS_NOT_IN_USE,        "Not In Use"},
1992         { SRB_STATUS_FORCE_ABORT,       "Force Abort"},
1993         { SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
1994         { 0xff,                         "Unknown Error"}
1995 };
1996
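/*
 *      aac_get_status_string - map an SRB status code to a human readable
 *      string for AAC_DETAILED_STATUS_INFO logging.
 */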
1997 char *aac_get_status_string(u32 status)
1998 {
1999         int i;
2000
2001         for(i=0; i < (sizeof(srb_status_info)/sizeof(struct aac_srb_status_info)); i++ ){
2002                 if(srb_status_info[i].status == status){
2003                         return srb_status_info[i].str;
2004                 }
2005         }
2006
2007         return "Bad Status Code";
2008 }
2009
2010 #endif