1 /*
2  * Adaptec AIC79xx device driver for Linux.
3  *
4  * $Id: //depot/aic7xxx/linux/drivers/scsi/aic7xxx/aic79xx_osm.c#171 $
5  *
6  * --------------------------------------------------------------------------
7  * Copyright (c) 1994-2000 Justin T. Gibbs.
8  * Copyright (c) 1997-1999 Doug Ledford
9  * Copyright (c) 2000-2003 Adaptec Inc.
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions, and the following disclaimer,
17  *    without modification.
18  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
19  *    substantially similar to the "NO WARRANTY" disclaimer below
20  *    ("Disclaimer") and any redistribution must be conditioned upon
21  *    including a substantially similar Disclaimer requirement for further
22  *    binary redistribution.
23  * 3. Neither the names of the above-listed copyright holders nor the names
24  *    of any contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * Alternatively, this software may be distributed under the terms of the
28  * GNU General Public License ("GPL") version 2 as published by the Free
29  * Software Foundation.
30  *
31  * NO WARRANTY
32  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
35  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
36  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
41  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42  * POSSIBILITY OF SUCH DAMAGES.
43  */
44
45 #include "aic79xx_osm.h"
46 #include "aic79xx_inline.h"
47 #include <scsi/scsicam.h>
48
49 /*
50  * Include aiclib.c as part of our
51  * "module dependencies are hard" work around.
52  */
53 #include "aiclib.c"
54
55 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
56 #include <linux/init.h>         /* __setup */
57 #endif
58
59 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
60 #include "sd.h"                 /* For geometry detection */
61 #endif
62
63 #include <linux/mm.h>           /* For fetching system memory size */
64 #include <linux/delay.h>        /* For ssleep/msleep */
65
66 /*
67  * Lock protecting manipulation of the ahd softc list.
68  */
69 spinlock_t ahd_list_spinlock;
70
71 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
72 struct proc_dir_entry proc_scsi_aic79xx = {
73         PROC_SCSI_AIC79XX, 7, "aic79xx",
74         S_IFDIR | S_IRUGO | S_IXUGO, 2,
75         0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL
76 };
77 #endif
78
79 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
80 /* For dynamic sglist size calculation. */
81 u_int ahd_linux_nseg;
82 #endif
83
84 /*
85  * Bucket size for counting good commands in between bad ones.
86  */
87 #define AHD_LINUX_ERR_THRESH    1000
88
89 /*
90  * Set this to the delay in seconds after SCSI bus reset.
91  * Note, we honor this only for the initial bus reset.
92  * The scsi error recovery code performs its own bus settle
93  * delay handling for error recovery actions.
94  */
95 #ifdef CONFIG_AIC79XX_RESET_DELAY_MS
96 #define AIC79XX_RESET_DELAY CONFIG_AIC79XX_RESET_DELAY_MS
97 #else
98 #define AIC79XX_RESET_DELAY 5000
99 #endif
100
101 /*
102  * To change the default number of tagged transactions allowed per-device,
103  * add a line to the lilo.conf file like:
104  * append="aic79xx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}"
105  * which will result in the first four devices on the first two
106  * controllers being set to a tagged queue depth of 32.
107  *
108  * tag_commands is an array of 16 entries to allow for wide and twin adapters.
109  * Twin adapters will use indexes 0-7 for channel 0, and indexes 8-15
110  * for channel 1.
111  */
112 typedef struct {
113         uint16_t tag_commands[16];      /* Allow for wide/twin adapters. */
114 } adapter_tag_info_t;
115
116 /*
117  * Modify this as you see fit for your system.
118  *
119  * 0                    tagged queuing disabled
120  * 1 <= n <= 253        n == max tags ever dispatched.
121  *
122  * The driver will throttle the number of commands dispatched to a
123  * device if it returns queue full.  For devices with a fixed maximum
124  * queue depth, the driver will eventually determine this depth and
125  * lock it in (a console message is printed to indicate that a lock
126  * has occurred).  On some devices, queue full is returned for a temporary
127  * resource shortage.  These devices will return queue full at varying
128  * depths.  The driver will throttle back when the queue fulls occur and
129  * attempt to slowly increase the depth over time as the device recovers
130  * from the resource shortage.
131  *
132  * In this example, the first line will disable tagged queueing for all
133  * the devices on the first probed aic79xx adapter.
134  *
135  * The second line enables tagged queueing with 4 commands/LUN for IDs
136  * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
137  * driver to attempt to use up to 64 tags for ID 1.
138  *
139  * The third line is the same as the first line.
140  *
141  * The fourth line disables tagged queueing for devices 0 and 3.  It
142  * enables tagged queueing for the other IDs, with 16 commands/LUN
143  * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
144  * IDs 2, 5-7, and 9-15.
145  */
146
147 /*
148  * NOTE: The below structure is for reference only, the actual structure
149  *       to modify in order to change things is just below this comment block.
150 adapter_tag_info_t aic79xx_tag_info[] =
151 {
152         {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
153         {{4, 64, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 4, 4, 4}},
154         {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
155         {{0, 16, 4, 0, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}}
156 };
157 */
158
159 #ifdef CONFIG_AIC79XX_CMDS_PER_DEVICE
160 #define AIC79XX_CMDS_PER_DEVICE CONFIG_AIC79XX_CMDS_PER_DEVICE
161 #else
162 #define AIC79XX_CMDS_PER_DEVICE AHD_MAX_QUEUE
163 #endif
164
165 #define AIC79XX_CONFIGED_TAG_COMMANDS {                                 \
166         AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,               \
167         AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,               \
168         AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,               \
169         AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,               \
170         AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,               \
171         AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,               \
172         AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE,               \
173         AIC79XX_CMDS_PER_DEVICE, AIC79XX_CMDS_PER_DEVICE                \
174 }
175
176 /*
177  * By default, use the number of commands specified by
178  * the user's kernel configuration.
179  */
180 static adapter_tag_info_t aic79xx_tag_info[] =
181 {
182         {AIC79XX_CONFIGED_TAG_COMMANDS},
183         {AIC79XX_CONFIGED_TAG_COMMANDS},
184         {AIC79XX_CONFIGED_TAG_COMMANDS},
185         {AIC79XX_CONFIGED_TAG_COMMANDS},
186         {AIC79XX_CONFIGED_TAG_COMMANDS},
187         {AIC79XX_CONFIGED_TAG_COMMANDS},
188         {AIC79XX_CONFIGED_TAG_COMMANDS},
189         {AIC79XX_CONFIGED_TAG_COMMANDS},
190         {AIC79XX_CONFIGED_TAG_COMMANDS},
191         {AIC79XX_CONFIGED_TAG_COMMANDS},
192         {AIC79XX_CONFIGED_TAG_COMMANDS},
193         {AIC79XX_CONFIGED_TAG_COMMANDS},
194         {AIC79XX_CONFIGED_TAG_COMMANDS},
195         {AIC79XX_CONFIGED_TAG_COMMANDS},
196         {AIC79XX_CONFIGED_TAG_COMMANDS},
197         {AIC79XX_CONFIGED_TAG_COMMANDS}
198 };
199
200 /*
201  * By default, read streaming is disabled.  In theory,
202  * read streaming should enhance performance, but early
203  * U320 drive firmware actually performs slower with
204  * read streaming enabled.
205  */
206 #ifdef CONFIG_AIC79XX_ENABLE_RD_STRM
207 #define AIC79XX_CONFIGED_RD_STRM 0xFFFF
208 #else
209 #define AIC79XX_CONFIGED_RD_STRM 0
210 #endif
211
212 static uint16_t aic79xx_rd_strm_info[] =
213 {
214         AIC79XX_CONFIGED_RD_STRM,
215         AIC79XX_CONFIGED_RD_STRM,
216         AIC79XX_CONFIGED_RD_STRM,
217         AIC79XX_CONFIGED_RD_STRM,
218         AIC79XX_CONFIGED_RD_STRM,
219         AIC79XX_CONFIGED_RD_STRM,
220         AIC79XX_CONFIGED_RD_STRM,
221         AIC79XX_CONFIGED_RD_STRM,
222         AIC79XX_CONFIGED_RD_STRM,
223         AIC79XX_CONFIGED_RD_STRM,
224         AIC79XX_CONFIGED_RD_STRM,
225         AIC79XX_CONFIGED_RD_STRM,
226         AIC79XX_CONFIGED_RD_STRM,
227         AIC79XX_CONFIGED_RD_STRM,
228         AIC79XX_CONFIGED_RD_STRM,
229         AIC79XX_CONFIGED_RD_STRM
230 };
231
232 /*
233  * DV option:
234  *
235  * positive value = DV Enabled
236  * zero           = DV Disabled
237  * negative value = DV Default for adapter type/seeprom
238  */
239 #ifdef CONFIG_AIC79XX_DV_SETTING
240 #define AIC79XX_CONFIGED_DV CONFIG_AIC79XX_DV_SETTING
241 #else
242 #define AIC79XX_CONFIGED_DV -1
243 #endif
244
245 static int8_t aic79xx_dv_settings[] =
246 {
247         AIC79XX_CONFIGED_DV,
248         AIC79XX_CONFIGED_DV,
249         AIC79XX_CONFIGED_DV,
250         AIC79XX_CONFIGED_DV,
251         AIC79XX_CONFIGED_DV,
252         AIC79XX_CONFIGED_DV,
253         AIC79XX_CONFIGED_DV,
254         AIC79XX_CONFIGED_DV,
255         AIC79XX_CONFIGED_DV,
256         AIC79XX_CONFIGED_DV,
257         AIC79XX_CONFIGED_DV,
258         AIC79XX_CONFIGED_DV,
259         AIC79XX_CONFIGED_DV,
260         AIC79XX_CONFIGED_DV,
261         AIC79XX_CONFIGED_DV,
262         AIC79XX_CONFIGED_DV
263 };
264
265 /*
266  * The I/O cell on the chip is very configurable in respect to its analog
267  * characteristics.  Set the defaults here; they can be overridden with
268  * the proper insmod parameters.
269  */
270 struct ahd_linux_iocell_opts
271 {
272         uint8_t precomp;
273         uint8_t slewrate;
274         uint8_t amplitude;
275 };
276 #define AIC79XX_DEFAULT_PRECOMP         0xFF
277 #define AIC79XX_DEFAULT_SLEWRATE        0xFF
278 #define AIC79XX_DEFAULT_AMPLITUDE       0xFF
279 #define AIC79XX_DEFAULT_IOOPTS                  \
280 {                                               \
281         AIC79XX_DEFAULT_PRECOMP,                \
282         AIC79XX_DEFAULT_SLEWRATE,               \
283         AIC79XX_DEFAULT_AMPLITUDE               \
284 }
285 #define AIC79XX_PRECOMP_INDEX   0
286 #define AIC79XX_SLEWRATE_INDEX  1
287 #define AIC79XX_AMPLITUDE_INDEX 2
288 static struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
289 {
290         AIC79XX_DEFAULT_IOOPTS,
291         AIC79XX_DEFAULT_IOOPTS,
292         AIC79XX_DEFAULT_IOOPTS,
293         AIC79XX_DEFAULT_IOOPTS,
294         AIC79XX_DEFAULT_IOOPTS,
295         AIC79XX_DEFAULT_IOOPTS,
296         AIC79XX_DEFAULT_IOOPTS,
297         AIC79XX_DEFAULT_IOOPTS,
298         AIC79XX_DEFAULT_IOOPTS,
299         AIC79XX_DEFAULT_IOOPTS,
300         AIC79XX_DEFAULT_IOOPTS,
301         AIC79XX_DEFAULT_IOOPTS,
302         AIC79XX_DEFAULT_IOOPTS,
303         AIC79XX_DEFAULT_IOOPTS,
304         AIC79XX_DEFAULT_IOOPTS,
305         AIC79XX_DEFAULT_IOOPTS
306 };
307
308 /*
309  * There should be a specific return value for this in scsi.h, but
310  * it seems that most drivers ignore it.
311  */
312 #define DID_UNDERFLOW   DID_ERROR
313
314 void
315 ahd_print_path(struct ahd_softc *ahd, struct scb *scb)
316 {
317         printk("(scsi%d:%c:%d:%d): ",
318                ahd->platform_data->host->host_no,
319                scb != NULL ? SCB_GET_CHANNEL(ahd, scb) : 'X',
320                scb != NULL ? SCB_GET_TARGET(ahd, scb) : -1,
321                scb != NULL ? SCB_GET_LUN(scb) : -1);
322 }
323
324 /*
325  * XXX - these options apply unilaterally to _all_ adapter
326  *       cards in the system.  This should be fixed.  Exceptions to this
327  *       rule are noted in the comments.
328  */
329
330 /*
331  * Skip the scsi bus reset.  Non-0 makes us skip the reset at startup.  This
332  * has no effect on any later resets that might occur due to things like
333  * SCSI bus timeouts.
334  */
335 static uint32_t aic79xx_no_reset;
336
337 /*
338  * Certain PCI motherboards will scan PCI devices from highest to lowest,
339  * others scan from lowest to highest, and they tend to do all kinds of
340  * strange things when they come into contact with PCI bridge chips.  The
341  * net result of all this is that the PCI card that is actually used to boot
342  * the machine is very hard to detect.  Most motherboards go from lowest
343  * PCI slot number to highest, and the first SCSI controller found is the
344  * one you boot from.  The only exceptions to this are when a controller
345  * has its BIOS disabled.  So, we by default sort all of our SCSI controllers
346  * from lowest PCI slot number to highest PCI slot number.  We also force
347  * all controllers with their BIOS disabled to the end of the list.  This
348  * works on *almost* all computers.  Where it doesn't work, we have this
349  * option.  Setting this option to non-0 will reverse the order of the sort
350  * to highest first, then lowest, but will still leave cards with their BIOS
351  * disabled at the very end.  That should fix everyone up unless there are
352  * really strange circumstances.
353  */
354 static uint32_t aic79xx_reverse_scan;
355
356 /*
357  * Should we force EXTENDED translation on a controller.
358  *     0 == Use whatever is in the SEEPROM or default to off
359  *     1 == Use whatever is in the SEEPROM or default to on
360  */
361 static uint32_t aic79xx_extended;
362
363 /*
364  * PCI bus parity checking of the Adaptec controllers.  This is somewhat
365  * dubious at best.  To my knowledge, this option has never actually
366  * solved a PCI parity problem, but on certain machines with broken PCI
367  * chipset configurations, it can generate tons of false error messages.
368  * It's included in the driver for completeness.
369  *   0     = Shut off PCI parity check
370  *   non-0 = Enable PCI parity check
371  *
372  * NOTE: you can't actually pass -1 on the lilo prompt.  So, to set this
373  * variable to -1 you would actually want to simply pass the variable
374  * name without a number.  That will invert the 0 which will result in
375  * -1.
376  */
377 static uint32_t aic79xx_pci_parity = ~0;
378
379 /*
380  * There are lots of broken chipsets in the world.  Some of them will
381  * violate the PCI spec when we issue byte sized memory writes to our
382  * controller.  I/O mapped register access, if allowed by the given
383  * platform, will work in almost all cases.
384  */
385 uint32_t aic79xx_allow_memio = ~0;
386
387 /*
388  * aic79xx_detect() has been run, so register all device arrivals
389  * immediately with the system rather than deferring to the sorted
390  * attachment performed by aic79xx_detect().
391  */
392 int aic79xx_detect_complete;
393
394 /*
395  * Sets how long each device is given to respond to selection.
396  * The table of values goes like this:
397  *   0 - 256ms
398  *   1 - 128ms
399  *   2 - 64ms
400  *   3 - 32ms
401  * We default to 256ms because some older devices need a longer time
402  * to respond to initial selection.
403  */
404 static uint32_t aic79xx_seltime;
405
406 /*
407  * Certain devices do not perform any aging on commands.  Should the
408  * device be saturated by commands in one portion of the disk, it is
409  * possible for transactions on far away sectors to never be serviced.
410  * To handle these devices, we can periodically send an ordered tag to
411  * force all outstanding transactions to be serviced prior to a new
412  * transaction.
413  */
414 uint32_t aic79xx_periodic_otag;
415
416 /*
417  * Module information and settable options.
418  */
419 static char *aic79xx = NULL;
420 /*
421  * Just in case someone uses commas to separate items on the insmod
422  * command line, we define a dummy buffer here to avoid having insmod
423  * write wild stuff into our code segment
424  */
425 static char dummy_buffer[60] = "Please don't trounce on me insmod!!\n";
426
427 MODULE_AUTHOR("Maintainer: Justin T. Gibbs <gibbs@scsiguy.com>");
428 MODULE_DESCRIPTION("Adaptec Aic790X U320 SCSI Host Bus Adapter driver");
429 MODULE_LICENSE("Dual BSD/GPL");
430 MODULE_VERSION(AIC79XX_DRIVER_VERSION);
431 MODULE_PARM(aic79xx, "s");
432 MODULE_PARM_DESC(aic79xx,
433 "period delimited, options string.\n"
434 "       verbose                 Enable verbose/diagnostic logging\n"
435 "       allow_memio             Allow device registers to be memory mapped\n"
436 "       debug                   Bitmask of debug values to enable\n"
437 "       no_reset                Supress initial bus resets\n"
438 "       extended                Enable extended geometry on all controllers\n"
439 "       periodic_otag           Send an ordered tagged transaction\n"
440 "                               periodically to prevent tag starvation.\n"
441 "                               This may be required by some older disk\n"
442 "                               or drives/RAID arrays.\n"
443 "       reverse_scan            Sort PCI devices highest Bus/Slot to lowest\n"
444 "       tag_info:<tag_str>      Set per-target tag depth\n"
445 "       global_tag_depth:<int>  Global tag depth for all targets on all buses\n"
446 "       rd_strm:<rd_strm_masks> Set per-target read streaming setting.\n"
447 "       dv:<dv_settings>        Set per-controller Domain Validation Setting.\n"
448 "       slewrate:<slewrate_list>Set the signal slew rate (0-15).\n"
449 "       precomp:<pcomp_list>    Set the signal precompensation (0-7).\n"
450 "       amplitude:<int>         Set the signal amplitude (0-7).\n"
451 "       seltime:<int>           Selection Timeout:\n"
452 "                               (0/256ms,1/128ms,2/64ms,3/32ms)\n"
453 "\n"
454 "       Sample /etc/modprobe.conf line:\n"
455 "               Enable verbose logging\n"
456 "               Set tag depth on Controller 2/Target 2 to 10 tags\n"
457 "               Shorten the selection timeout to 128ms\n"
458 "\n"
459 "       options aic79xx 'aic79xx=verbose.tag_info:{{}.{}.{..10}}.seltime:1'\n"
460 "\n"
461 "       Sample /etc/modprobe.conf line:\n"
462 "               Change Read Streaming for Controller's 2 and 3\n"
463 "\n"
464 "       options aic79xx 'aic79xx=rd_strm:{..0xFFF0.0xC0F0}'");
465
466 static void ahd_linux_handle_scsi_status(struct ahd_softc *,
467                                          struct ahd_linux_device *,
468                                          struct scb *);
469 static void ahd_linux_queue_cmd_complete(struct ahd_softc *ahd,
470                                          Scsi_Cmnd *cmd);
471 static void ahd_linux_filter_inquiry(struct ahd_softc *ahd,
472                                      struct ahd_devinfo *devinfo);
473 static void ahd_linux_dev_timed_unfreeze(u_long arg);
474 static void ahd_linux_sem_timeout(u_long arg);
475 static void ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd);
476 static void ahd_linux_size_nseg(void);
477 static void ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd);
478 static void ahd_linux_start_dv(struct ahd_softc *ahd);
479 static void ahd_linux_dv_timeout(struct scsi_cmnd *cmd);
480 static int  ahd_linux_dv_thread(void *data);
481 static void ahd_linux_kill_dv_thread(struct ahd_softc *ahd);
482 static void ahd_linux_dv_target(struct ahd_softc *ahd, u_int target);
483 static void ahd_linux_dv_transition(struct ahd_softc *ahd,
484                                     struct scsi_cmnd *cmd,
485                                     struct ahd_devinfo *devinfo,
486                                     struct ahd_linux_target *targ);
487 static void ahd_linux_dv_fill_cmd(struct ahd_softc *ahd,
488                                   struct scsi_cmnd *cmd,
489                                   struct ahd_devinfo *devinfo);
490 static void ahd_linux_dv_inq(struct ahd_softc *ahd,
491                              struct scsi_cmnd *cmd,
492                              struct ahd_devinfo *devinfo,
493                              struct ahd_linux_target *targ,
494                              u_int request_length);
495 static void ahd_linux_dv_tur(struct ahd_softc *ahd,
496                              struct scsi_cmnd *cmd,
497                              struct ahd_devinfo *devinfo);
498 static void ahd_linux_dv_rebd(struct ahd_softc *ahd,
499                               struct scsi_cmnd *cmd,
500                               struct ahd_devinfo *devinfo,
501                               struct ahd_linux_target *targ);
502 static void ahd_linux_dv_web(struct ahd_softc *ahd,
503                              struct scsi_cmnd *cmd,
504                              struct ahd_devinfo *devinfo,
505                              struct ahd_linux_target *targ);
506 static void ahd_linux_dv_reb(struct ahd_softc *ahd,
507                              struct scsi_cmnd *cmd,
508                              struct ahd_devinfo *devinfo,
509                              struct ahd_linux_target *targ);
510 static void ahd_linux_dv_su(struct ahd_softc *ahd,
511                             struct scsi_cmnd *cmd,
512                             struct ahd_devinfo *devinfo,
513                             struct ahd_linux_target *targ);
514 static int ahd_linux_fallback(struct ahd_softc *ahd,
515                               struct ahd_devinfo *devinfo);
516 static __inline int ahd_linux_dv_fallback(struct ahd_softc *ahd,
517                                           struct ahd_devinfo *devinfo);
518 static void ahd_linux_dv_complete(Scsi_Cmnd *cmd);
519 static void ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ);
520 static u_int ahd_linux_user_tagdepth(struct ahd_softc *ahd,
521                                      struct ahd_devinfo *devinfo);
522 static u_int ahd_linux_user_dv_setting(struct ahd_softc *ahd);
523 static void ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd);
524 static void ahd_linux_device_queue_depth(struct ahd_softc *ahd,
525                                          struct ahd_linux_device *dev);
526 static struct ahd_linux_target* ahd_linux_alloc_target(struct ahd_softc*,
527                                                        u_int, u_int);
528 static void                     ahd_linux_free_target(struct ahd_softc*,
529                                                       struct ahd_linux_target*);
530 static struct ahd_linux_device* ahd_linux_alloc_device(struct ahd_softc*,
531                                                        struct ahd_linux_target*,
532                                                        u_int);
533 static void                     ahd_linux_free_device(struct ahd_softc*,
534                                                       struct ahd_linux_device*);
535 static void ahd_linux_run_device_queue(struct ahd_softc*,
536                                        struct ahd_linux_device*);
537 static void ahd_linux_setup_tag_info_global(char *p);
538 static aic_option_callback_t ahd_linux_setup_tag_info;
539 static aic_option_callback_t ahd_linux_setup_rd_strm_info;
540 static aic_option_callback_t ahd_linux_setup_dv;
541 static aic_option_callback_t ahd_linux_setup_iocell_info;
542 static int ahd_linux_next_unit(void);
543 static void ahd_runq_tasklet(unsigned long data);
544 static int aic79xx_setup(char *c);
545
546 /****************************** Inlines ***************************************/
547 static __inline void ahd_schedule_completeq(struct ahd_softc *ahd);
548 static __inline void ahd_schedule_runq(struct ahd_softc *ahd);
549 static __inline void ahd_setup_runq_tasklet(struct ahd_softc *ahd);
550 static __inline void ahd_teardown_runq_tasklet(struct ahd_softc *ahd);
551 static __inline struct ahd_linux_device*
552                      ahd_linux_get_device(struct ahd_softc *ahd, u_int channel,
553                                           u_int target, u_int lun, int alloc);
554 static struct ahd_cmd *ahd_linux_run_complete_queue(struct ahd_softc *ahd);
555 static __inline void ahd_linux_check_device_queue(struct ahd_softc *ahd,
556                                                   struct ahd_linux_device *dev);
557 static __inline struct ahd_linux_device *
558                      ahd_linux_next_device_to_run(struct ahd_softc *ahd);
559 static __inline void ahd_linux_run_device_queues(struct ahd_softc *ahd);
560 static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
561
562 static __inline int ahd_linux_map_seg(struct ahd_softc *ahd, struct scb *scb,
563                                       struct ahd_dma_seg *sg,
564                                       bus_addr_t addr, bus_size_t len);
565
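/*
 * Arm the completion queue timer, if it is not already armed, so that
 * commands sitting on the completeq are returned to the midlayer as
 * soon as possible.
 */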
566 static __inline void
567 ahd_schedule_completeq(struct ahd_softc *ahd)
568 {
569         if ((ahd->platform_data->flags & AHD_RUN_CMPLT_Q_TIMER) == 0) {
570                 ahd->platform_data->flags |= AHD_RUN_CMPLT_Q_TIMER;
571                 ahd->platform_data->completeq_timer.expires = jiffies;
572                 add_timer(&ahd->platform_data->completeq_timer);
573         }
574 }
575
576 /*
577  * Must be called with our lock held.
578  */
579 static __inline void
580 ahd_schedule_runq(struct ahd_softc *ahd)
581 {
582 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
583         tasklet_schedule(&ahd->platform_data->runq_tasklet);
584 #else
585         /*
586          * Tasklets are not available, so run inline.
587          */
588         ahd_runq_tasklet((unsigned long)ahd);
589 #endif
590 }
591
592 static __inline
593 void ahd_setup_runq_tasklet(struct ahd_softc *ahd)
594 {
595 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
596         tasklet_init(&ahd->platform_data->runq_tasklet, ahd_runq_tasklet,
597                      (unsigned long)ahd);
598 #endif
599 }
600
601 static __inline void
602 ahd_teardown_runq_tasklet(struct ahd_softc *ahd)
603 {
604 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
605         tasklet_kill(&ahd->platform_data->runq_tasklet);
606 #endif
607 }
608
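/*
 * Look up the per-lun device structure for the given channel/target/lun,
 * optionally allocating the target and device structures if they do not
 * yet exist.  Targets on a second channel occupy slots 8-15 of the
 * target array.
 */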
609 static __inline struct ahd_linux_device*
610 ahd_linux_get_device(struct ahd_softc *ahd, u_int channel, u_int target,
611                      u_int lun, int alloc)
612 {
613         struct ahd_linux_target *targ;
614         struct ahd_linux_device *dev;
615         u_int target_offset;
616
617         target_offset = target;
618         if (channel != 0)
619                 target_offset += 8;
620         targ = ahd->platform_data->targets[target_offset];
621         if (targ == NULL) {
622                 if (alloc != 0) {
623                         targ = ahd_linux_alloc_target(ahd, channel, target);
624                         if (targ == NULL)
625                                 return (NULL);
626                 } else
627                         return (NULL);
628         }
629         dev = targ->devices[lun];
630         if (dev == NULL && alloc != 0)
631                 dev = ahd_linux_alloc_device(ahd, targ, lun);
632         return (dev);
633 }
634
635 #define AHD_LINUX_MAX_RETURNED_ERRORS 4
636 static struct ahd_cmd *
637 ahd_linux_run_complete_queue(struct ahd_softc *ahd)
638 {       
639         struct  ahd_cmd *acmd;
640         u_long  done_flags;
641         int     with_errors;
642
643         with_errors = 0;
644         ahd_done_lock(ahd, &done_flags);
645         while ((acmd = TAILQ_FIRST(&ahd->platform_data->completeq)) != NULL) {
646                 Scsi_Cmnd *cmd;
647
648                 if (with_errors > AHD_LINUX_MAX_RETURNED_ERRORS) {
649                         /*
650                          * Linux uses stack recursion to requeue
651                          * commands that need to be retried.  Avoid
652                          * blowing out the stack by "spoon feeding"
653                          * commands that completed with error back
654                          * to the operating system in case they are going
655                          * to be retried. "ick"
656                          */
657                         ahd_schedule_completeq(ahd);
658                         break;
659                 }
660                 TAILQ_REMOVE(&ahd->platform_data->completeq,
661                              acmd, acmd_links.tqe);
662                 cmd = &acmd_scsi_cmd(acmd);
663                 cmd->host_scribble = NULL;
664                 if (ahd_cmd_get_transaction_status(cmd) != DID_OK
665                  || (cmd->result & 0xFF) != SCSI_STATUS_OK)
666                         with_errors++;
667
668                 cmd->scsi_done(cmd);
669         }
670         ahd_done_unlock(ahd, &done_flags);
671         return (acmd);
672 }
673
674 static __inline void
675 ahd_linux_check_device_queue(struct ahd_softc *ahd,
676                              struct ahd_linux_device *dev)
677 {
678         if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) != 0
679          && dev->active == 0) {
680                 dev->flags &= ~AHD_DEV_FREEZE_TIL_EMPTY;
681                 dev->qfrozen--;
682         }
683
684         if (TAILQ_FIRST(&dev->busyq) == NULL
685          || dev->openings == 0 || dev->qfrozen != 0)
686                 return;
687
688         ahd_linux_run_device_queue(ahd, dev);
689 }
690
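/*
 * Return the next device with queued work, or NULL if the controller is
 * currently short on resources or the simq is frozen for anything other
 * than Domain Validation.
 */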
691 static __inline struct ahd_linux_device *
692 ahd_linux_next_device_to_run(struct ahd_softc *ahd)
693 {
694         
695         if ((ahd->flags & AHD_RESOURCE_SHORTAGE) != 0
696          || (ahd->platform_data->qfrozen != 0
697           && AHD_DV_SIMQ_FROZEN(ahd) == 0))
698                 return (NULL);
699         return (TAILQ_FIRST(&ahd->platform_data->device_runq));
700 }
701
702 static __inline void
703 ahd_linux_run_device_queues(struct ahd_softc *ahd)
704 {
705         struct ahd_linux_device *dev;
706
707         while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
708                 TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
709                 dev->flags &= ~AHD_DEV_ON_RUN_LIST;
710                 ahd_linux_check_device_queue(ahd, dev);
711         }
712 }
713
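/*
 * Tear down the DMA mappings (scatter/gather list or single buffer) that
 * were established for this command when it was queued to the controller.
 */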
714 static __inline void
715 ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
716 {
717         Scsi_Cmnd *cmd;
718         int direction;
719
720         cmd = scb->io_ctx;
721         direction = scsi_to_pci_dma_dir(cmd->sc_data_direction);
722         ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
723         if (cmd->use_sg != 0) {
724                 struct scatterlist *sg;
725
726                 sg = (struct scatterlist *)cmd->request_buffer;
727                 pci_unmap_sg(ahd->dev_softc, sg, cmd->use_sg, direction);
728         } else if (cmd->request_bufflen != 0) {
729                 pci_unmap_single(ahd->dev_softc,
730                                  scb->platform_data->buf_busaddr,
731                                  cmd->request_bufflen, direction);
732         }
733 }
734
735 static __inline int
736 ahd_linux_map_seg(struct ahd_softc *ahd, struct scb *scb,
737                   struct ahd_dma_seg *sg, bus_addr_t addr, bus_size_t len)
738 {
739         int      consumed;
740
741         if ((scb->sg_count + 1) > AHD_NSEG)
742                 panic("Too few segs for dma mapping.  "
743                       "Increase AHD_NSEG\n");
744
745         consumed = 1;
746         sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
747         scb->platform_data->xfer_len += len;
748
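        /*
         * With 39-bit addressing enabled, the high bits of the segment
         * address are folded into the upper byte of the segment length
         * (AHD_SG_HIGH_ADDR_MASK) rather than being stored separately.
         */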
749         if (sizeof(bus_addr_t) > 4
750          && (ahd->flags & AHD_39BIT_ADDRESSING) != 0)
751                 len |= (addr >> 8) & AHD_SG_HIGH_ADDR_MASK;
752
753         sg->len = ahd_htole32(len);
754         return (consumed);
755 }
756
757 /******************************** Macros **************************************/
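/*
 * Build the SCSIID value for a command: the target ID placed in the TID
 * field combined with our own initiator ID.
 */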
758 #define BUILD_SCSIID(ahd, cmd)                                          \
759         ((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id)
760
761 /************************  Host template entry points *************************/
762 static int         ahd_linux_detect(Scsi_Host_Template *);
763 static const char *ahd_linux_info(struct Scsi_Host *);
764 static int         ahd_linux_queue(Scsi_Cmnd *, void (*)(Scsi_Cmnd *));
765 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
766 static int         ahd_linux_slave_alloc(Scsi_Device *);
767 static int         ahd_linux_slave_configure(Scsi_Device *);
768 static void        ahd_linux_slave_destroy(Scsi_Device *);
769 #if defined(__i386__)
770 static int         ahd_linux_biosparam(struct scsi_device*,
771                                        struct block_device*, sector_t, int[]);
772 #endif
773 #else
774 static int         ahd_linux_release(struct Scsi_Host *);
775 static void        ahd_linux_select_queue_depth(struct Scsi_Host *host,
776                                                 Scsi_Device *scsi_devs);
777 #if defined(__i386__)
778 static int         ahd_linux_biosparam(Disk *, kdev_t, int[]);
779 #endif
780 #endif
781 static int         ahd_linux_bus_reset(Scsi_Cmnd *);
782 static int         ahd_linux_dev_reset(Scsi_Cmnd *);
783 static int         ahd_linux_abort(Scsi_Cmnd *);
784
785 /*
786  * Calculate a safe value for AHD_NSEG (as expressed through ahd_linux_nseg).
787  *
788  * In pre-2.5.X...
789  * The midlayer allocates an S/G array dynamically when a command is issued
790  * using SCSI malloc.  This array, which is in an OS dependent format that
791  * must later be copied to our private S/G list, is sized to house just the
792  * number of segments needed for the current transfer.  Since the code that
793  * sizes the SCSI malloc pool does not take into consideration fragmentation
794  * of the pool, executing transactions numbering just a fraction of our
795  * concurrent transaction limit with SG list lengths approaching AHD_NSEG will
796  * quickly deplete the SCSI malloc pool of usable space.  Unfortunately, the
797  * mid-layer does not properly handle these scsi malloc failures for the S/G
798  * array and the result can be a lockup of the I/O subsystem.  We try to size
799  * our S/G list so that it satisfies our driver's allocation requirements in
800  * addition to avoiding fragmentation of the SCSI malloc pool.
801  */
802 static void
803 ahd_linux_size_nseg(void)
804 {
805 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
806         u_int cur_size;
807         u_int best_size;
808
809         /*
810          * The SCSI allocator rounds to the nearest 512 bytes
811  * and cannot allocate across a page boundary.  Our algorithm
812          * is to start at 1K of scsi malloc space per-command and
813          * loop through all factors of the PAGE_SIZE and pick the best.
814          */
815         best_size = 0;
816         for (cur_size = 1024; cur_size <= PAGE_SIZE; cur_size *= 2) {
817                 u_int nseg;
818
819                 nseg = cur_size / sizeof(struct scatterlist);
820                 if (nseg < AHD_LINUX_MIN_NSEG)
821                         continue;
822
823                 if (best_size == 0) {
824                         best_size = cur_size;
825                         ahd_linux_nseg = nseg;
826                 } else {
827                         u_int best_rem;
828                         u_int cur_rem;
829
830                         /*
831                          * Compare the traits of the current "best_size"
832                          * with the current size to determine if the
833                          * current size is a better size.
834                          */
835                         best_rem = best_size % sizeof(struct scatterlist);
836                         cur_rem = cur_size % sizeof(struct scatterlist);
837                         if (cur_rem < best_rem) {
838                                 best_size = cur_size;
839                                 ahd_linux_nseg = nseg;
840                         }
841                 }
842         }
843 #endif
844 }
845
846 /*
847  * Try to detect an Adaptec 79XX controller.
848  */
849 static int
850 ahd_linux_detect(Scsi_Host_Template *template)
851 {
852         struct  ahd_softc *ahd;
853         int     found;
854
855 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
856         /*
857          * It is a bug that the upper layer takes
858          * this lock just prior to calling us.
859          */
860         spin_unlock_irq(&io_request_lock);
861 #endif
862
863         /*
864          * Sanity checking of Linux SCSI data structures so
865          * that some of our hacks^H^H^H^H^Hassumptions aren't
866          * violated.
867          */
868         if (offsetof(struct ahd_cmd_internal, end)
869           > offsetof(struct scsi_cmnd, host_scribble)) {
870                 printf("ahd_linux_detect: SCSI data structures changed.\n");
871                 printf("ahd_linux_detect: Unable to attach\n");
872                 return (0);
873         }
874         /*
875          * Determine an appropriate size for our Scatter Gather lists.
876          */
877         ahd_linux_size_nseg();
878 #ifdef MODULE
879         /*
880          * If we've been passed any parameters, process them now.
881          */
882         if (aic79xx)
883                 aic79xx_setup(aic79xx);
884         if (dummy_buffer[0] != 'P')
885                 printk(KERN_WARNING
886 "aic79xx: Please read the file /usr/src/linux/drivers/scsi/README.aic79xx\n"
887 "aic79xx: to see the proper way to specify options to the aic79xx module\n"
888 "aic79xx: Specifically, don't use any commas when passing arguments to\n"
889 "aic79xx: insmod or else it might trash certain memory areas.\n");
890 #endif
891
892 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
893         template->proc_name = "aic79xx";
894 #else
895         template->proc_dir = &proc_scsi_aic79xx;
896 #endif
897
898         /*
899          * Initialize our softc list lock prior to
900          * probing for any adapters.
901          */
902         ahd_list_lockinit();
903
904 #ifdef CONFIG_PCI
905         ahd_linux_pci_init();
906 #endif
907
908         /*
909          * Register with the SCSI layer all
910          * controllers we've found.
911          */
912         found = 0;
913         TAILQ_FOREACH(ahd, &ahd_tailq, links) {
914
915                 if (ahd_linux_register_host(ahd, template) == 0)
916                         found++;
917         }
918 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
919         spin_lock_irq(&io_request_lock);
920 #endif
921         aic79xx_detect_complete++;
922         return (found);
923 }
924
925 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
926 /*
927  * Free the passed in Scsi_Host memory structures prior to unloading the
928  * module.
929  */
930 static int
931 ahd_linux_release(struct Scsi_Host * host)
932 {
933         struct ahd_softc *ahd;
934         u_long l;
935
936         ahd_list_lock(&l);
937         if (host != NULL) {
938
939                 /*
940                  * We should be able to just perform
941                  * the free directly, but check our
942                  * list for extra sanity.
943                  */
944                 ahd = ahd_find_softc(*(struct ahd_softc **)host->hostdata);
945                 if (ahd != NULL) {
946                         u_long s;
947
948                         ahd_lock(ahd, &s);
949                         ahd_intr_enable(ahd, FALSE);
950                         ahd_unlock(ahd, &s);
951                         ahd_free(ahd);
952                 }
953         }
954         ahd_list_unlock(&l);
955         return (0);
956 }
957 #endif
958
959 /*
960  * Return a string describing the driver.
961  */
962 static const char *
963 ahd_linux_info(struct Scsi_Host *host)
964 {
965         static char buffer[512];
966         char    ahd_info[256];
967         char   *bp;
968         struct ahd_softc *ahd;
969
970         bp = &buffer[0];
971         ahd = *(struct ahd_softc **)host->hostdata;
972         memset(bp, 0, sizeof(buffer));
973         strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev ");
974         strcat(bp, AIC79XX_DRIVER_VERSION);
975         strcat(bp, "\n");
976         strcat(bp, "        <");
977         strcat(bp, ahd->description);
978         strcat(bp, ">\n");
979         strcat(bp, "        ");
980         ahd_controller_info(ahd, ahd_info);
981         strcat(bp, ahd_info);
982         strcat(bp, "\n");
983
984         return (bp);
985 }
986
987 /*
988  * Queue an SCB to the controller.
989  */
990 static int
991 ahd_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
992 {
993         struct   ahd_softc *ahd;
994         struct   ahd_linux_device *dev;
995         u_long   flags;
996
997         ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
998
999         /*
1000          * Save the callback on completion function.
1001          */
1002         cmd->scsi_done = scsi_done;
1003
1004         ahd_midlayer_entrypoint_lock(ahd, &flags);
1005
1006         /*
1007          * Close the race of a command that was in the process of
1008          * being queued to us just as our simq was frozen.  Let
1009          * DV commands through so long as we are only frozen to
1010          * perform DV.
1011          */
1012         if (ahd->platform_data->qfrozen != 0
1013          && AHD_DV_CMD(cmd) == 0) {
1014
1015                 ahd_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
1016                 ahd_linux_queue_cmd_complete(ahd, cmd);
1017                 ahd_schedule_completeq(ahd);
1018                 ahd_midlayer_entrypoint_unlock(ahd, &flags);
1019                 return (0);
1020         }
1021         dev = ahd_linux_get_device(ahd, cmd->device->channel,
1022                                    cmd->device->id, cmd->device->lun,
1023                                    /*alloc*/TRUE);
1024         if (dev == NULL) {
1025                 ahd_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL);
1026                 ahd_linux_queue_cmd_complete(ahd, cmd);
1027                 ahd_schedule_completeq(ahd);
1028                 ahd_midlayer_entrypoint_unlock(ahd, &flags);
1029                 printf("%s: aic79xx_linux_queue - Unable to allocate device!\n",
1030                        ahd_name(ahd));
1031                 return (0);
1032         }
1033         if (cmd->cmd_len > MAX_CDB_LEN)
1034                 return (-EINVAL);
1035         cmd->result = CAM_REQ_INPROG << 16;
1036         TAILQ_INSERT_TAIL(&dev->busyq, (struct ahd_cmd *)cmd, acmd_links.tqe);
1037         if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
1038                 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
1039                 dev->flags |= AHD_DEV_ON_RUN_LIST;
1040                 ahd_linux_run_device_queues(ahd);
1041         }
1042         ahd_midlayer_entrypoint_unlock(ahd, &flags);
1043         return (0);
1044 }
1045
1046 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1047 static int
1048 ahd_linux_slave_alloc(Scsi_Device *device)
1049 {
1050         struct  ahd_softc *ahd;
1051
1052         ahd = *((struct ahd_softc **)device->host->hostdata);
1053         if (bootverbose)
1054                 printf("%s: Slave Alloc %d\n", ahd_name(ahd), device->id);
1055         return (0);
1056 }
1057
1058 static int
1059 ahd_linux_slave_configure(Scsi_Device *device)
1060 {
1061         struct  ahd_softc *ahd;
1062         struct  ahd_linux_device *dev;
1063         u_long  flags;
1064
1065         ahd = *((struct ahd_softc **)device->host->hostdata);
1066         if (bootverbose)
1067                 printf("%s: Slave Configure %d\n", ahd_name(ahd), device->id);
1068         ahd_midlayer_entrypoint_lock(ahd, &flags);
1069         /*
1070          * Since Linux has attached to the device, configure
1071          * it so we don't free and allocate the device
1072          * structure on every command.
1073          */
1074         dev = ahd_linux_get_device(ahd, device->channel,
1075                                    device->id, device->lun,
1076                                    /*alloc*/TRUE);
1077         if (dev != NULL) {
1078                 dev->flags &= ~AHD_DEV_UNCONFIGURED;
1079                 dev->flags |= AHD_DEV_SLAVE_CONFIGURED;
1080                 dev->scsi_device = device;
1081                 ahd_linux_device_queue_depth(ahd, dev);
1082         }
1083         ahd_midlayer_entrypoint_unlock(ahd, &flags);
1084         return (0);
1085 }
1086
1087 static void
1088 ahd_linux_slave_destroy(Scsi_Device *device)
1089 {
1090         struct  ahd_softc *ahd;
1091         struct  ahd_linux_device *dev;
1092         u_long  flags;
1093
1094         ahd = *((struct ahd_softc **)device->host->hostdata);
1095         if (bootverbose)
1096                 printf("%s: Slave Destroy %d\n", ahd_name(ahd), device->id);
1097         ahd_midlayer_entrypoint_lock(ahd, &flags);
1098         dev = ahd_linux_get_device(ahd, device->channel,
1099                                    device->id, device->lun,
1100                                            /*alloc*/FALSE);
1101
1102         /*
1103          * Filter out "silly" deletions of real devices by only
1104          * deleting devices that have had slave_configure()
1105          * called on them.  All other devices that have not
1106          * been configured will automatically be deleted by
1107          * the refcounting process.
1108          */
1109         if (dev != NULL
1110          && (dev->flags & AHD_DEV_SLAVE_CONFIGURED) != 0) {
1111                 dev->flags |= AHD_DEV_UNCONFIGURED;
1112                 if (TAILQ_EMPTY(&dev->busyq)
1113                  && dev->active == 0
1114                  && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
1115                         ahd_linux_free_device(ahd, dev);
1116         }
1117         ahd_midlayer_entrypoint_unlock(ahd, &flags);
1118 }
1119 #else
1120 /*
1121  * Sets the queue depth for each SCSI device hanging
1122  * off the input host adapter.
1123  */
1124 static void
1125 ahd_linux_select_queue_depth(struct Scsi_Host * host,
1126                              Scsi_Device * scsi_devs)
1127 {
1128         Scsi_Device *device;
1129         Scsi_Device *ldev;
1130         struct  ahd_softc *ahd;
1131         u_long  flags;
1132
1133         ahd = *((struct ahd_softc **)host->hostdata);
1134         ahd_lock(ahd, &flags);
1135         for (device = scsi_devs; device != NULL; device = device->next) {
1136
1137                 /*
1138                  * Watch out for duplicate devices.  This works around
1139                  * some quirks in how the SCSI scanning code does its
1140                  * device management.
1141                  */
1142                 for (ldev = scsi_devs; ldev != device; ldev = ldev->next) {
1143                         if (ldev->host == device->host
1144                          && ldev->channel == device->channel
1145                          && ldev->id == device->id
1146                          && ldev->lun == device->lun)
1147                                 break;
1148                 }
1149                 /* Skip duplicate. */
1150                 if (ldev != device)
1151                         continue;
1152
1153                 if (device->host == host) {
1154                         struct   ahd_linux_device *dev;
1155
1156                         /*
1157                          * Since Linux has attached to the device, configure
1158                          * it so we don't free and allocate the device
1159                          * structure on every command.
1160                          */
1161                         dev = ahd_linux_get_device(ahd, device->channel,
1162                                                    device->id, device->lun,
1163                                                    /*alloc*/TRUE);
1164                         if (dev != NULL) {
1165                                 dev->flags &= ~AHD_DEV_UNCONFIGURED;
1166                                 dev->scsi_device = device;
1167                                 ahd_linux_device_queue_depth(ahd, dev);
1168                                 device->queue_depth = dev->openings
1169                                                     + dev->active;
1170                                 if ((dev->flags & (AHD_DEV_Q_BASIC
1171                                                 | AHD_DEV_Q_TAGGED)) == 0) {
1172                                         /*
1173                                          * We allow the OS to queue 2 untagged
1174                                          * transactions to us at any time even
1175                                          * though we can only execute them
1176                                          * serially on the controller/device.
1177                                          * This should remove some latency.
1178                                          */
1179                                         device->queue_depth = 2;
1180                                 }
1181                         }
1182                 }
1183         }
1184         ahd_unlock(ahd, &flags);
1185 }
1186 #endif
1187
1188 #if defined(__i386__)
1189 /*
1190  * Return the disk geometry for the given SCSI device.
1191  */
1192 static int
1193 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1194 ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1195                     sector_t capacity, int geom[])
1196 {
1197         uint8_t *bh;
1198 #else
1199 ahd_linux_biosparam(Disk *disk, kdev_t dev, int geom[])
1200 {
1201         struct  scsi_device *sdev = disk->device;
1202         u_long  capacity = disk->capacity;
1203         struct  buffer_head *bh;
1204 #endif
1205         int      heads;
1206         int      sectors;
1207         int      cylinders;
1208         int      ret;
1209         int      extended;
1210         struct   ahd_softc *ahd;
1211
1212         ahd = *((struct ahd_softc **)sdev->host->hostdata);
1213
1214 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1215         bh = scsi_bios_ptable(bdev);
1216 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,17)
1217         bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, block_size(dev));
1218 #else
1219         bh = bread(MKDEV(MAJOR(dev), MINOR(dev) & ~0xf), 0, 1024);
1220 #endif
1221
1222         if (bh) {
1223                 ret = scsi_partsize(bh, capacity,
1224                                     &geom[2], &geom[0], &geom[1]);
1225 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1226                 kfree(bh);
1227 #else
1228                 brelse(bh);
1229 #endif
1230                 if (ret != -1)
1231                         return (ret);
1232         }
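        /*
         * No usable partition table geometry was found; fall back to a
         * 64 head/32 sector translation, or 255/63 when extended
         * translation is enabled and the capacity requires it.
         */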
1233         heads = 64;
1234         sectors = 32;
1235         cylinders = aic_sector_div(capacity, heads, sectors);
1236
1237         if (aic79xx_extended != 0)
1238                 extended = 1;
1239         else
1240                 extended = (ahd->flags & AHD_EXTENDED_TRANS_A) != 0;
1241         if (extended && cylinders >= 1024) {
1242                 heads = 255;
1243                 sectors = 63;
1244                 cylinders = aic_sector_div(capacity, heads, sectors);
1245         }
1246         geom[0] = heads;
1247         geom[1] = sectors;
1248         geom[2] = cylinders;
1249         return (0);
1250 }
1251 #endif
1252
1253 /*
1254  * Abort the current SCSI command(s).
1255  */
1256 static int
1257 ahd_linux_abort(Scsi_Cmnd *cmd)
1258 {
1259         struct ahd_softc *ahd;
1260         struct ahd_cmd *acmd;
1261         struct ahd_cmd *list_acmd;
1262         struct ahd_linux_device *dev;
1263         struct scb *pending_scb;
1264         u_long s;
1265         u_int  saved_scbptr;
1266         u_int  active_scbptr;
1267         u_int  last_phase;
1268         u_int  cdb_byte;
1269         int    retval;
1270         int    was_paused;
1271         int    paused;
1272         int    wait;
1273         int    disconnected;
1274         ahd_mode_state saved_modes;
1275
1276         pending_scb = NULL;
1277         paused = FALSE;
1278         wait = FALSE;
1279         ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
1280         acmd = (struct ahd_cmd *)cmd;
1281
1282         printf("%s:%d:%d:%d: Attempting to abort cmd %p:",
1283                ahd_name(ahd), cmd->device->channel, cmd->device->id,
1284                cmd->device->lun, cmd);
1285         for (cdb_byte = 0; cdb_byte < cmd->cmd_len; cdb_byte++)
1286                 printf(" 0x%x", cmd->cmnd[cdb_byte]);
1287         printf("\n");
1288
1289         /*
1290          * In all versions of Linux, we have to work around
1291          * a major flaw in how the mid-layer is locked down
1292          * if we are to sleep successfully in our error handler
1293          * while allowing our interrupt handler to run.  Since
1294          * the midlayer acquires either the io_request_lock or
1295          * our lock prior to calling us, we must use the
1296          * spin_unlock_irq() method for unlocking our lock.
1297          * This will force interrupts to be enabled on the
1298          * current CPU.  Since the EH thread should not have
1299          * been running with CPU interrupts disabled other than
1300          * by acquiring either the io_request_lock or our own
1301          * lock, this *should* be safe.
1302          */
1303         ahd_midlayer_entrypoint_lock(ahd, &s);
1304
1305         /*
1306          * First determine if we currently own this command.
1307          * Start by searching the device queue.  If not found
1308          * there, check the pending_scb list.  If not found
1309          * at all, and the system wanted us to just abort the
1310          * command, return success.
1311          */
1312         dev = ahd_linux_get_device(ahd, cmd->device->channel,
1313                                    cmd->device->id, cmd->device->lun,
1314                                    /*alloc*/FALSE);
1315
1316         if (dev == NULL) {
1317                 /*
1318                  * No target device for this command exists,
1319                  * so we must not still own the command.
1320                  */
1321                 printf("%s:%d:%d:%d: Is not an active device\n",
1322                        ahd_name(ahd), cmd->device->channel, cmd->device->id,
1323                        cmd->device->lun);
1324                 retval = SUCCESS;
1325                 goto no_cmd;
1326         }
1327
1328         TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
1329                 if (list_acmd == acmd)
1330                         break;
1331         }
1332
1333         if (list_acmd != NULL) {
1334                 printf("%s:%d:%d:%d: Command found on device queue\n",
1335                        ahd_name(ahd), cmd->device->channel, cmd->device->id,
1336                        cmd->device->lun);
1337                 TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
1338                 cmd->result = DID_ABORT << 16;
1339                 ahd_linux_queue_cmd_complete(ahd, cmd);
1340                 retval = SUCCESS;
1341                 goto done;
1342         }
1343
1344         /*
1345          * See if we can find a matching cmd in the pending list.
1346          */
1347         LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
1348                 if (pending_scb->io_ctx == cmd)
1349                         break;
1350         }
1351
1352         if (pending_scb == NULL) {
1353                 printf("%s:%d:%d:%d: Command not found\n",
1354                        ahd_name(ahd), cmd->device->channel, cmd->device->id,
1355                        cmd->device->lun);
1356                 goto no_cmd;
1357         }
1358
1359         if ((pending_scb->flags & SCB_RECOVERY_SCB) != 0) {
1360                 /*
1361                  * We can't queue two recovery actions using the same SCB
1362                  */
1363                 retval = FAILED;
1364                 goto  done;
1365         }
1366
1367         /*
1368          * Ensure that the card doesn't do anything
1369          * behind our back.  Also make sure that we
1370          * didn't "just" miss an interrupt that would
1371          * affect this cmd.
1372          */
1373         was_paused = ahd_is_paused(ahd);
1374         ahd_pause_and_flushwork(ahd);
1375         paused = TRUE;
1376
1377         if ((pending_scb->flags & SCB_ACTIVE) == 0) {
1378                 printf("%s:%d:%d:%d: Command already completed\n",
1379                        ahd_name(ahd), cmd->device->channel, cmd->device->id,
1380                        cmd->device->lun);
1381                 goto no_cmd;
1382         }
1383
1384         printf("%s: At time of recovery, card was %spaused\n",
1385                ahd_name(ahd), was_paused ? "" : "not ");
1386         ahd_dump_card_state(ahd);
1387
1388         disconnected = TRUE;
1389         if (ahd_search_qinfifo(ahd, cmd->device->id, cmd->device->channel + 'A',
1390                                cmd->device->lun, SCB_GET_TAG(pending_scb),
1391                                ROLE_INITIATOR, CAM_REQ_ABORTED,
1392                                SEARCH_COMPLETE) > 0) {
1393                 printf("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
1394                        ahd_name(ahd), cmd->device->channel, cmd->device->id,
1395                        cmd->device->lun);
1396                 retval = SUCCESS;
1397                 goto done;
1398         }
1399
1400         saved_modes = ahd_save_modes(ahd);
1401         ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
1402         last_phase = ahd_inb(ahd, LASTPHASE);
1403         saved_scbptr = ahd_get_scbptr(ahd);
1404         active_scbptr = saved_scbptr;
1405         if (disconnected && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) == 0) {
1406                 struct scb *bus_scb;
1407
1408                 bus_scb = ahd_lookup_scb(ahd, active_scbptr);
1409                 if (bus_scb == pending_scb)
1410                         disconnected = FALSE;
1411         }
1412
1413         /*
1414          * At this point, pending_scb is the scb associated with the
1415          * passed in command.  That command is currently active on the
1416          * bus or is in the disconnected state.
1417          */
1418         if (last_phase != P_BUSFREE
1419          && SCB_GET_TAG(pending_scb) == active_scbptr) {
1420
1421                 /*
1422                  * We're active on the bus, so assert ATN
1423                  * and hope that the target responds.
1424                  */
1425                 pending_scb = ahd_lookup_scb(ahd, active_scbptr);
1426                 pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
1427                 ahd_outb(ahd, MSG_OUT, HOST_MSG);
1428                 ahd_outb(ahd, SCSISIGO, last_phase|ATNO);
1429                 printf("%s:%d:%d:%d: Device is active, asserting ATN\n",
1430                        ahd_name(ahd), cmd->device->channel,
1431                        cmd->device->id, cmd->device->lun);
1432                 wait = TRUE;
1433         } else if (disconnected) {
1434
1435                 /*
1436                  * Actually re-queue this SCB in an attempt
1437                  * to select the device before it reconnects.
1438                  */
1439                 pending_scb->flags |= SCB_RECOVERY_SCB|SCB_ABORT;
1440                 ahd_set_scbptr(ahd, SCB_GET_TAG(pending_scb));
1441                 pending_scb->hscb->cdb_len = 0;
1442                 pending_scb->hscb->task_attribute = 0;
1443                 pending_scb->hscb->task_management = SIU_TASKMGMT_ABORT_TASK;
1444
1445                 if ((pending_scb->flags & SCB_PACKETIZED) != 0) {
1446                         /*
1447                          * Mark the SCB as having an outstanding
1448                          * task management function.  Should the command
1449                          * complete normally before the task management
1450                          * function can be sent, the host will be notified
1451                          * to abort our requeued SCB.
1452                          */
1453                         ahd_outb(ahd, SCB_TASK_MANAGEMENT,
1454                                  pending_scb->hscb->task_management);
1455                 } else {
1456                         /*
1457                          * If non-packetized, set the MK_MESSAGE control
1458                          * bit indicating that we desire to send a message.
1459                          * We also set the disconnected flag since there is
1460                          * no guarantee that our SCB control byte matches
1461                          * the version on the card.  We don't want the
1462                          * sequencer to abort the command thinking an
1463                          * unsolicited reselection occurred.
1464                          */
1465                         pending_scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
1466
1467                         /*
1468                          * The sequencer will never re-reference the
1469                          * in-core SCB.  To make sure we are notified
1470                          * during reselection, set the MK_MESSAGE flag in
1471                          * the card's copy of the SCB.
1472                          */
1473                         ahd_outb(ahd, SCB_CONTROL,
1474                                  ahd_inb(ahd, SCB_CONTROL)|MK_MESSAGE);
1475                 }
1476
1477                 /*
1478                  * Clear out any entries in the QINFIFO first
1479                  * so we are the next SCB for this target
1480                  * to run.
1481                  */
1482                 ahd_search_qinfifo(ahd, cmd->device->id,
1483                                    cmd->device->channel + 'A', cmd->device->lun,
1484                                    SCB_LIST_NULL, ROLE_INITIATOR,
1485                                    CAM_REQUEUE_REQ, SEARCH_COMPLETE);
1486                 ahd_qinfifo_requeue_tail(ahd, pending_scb);
1487                 ahd_set_scbptr(ahd, saved_scbptr);
1488                 ahd_print_path(ahd, pending_scb);
1489                 printf("Device is disconnected, re-queuing SCB\n");
1490                 wait = TRUE;
1491         } else {
1492                 printf("%s:%d:%d:%d: Unable to deliver message\n",
1493                        ahd_name(ahd), cmd->device->channel,
1494                        cmd->device->id, cmd->device->lun);
1495                 retval = FAILED;
1496                 goto done;
1497         }
1498
1499 no_cmd:
1500         /*
1501          * Our assumption is that if we don't have the command, no
1502          * recovery action was required, so we return success.  Again,
1503          * the semantics of the mid-layer recovery engine are not
1504          * well defined, so this may change in time.
1505          */
1506         retval = SUCCESS;
1507 done:
1508         if (paused)
1509                 ahd_unpause(ahd);
1510         if (wait) {
1511                 struct timer_list timer;
1512                 int ret;
1513
1514                 pending_scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
1515                 spin_unlock_irq(&ahd->platform_data->spin_lock);
1516                 init_timer(&timer);
1517                 timer.data = (u_long)pending_scb;
1518                 timer.expires = jiffies + (5 * HZ);
1519                 timer.function = ahd_linux_sem_timeout;
1520                 add_timer(&timer);
1521                 printf("Recovery code sleeping\n");
1522                 down(&ahd->platform_data->eh_sem);
1523                 printf("Recovery code awake\n");
1524                 ret = del_timer_sync(&timer);
1525                 if (ret == 0) {
1526                         printf("Timer Expired\n");
1527                         retval = FAILED;
1528                 }
1529                 spin_lock_irq(&ahd->platform_data->spin_lock);
1530         }
1531         ahd_schedule_runq(ahd);
1532         ahd_linux_run_complete_queue(ahd);
1533         ahd_midlayer_entrypoint_unlock(ahd, &s);
1534         return (retval);
1535 }
1536
1537
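     /*
      * Completion routine for the internally allocated recovery command
      * issued by ahd_linux_dev_reset().  All that remains to be done is
      * to release the command's storage.
      */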
1538 static void
1539 ahd_linux_dev_reset_complete(Scsi_Cmnd *cmd)
1540 {
1541         free(cmd, M_DEVBUF);
1542 }
1543
1544 /*
1545  * Attempt to send a target reset message to the device that timed out.
1546  */
1547 static int
1548 ahd_linux_dev_reset(Scsi_Cmnd *cmd)
1549 {
1550         struct  ahd_softc *ahd;
1551         struct  scsi_cmnd *recovery_cmd;
1552         struct  ahd_linux_device *dev;
1553         struct  ahd_initiator_tinfo *tinfo;
1554         struct  ahd_tmode_tstate *tstate;
1555         struct  scb *scb;
1556         struct  hardware_scb *hscb;
1557         u_long  s;
1558         struct  timer_list timer;
1559         int     retval;
1560
1561         ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
1562         recovery_cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
1563         memset(recovery_cmd, 0, sizeof(struct scsi_cmnd));
1564         recovery_cmd->device = cmd->device;
1565         recovery_cmd->scsi_done = ahd_linux_dev_reset_complete;
1566 #ifdef AHD_DEBUG
1567         if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
1568                 printf("%s:%d:%d:%d: Device reset called for cmd %p\n",
1569                        ahd_name(ahd), cmd->device->channel, cmd->device->id,
1570                        cmd->device->lun, cmd);
1571 #endif
1572         ahd_midlayer_entrypoint_lock(ahd, &s);
1573
1574         dev = ahd_linux_get_device(ahd, cmd->device->channel, cmd->device->id,
1575                                    cmd->device->lun, /*alloc*/FALSE);
1576         if (dev == NULL) {
1577                 ahd_midlayer_entrypoint_unlock(ahd, &s);
1578                 return (FAILED);
1579         }
1580         if ((scb = ahd_get_scb(ahd, AHD_NEVER_COL_IDX)) == NULL) {
1581                 ahd_midlayer_entrypoint_unlock(ahd, &s);
1582                 return (FAILED);
1583         }
1584         tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
1585                                     cmd->device->id, &tstate);
1586         recovery_cmd->result = CAM_REQ_INPROG << 16;
1587         recovery_cmd->host_scribble = (char *)scb;
1588         scb->io_ctx = recovery_cmd;
1589         scb->platform_data->dev = dev;
1590         scb->sg_count = 0;
1591         ahd_set_residual(scb, 0);
1592         ahd_set_sense_residual(scb, 0);
1593         hscb = scb->hscb;
1594         hscb->control = 0;
1595         hscb->scsiid = BUILD_SCSIID(ahd, cmd);
1596         hscb->lun = cmd->device->lun;
1597         hscb->cdb_len = 0;
1598         hscb->task_management = SIU_TASKMGMT_LUN_RESET;
1599         scb->flags |= SCB_DEVICE_RESET|SCB_RECOVERY_SCB|SCB_ACTIVE;
1600         if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
1601                 scb->flags |= SCB_PACKETIZED;
1602         } else {
1603                 hscb->control |= MK_MESSAGE;
1604         }
1605         dev->openings--;
1606         dev->active++;
1607         dev->commands_issued++;
1608         LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
1609         ahd_queue_scb(ahd, scb);
1610
1611         scb->platform_data->flags |= AHD_SCB_UP_EH_SEM;
1612         spin_unlock_irq(&ahd->platform_data->spin_lock);
1613         init_timer(&timer);
1614         timer.data = (u_long)scb;
1615         timer.expires = jiffies + (5 * HZ);
1616         timer.function = ahd_linux_sem_timeout;
1617         add_timer(&timer);
1618         printf("Recovery code sleeping\n");
1619         down(&ahd->platform_data->eh_sem);
1620         printf("Recovery code awake\n");
1621         retval = SUCCESS;
1622         if (del_timer_sync(&timer) == 0) {
1623                 printf("Timer Expired\n");
1624                 retval = FAILED;
1625         }
1626         spin_lock_irq(&ahd->platform_data->spin_lock);
1627         ahd_schedule_runq(ahd);
1628         ahd_linux_run_complete_queue(ahd);
1629         ahd_midlayer_entrypoint_unlock(ahd, &s);
1630         printf("%s: Device reset returning 0x%x\n", ahd_name(ahd), retval);
1631         return (retval);
1632 }
1633
1634 /*
1635  * Reset the SCSI bus.
1636  */
1637 static int
1638 ahd_linux_bus_reset(Scsi_Cmnd *cmd)
1639 {
1640         struct ahd_softc *ahd;
1641         u_long s;
1642         int    found;
1643
1644         ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
1645 #ifdef AHD_DEBUG
1646         if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
1647                 printf("%s: Bus reset called for cmd %p\n",
1648                        ahd_name(ahd), cmd);
1649 #endif
1650         ahd_midlayer_entrypoint_lock(ahd, &s);
1651         found = ahd_reset_channel(ahd, cmd->device->channel + 'A',
1652                                   /*initiate reset*/TRUE);
1653         ahd_linux_run_complete_queue(ahd);
1654         ahd_midlayer_entrypoint_unlock(ahd, &s);
1655
1656         if (bootverbose)
1657                 printf("%s: SCSI bus reset delivered. "
1658                        "%d SCBs aborted.\n", ahd_name(ahd), found);
1659
1660         return (SUCCESS);
1661 }
1662
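     /*
      * SCSI host template handed to the mid-layer.  The error recovery
      * entry points map onto the abort, device reset, and bus reset
      * handlers defined above.
      */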
1663 Scsi_Host_Template aic79xx_driver_template = {
1664         .module                 = THIS_MODULE,
1665         .name                   = "aic79xx",
1666         .proc_info              = ahd_linux_proc_info,
1667         .info                   = ahd_linux_info,
1668         .queuecommand           = ahd_linux_queue,
1669         .eh_abort_handler       = ahd_linux_abort,
1670         .eh_device_reset_handler = ahd_linux_dev_reset,
1671         .eh_bus_reset_handler   = ahd_linux_bus_reset,
1672 #if defined(__i386__)
1673         .bios_param             = ahd_linux_biosparam,
1674 #endif
1675         .can_queue              = AHD_MAX_QUEUE,
1676         .this_id                = -1,
1677         .cmd_per_lun            = 2,
1678         .use_clustering         = ENABLE_CLUSTERING,
1679         .slave_alloc            = ahd_linux_slave_alloc,
1680         .slave_configure        = ahd_linux_slave_configure,
1681         .slave_destroy          = ahd_linux_slave_destroy,
1682 };
1683
1684 /**************************** Tasklet Handler *********************************/
1685
1686 /*
1687  * In 2.4.X and above, this routine is called from a tasklet,
1688  * so we must re-acquire our lock prior to executing this code.
1689  * In all prior kernels, ahd_schedule_runq() calls this routine
1690  * directly and ahd_schedule_runq() is called with our lock held.
1691  */
1692 static void
1693 ahd_runq_tasklet(unsigned long data)
1694 {
1695         struct ahd_softc* ahd;
1696         struct ahd_linux_device *dev;
1697 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
1698         u_long flags;
1699 #endif
1700
1701         ahd = (struct ahd_softc *)data;
1702 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
1703         ahd_lock(ahd, &flags);
1704 #endif
1705         while ((dev = ahd_linux_next_device_to_run(ahd)) != NULL) {
1706         
1707                 TAILQ_REMOVE(&ahd->platform_data->device_runq, dev, links);
1708                 dev->flags &= ~AHD_DEV_ON_RUN_LIST;
1709                 ahd_linux_check_device_queue(ahd, dev);
1710 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
1711                 /* Yield to our interrupt handler */
1712                 ahd_unlock(ahd, &flags);
1713                 ahd_lock(ahd, &flags);
1714 #endif
1715         }
1716 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
1717         ahd_unlock(ahd, &flags);
1718 #endif
1719 }
1720
1721 /******************************** Bus DMA *************************************/
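     /*
      * Create a minimal DMA tag.  Only the alignment, boundary, and
      * maximum size constraints are recorded; see the comment below for
      * why the remaining bus_dma parameters are currently ignored.
      */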
1722 int
1723 ahd_dma_tag_create(struct ahd_softc *ahd, bus_dma_tag_t parent,
1724                    bus_size_t alignment, bus_size_t boundary,
1725                    bus_addr_t lowaddr, bus_addr_t highaddr,
1726                    bus_dma_filter_t *filter, void *filterarg,
1727                    bus_size_t maxsize, int nsegments,
1728                    bus_size_t maxsegsz, int flags, bus_dma_tag_t *ret_tag)
1729 {
1730         bus_dma_tag_t dmat;
1731
1732         dmat = malloc(sizeof(*dmat), M_DEVBUF, M_NOWAIT);
1733         if (dmat == NULL)
1734                 return (ENOMEM);
1735
1736         /*
1737          * Linux is very simplistic about DMA memory.  For now don't
1738          * maintain all specification information.  Once Linux supplies
1739          * better facilities for doing these operations, or the
1740          * needs of this particular driver change, we might need to do
1741          * more here.
1742          */
1743         dmat->alignment = alignment;
1744         dmat->boundary = boundary;
1745         dmat->maxsize = maxsize;
1746         *ret_tag = dmat;
1747         return (0);
1748 }
1749
1750 void
1751 ahd_dma_tag_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat)
1752 {
1753         free(dmat, M_DEVBUF);
1754 }
1755
1756 int
1757 ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
1758                  int flags, bus_dmamap_t *mapp)
1759 {
1760         bus_dmamap_t map;
1761
1762 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
1763         map = malloc(sizeof(*map), M_DEVBUF, M_NOWAIT);
1764         if (map == NULL)
1765                 return (ENOMEM);
1766         /*
1767          * Although we can dma data above 4GB, our
1768          * "consistent" memory is below 4GB for
1769          * space efficiency reasons (only need a 4byte
1770          * address).  For this reason, we have to reset
1771          * our dma mask when doing allocations.
1772          */
1773         if (ahd->dev_softc != NULL)
1774                 if (ahd_pci_set_dma_mask(ahd->dev_softc, 0xFFFFFFFF)) {
1775                         printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
1776                         return (ENODEV);
1777                 }
1778         *vaddr = pci_alloc_consistent(ahd->dev_softc,
1779                                       dmat->maxsize, &map->bus_addr);
1780         if (ahd->dev_softc != NULL)
1781                 if (ahd_pci_set_dma_mask(ahd->dev_softc,
1782                                      ahd->platform_data->hw_dma_mask)) {
1783                         printk(KERN_WARNING "aic79xx: No suitable DMA available.\n");
1784                         return (ENODEV);
1785                 }
1786 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0) */
1787         /*
1788          * At least in 2.2.14, malloc is a slab allocator so all
1789          * allocations are aligned.  We assume for these kernel versions
1790          * that all allocations will be below 4Gig, physically contiguous,
1791          * and accessible via DMA by the controller.
1792          */
1793         map = NULL; /* No additional information to store */
1794         *vaddr = malloc(dmat->maxsize, M_DEVBUF, M_NOWAIT);
1795 #endif
1796         if (*vaddr == NULL)
1797                 return (ENOMEM);
1798         *mapp = map;
1799         return(0);
1800 }
1801
1802 void
1803 ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
1804                 void* vaddr, bus_dmamap_t map)
1805 {
1806 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
1807         pci_free_consistent(ahd->dev_softc, dmat->maxsize,
1808                             vaddr, map->bus_addr);
1809 #else
1810         free(vaddr, M_DEVBUF);
1811 #endif
1812 }
1813
1814 int
1815 ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map,
1816                 void *buf, bus_size_t buflen, bus_dmamap_callback_t *cb,
1817                 void *cb_arg, int flags)
1818 {
1819         /*
1820          * Assume for now that this will only be used during
1821          * initialization and not for per-transaction buffer mapping.
1822          */
1823         bus_dma_segment_t stack_sg;
1824
1825 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
1826         stack_sg.ds_addr = map->bus_addr;
1827 #else
1828 #define VIRT_TO_BUS(a) (uint32_t)virt_to_bus((void *)(a))
1829         stack_sg.ds_addr = VIRT_TO_BUS(buf);
1830 #endif
1831         stack_sg.ds_len = dmat->maxsize;
1832         cb(cb_arg, &stack_sg, /*nseg*/1, /*error*/0);
1833         return (0);
1834 }
1835
1836 void
1837 ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
1838 {
1839         /*
1840          * The map may be NULL in our < 2.3.X implementation.
1841          */
1842         if (map != NULL)
1843                 free(map, M_DEVBUF);
1844 }
1845
1846 int
1847 ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map)
1848 {
1849         /* Nothing to do */
1850         return (0);
1851 }
1852
1853 /********************* Platform Dependent Functions ***************************/
1854 /*
1855  * Compare "left hand" softc with "right hand" softc, returning:
1856  * < 0 - lahd has a lower priority than rahd
1857  *   0 - Softcs are equal
1858  * > 0 - lahd has a higher priority than rahd
1859  */
1860 int
1861 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
1862 {
1863         int     value;
1864
1865         /*
1866          * Under Linux, cards are ordered as follows:
1867          *      1) PCI devices that are marked as the boot controller.
1868          *      2) PCI devices with BIOS enabled sorted by bus/slot/func.
1869          *      3) All remaining PCI devices sorted by bus/slot/func.
1870          */
1871 #if 0
1872         value = (lahd->flags & AHD_BOOT_CHANNEL)
1873               - (rahd->flags & AHD_BOOT_CHANNEL);
1874         if (value != 0)
1875                 /* Controllers set for boot have a *higher* priority */
1876                 return (value);
1877 #endif
1878
1879         value = (lahd->flags & AHD_BIOS_ENABLED)
1880               - (rahd->flags & AHD_BIOS_ENABLED);
1881         if (value != 0)
1882                 /* Controllers with BIOS enabled have a *higher* priority */
1883                 return (value);
1884
1885         /* Still equal.  Sort by bus/slot/func. */
1886         if (aic79xx_reverse_scan != 0)
1887                 value = ahd_get_pci_bus(lahd->dev_softc)
1888                       - ahd_get_pci_bus(rahd->dev_softc);
1889         else
1890                 value = ahd_get_pci_bus(rahd->dev_softc)
1891                       - ahd_get_pci_bus(lahd->dev_softc);
1892         if (value != 0)
1893                 return (value);
1894         if (aic79xx_reverse_scan != 0)
1895                 value = ahd_get_pci_slot(lahd->dev_softc)
1896                       - ahd_get_pci_slot(rahd->dev_softc);
1897         else
1898                 value = ahd_get_pci_slot(rahd->dev_softc)
1899                       - ahd_get_pci_slot(lahd->dev_softc);
1900         if (value != 0)
1901                 return (value);
1902
1903         value = rahd->channel - lahd->channel;
1904         return (value);
1905 }
1906
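     /*
      * Boot option callback: record a per-target tagged queue depth
      * parsed from the "tag_info" option, masked to the 9 bit (0x1FF)
      * field the driver stores.
      */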
1907 static void
1908 ahd_linux_setup_tag_info(u_long arg, int instance, int targ, int32_t value)
1909 {
1910
1911         if ((instance >= 0) && (targ >= 0)
1912          && (instance < NUM_ELEMENTS(aic79xx_tag_info))
1913          && (targ < AHD_NUM_TARGETS)) {
1914                 aic79xx_tag_info[instance].tag_commands[targ] = value & 0x1FF;
1915                 if (bootverbose)
1916                         printf("tag_info[%d:%d] = %d\n", instance, targ, value);
1917         }
1918 }
1919
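     /*
      * Boot option callback: record the per-controller read streaming
      * bitmask parsed from the "rd_strm" option.
      */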
1920 static void
1921 ahd_linux_setup_rd_strm_info(u_long arg, int instance, int targ, int32_t value)
1922 {
1923         if ((instance >= 0)
1924          && (instance < NUM_ELEMENTS(aic79xx_rd_strm_info))) {
1925                 aic79xx_rd_strm_info[instance] = value & 0xFFFF;
1926                 if (bootverbose)
1927                         printf("rd_strm[%d] = 0x%x\n", instance, value);
1928         }
1929 }
1930
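     /*
      * Boot option callback: record the per-controller domain validation
      * setting parsed from the "dv" option.
      */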
1931 static void
1932 ahd_linux_setup_dv(u_long arg, int instance, int targ, int32_t value)
1933 {
1934         if ((instance >= 0)
1935          && (instance < NUM_ELEMENTS(aic79xx_dv_settings))) {
1936                 aic79xx_dv_settings[instance] = value;
1937                 if (bootverbose)
1938                         printf("dv[%d] = %d\n", instance, value);
1939         }
1940 }
1941
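     /*
      * Boot option callback: record an IO cell override for a controller
      * instance.  The index argument selects which field (slewrate,
      * precomp, or amplitude) is being set.
      */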
1942 static void
1943 ahd_linux_setup_iocell_info(u_long index, int instance, int targ, int32_t value)
1944 {
1945
1946         if ((instance >= 0)
1947          && (instance < NUM_ELEMENTS(aic79xx_iocell_info))) {
1948                 uint8_t *iocell_info;
1949
1950                 iocell_info = (uint8_t*)&aic79xx_iocell_info[instance];
1951                 iocell_info[index] = value & 0xFFFF;
1952                 if (bootverbose)
1953                         printf("iocell[%d:%ld] = %d\n", instance, index, value);
1954         }
1955 }
1956
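     /*
      * Apply a single tag depth to every target of every controller in
      * response to the "global_tag_depth" boot option.
      */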
1957 static void
1958 ahd_linux_setup_tag_info_global(char *p)
1959 {
1960         int tags, i, j;
1961
1962         tags = simple_strtoul(p + 1, NULL, 0) & 0xff;
1963         printf("Setting Global Tags= %d\n", tags);
1964
1965         for (i = 0; i < NUM_ELEMENTS(aic79xx_tag_info); i++) {
1966                 for (j = 0; j < AHD_NUM_TARGETS; j++) {
1967                         aic79xx_tag_info[i].tag_commands[j] = tags;
1968                 }
1969         }
1970 }
1971
1972 /*
1973  * Handle Linux boot parameters. This routine allows for assigning a value
1974  * to a parameter with a ':' between the parameter and the value.
1975  * ie. aic79xx=stpwlev:1,extended
1976  */
1977 static int
1978 aic79xx_setup(char *s)
1979 {
1980         int     i, n;
1981         char   *p;
1982         char   *end;
1983
1984         static struct {
1985                 const char *name;
1986                 uint32_t *flag;
1987         } options[] = {
1988                 { "extended", &aic79xx_extended },
1989                 { "no_reset", &aic79xx_no_reset },
1990                 { "verbose", &aic79xx_verbose },
1991                 { "allow_memio", &aic79xx_allow_memio},
1992 #ifdef AHD_DEBUG
1993                 { "debug", &ahd_debug },
1994 #endif
1995                 { "reverse_scan", &aic79xx_reverse_scan },
1996                 { "periodic_otag", &aic79xx_periodic_otag },
1997                 { "pci_parity", &aic79xx_pci_parity },
1998                 { "seltime", &aic79xx_seltime },
1999                 { "tag_info", NULL },
2000                 { "global_tag_depth", NULL},
2001                 { "rd_strm", NULL },
2002                 { "dv", NULL },
2003                 { "slewrate", NULL },
2004                 { "precomp", NULL },
2005                 { "amplitude", NULL },
2006         };
2007
2008         end = strchr(s, '\0');
2009
2010         /*
2011          * XXX ia64 gcc isn't smart enough to know that NUM_ELEMENTS
2012          * will never be 0 in this case.
2013          */      
2014         n = 0;  
2015
2016         while ((p = strsep(&s, ",.")) != NULL) {
2017                 if (*p == '\0')
2018                         continue;
2019                 for (i = 0; i < NUM_ELEMENTS(options); i++) {
2020
2021                         n = strlen(options[i].name);
2022                         if (strncmp(options[i].name, p, n) == 0)
2023                                 break;
2024                 }
2025                 if (i == NUM_ELEMENTS(options))
2026                         continue;
2027
2028                 if (strncmp(p, "global_tag_depth", n) == 0) {
2029                         ahd_linux_setup_tag_info_global(p + n);
2030                 } else if (strncmp(p, "tag_info", n) == 0) {
2031                         s = aic_parse_brace_option("tag_info", p + n, end,
2032                             2, ahd_linux_setup_tag_info, 0);
2033                 } else if (strncmp(p, "rd_strm", n) == 0) {
2034                         s = aic_parse_brace_option("rd_strm", p + n, end,
2035                             1, ahd_linux_setup_rd_strm_info, 0);
2036                 } else if (strncmp(p, "dv", n) == 0) {
2037                         s = aic_parse_brace_option("dv", p + n, end, 1,
2038                             ahd_linux_setup_dv, 0);
2039                 } else if (strncmp(p, "slewrate", n) == 0) {
2040                         s = aic_parse_brace_option("slewrate",
2041                             p + n, end, 1, ahd_linux_setup_iocell_info,
2042                             AIC79XX_SLEWRATE_INDEX);
2043                 } else if (strncmp(p, "precomp", n) == 0) {
2044                         s = aic_parse_brace_option("precomp",
2045                             p + n, end, 1, ahd_linux_setup_iocell_info,
2046                             AIC79XX_PRECOMP_INDEX);
2047                 } else if (strncmp(p, "amplitude", n) == 0) {
2048                         s = aic_parse_brace_option("amplitude",
2049                             p + n, end, 1, ahd_linux_setup_iocell_info,
2050                             AIC79XX_AMPLITUDE_INDEX);
2051                 } else if (p[n] == ':') {
2052                         *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0);
2053                 } else if (!strncmp(p, "verbose", n)) {
2054                         *(options[i].flag) = 1;
2055                 } else {
2056                         *(options[i].flag) ^= 0xFFFFFFFF;
2057                 }
2058         }
2059         return 1;
2060 }
2061
2062 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
2063 __setup("aic79xx=", aic79xx_setup);
2064 #endif
2065
2066 uint32_t aic79xx_verbose;
2067
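     /*
      * Allocate and configure a Scsi_Host for this controller, register
      * it with the mid-layer, spawn the domain validation thread, and
      * pre-allocate target objects so DV can begin immediately.
      */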
2068 int
2069 ahd_linux_register_host(struct ahd_softc *ahd, Scsi_Host_Template *template)
2070 {
2071         char    buf[80];
2072         struct  Scsi_Host *host;
2073         char    *new_name;
2074         u_long  s;
2075         u_long  target;
2076
2077         template->name = ahd->description;
2078         host = scsi_host_alloc(template, sizeof(struct ahd_softc *));
2079         if (host == NULL)
2080                 return (ENOMEM);
2081
2082         *((struct ahd_softc **)host->hostdata) = ahd;
2083         ahd_lock(ahd, &s);
2084 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2085         scsi_assign_lock(host, &ahd->platform_data->spin_lock);
2086 #elif AHD_SCSI_HAS_HOST_LOCK != 0
2087         host->lock = &ahd->platform_data->spin_lock;
2088 #endif
2089         ahd->platform_data->host = host;
2090         host->can_queue = AHD_MAX_QUEUE;
2091         host->cmd_per_lun = 2;
2092         host->sg_tablesize = AHD_NSEG;
2093         host->this_id = ahd->our_id;
2094         host->irq = ahd->platform_data->irq;
2095         host->max_id = (ahd->features & AHD_WIDE) ? 16 : 8;
2096         host->max_lun = AHD_NUM_LUNS;
2097         host->max_channel = 0;
2098         host->sg_tablesize = AHD_NSEG;
2099         ahd_set_unit(ahd, ahd_linux_next_unit());
2100         sprintf(buf, "scsi%d", host->host_no);
2101         new_name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
2102         if (new_name != NULL) {
2103                 strcpy(new_name, buf);
2104                 ahd_set_name(ahd, new_name);
2105         }
2106         host->unique_id = ahd->unit;
2107 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,4) && \
2108     LINUX_VERSION_CODE  < KERNEL_VERSION(2,5,0)
2109         scsi_set_pci_device(host, ahd->dev_softc);
2110 #endif
2111         ahd_linux_setup_user_rd_strm_settings(ahd);
2112         ahd_linux_initialize_scsi_bus(ahd);
2113         ahd_unlock(ahd, &s);
2114         ahd->platform_data->dv_pid = kernel_thread(ahd_linux_dv_thread, ahd, 0);
2115         ahd_lock(ahd, &s);
2116         if (ahd->platform_data->dv_pid < 0) {
2117                 printf("%s: Failed to create DV thread, error= %d\n",
2118                        ahd_name(ahd), ahd->platform_data->dv_pid);
                     ahd_unlock(ahd, &s);    /* drop the lock taken above */
2119                 return (-ahd->platform_data->dv_pid);
2120         }
2121         /*
2122          * Initially allocate *all* of our linux target objects
2123          * so that the DV thread will scan them all in parallel
2124          * just after driver initialization.  Any device that
2125          * does not exist will have its target object destroyed
2126          * by the selection timeout handler.  In the case of a
2127          * device that appears after the initial DV scan, async
2128          * negotiation will occur for the first command, and DV
2129          * will commence should that first command be successful.
2130          */
2131         for (target = 0; target < host->max_id; target++) {
2132
2133                 /*
2134                  * Skip our own ID.  Some Compaq/HP storage devices
2135                  * have enclosure management devices that respond to
2136                  * single bit selection (i.e. selecting ourselves).
2137                  * It is expected that either an external application
2138                  * or a modified kernel will be used to probe this
2139                  * ID if it is appropriate.  To accommodate these
2140          * installations, ahd_linux_alloc_target() will allocate
2141                  * for our ID if asked to do so.
2142                  */
2143                 if (target == ahd->our_id) 
2144                         continue;
2145
2146                 ahd_linux_alloc_target(ahd, 0, target);
2147         }
2148         ahd_intr_enable(ahd, TRUE);
2149         ahd_linux_start_dv(ahd);
2150         ahd_unlock(ahd, &s);
2151
2152 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2153         scsi_add_host(host, &ahd->dev_softc->dev); /* XXX handle failure */
2154         scsi_scan_host(host);
2155 #endif
2156         return (0);
2157 }
2158
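     /*
      * Return the total amount of system memory in bytes as reported
      * by si_meminfo().
      */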
2159 uint64_t
2160 ahd_linux_get_memsize(void)
2161 {
2162         struct sysinfo si;
2163
2164         si_meminfo(&si);
2165         return ((uint64_t)si.totalram << PAGE_SHIFT);
2166 }
2167
2168 /*
2169  * Find the smallest available unit number to use
2170  * for a new device.  We don't just use a static
2171  * count to handle the "repeated hot-(un)plug"
2172  * scenario.
2173  */
2174 static int
2175 ahd_linux_next_unit(void)
2176 {
2177         struct ahd_softc *ahd;
2178         int unit;
2179
2180         unit = 0;
2181 retry:
2182         TAILQ_FOREACH(ahd, &ahd_tailq, links) {
2183                 if (ahd->unit == unit) {
2184                         unit++;
2185                         goto retry;
2186                 }
2187         }
2188         return (unit);
2189 }
2190
2191 /*
2192  * Place the SCSI bus into a known state by either resetting it,
2193  * or forcing transfer negotiations on the next command to any
2194  * target.
2195  */
2196 static void
2197 ahd_linux_initialize_scsi_bus(struct ahd_softc *ahd)
2198 {
2199         u_int target_id;
2200         u_int numtarg;
2201
2202         target_id = 0;
2203         numtarg = 0;
2204
2205         if (aic79xx_no_reset != 0)
2206                 ahd->flags &= ~AHD_RESET_BUS_A;
2207
2208         if ((ahd->flags & AHD_RESET_BUS_A) != 0)
2209                 ahd_reset_channel(ahd, 'A', /*initiate_reset*/TRUE);
2210         else
2211                 numtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
2212
2213         /*
2214          * Force negotiation to async for all targets that
2215          * will not see an initial bus reset.
2216          */
2217         for (; target_id < numtarg; target_id++) {
2218                 struct ahd_devinfo devinfo;
2219                 struct ahd_initiator_tinfo *tinfo;
2220                 struct ahd_tmode_tstate *tstate;
2221
2222                 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
2223                                             target_id, &tstate);
2224                 ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
2225                                     CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
2226                 ahd_update_neg_request(ahd, &devinfo, tstate,
2227                                        tinfo, AHD_NEG_ALWAYS);
2228         }
2229         /* Give the bus some time to recover */
2230         if ((ahd->flags & AHD_RESET_BUS_A) != 0) {
2231                 ahd_freeze_simq(ahd);
2232                 init_timer(&ahd->platform_data->reset_timer);
2233                 ahd->platform_data->reset_timer.data = (u_long)ahd;
2234                 ahd->platform_data->reset_timer.expires =
2235                     jiffies + (AIC79XX_RESET_DELAY * HZ)/1000;
2236                 ahd->platform_data->reset_timer.function =
2237                     (ahd_linux_callback_t *)ahd_release_simq;
2238                 add_timer(&ahd->platform_data->reset_timer);
2239         }
2240 }
2241
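     /*
      * Allocate and initialize our platform data: run/complete queues,
      * locks, the completion queue timer, the recovery and DV semaphores,
      * and the run queue tasklet.
      */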
2242 int
2243 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
2244 {
2245         ahd->platform_data =
2246             malloc(sizeof(struct ahd_platform_data), M_DEVBUF, M_NOWAIT);
2247         if (ahd->platform_data == NULL)
2248                 return (ENOMEM);
2249         memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
2250         TAILQ_INIT(&ahd->platform_data->completeq);
2251         TAILQ_INIT(&ahd->platform_data->device_runq);
2252         ahd->platform_data->irq = AHD_LINUX_NOIRQ;
2253         ahd->platform_data->hw_dma_mask = 0xFFFFFFFF;
2254         ahd_lockinit(ahd);
2255         ahd_done_lockinit(ahd);
2256         init_timer(&ahd->platform_data->completeq_timer);
2257         ahd->platform_data->completeq_timer.data = (u_long)ahd;
2258         ahd->platform_data->completeq_timer.function =
2259             (ahd_linux_callback_t *)ahd_linux_thread_run_complete_queue;
2260 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
2261         init_MUTEX_LOCKED(&ahd->platform_data->eh_sem);
2262         init_MUTEX_LOCKED(&ahd->platform_data->dv_sem);
2263         init_MUTEX_LOCKED(&ahd->platform_data->dv_cmd_sem);
2264 #else
2265         ahd->platform_data->eh_sem = MUTEX_LOCKED;
2266         ahd->platform_data->dv_sem = MUTEX_LOCKED;
2267         ahd->platform_data->dv_cmd_sem = MUTEX_LOCKED;
2268 #endif
2269         ahd_setup_runq_tasklet(ahd);
2270         ahd->seltime = (aic79xx_seltime & 0x3) << 4;
2271         return (0);
2272 }
2273
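     /*
      * Release all platform resources: stop the DV thread and tasklet,
      * detach from the SCSI mid-layer, free target and device objects,
      * and release the IRQ and register mappings.
      */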
2274 void
2275 ahd_platform_free(struct ahd_softc *ahd)
2276 {
2277         struct ahd_linux_target *targ;
2278         struct ahd_linux_device *dev;
2279         int i, j;
2280
2281         if (ahd->platform_data != NULL) {
2282                 del_timer_sync(&ahd->platform_data->completeq_timer);
2283                 ahd_linux_kill_dv_thread(ahd);
2284                 ahd_teardown_runq_tasklet(ahd);
2285                 if (ahd->platform_data->host != NULL) {
2286 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2287                         scsi_remove_host(ahd->platform_data->host);
2288 #endif
2289                         scsi_host_put(ahd->platform_data->host);
2290                 }
2291
2292                 /* destroy all of the device and target objects */
2293                 for (i = 0; i < AHD_NUM_TARGETS; i++) {
2294                         targ = ahd->platform_data->targets[i];
2295                         if (targ != NULL) {
2296                                 /* Keep target around through the loop. */
2297                                 targ->refcount++;
2298                                 for (j = 0; j < AHD_NUM_LUNS; j++) {
2299
2300                                         if (targ->devices[j] == NULL)
2301                                                 continue;
2302                                         dev = targ->devices[j];
2303                                         ahd_linux_free_device(ahd, dev);
2304                                 }
2305                                 /*
2306                                  * Forcibly free the target now that
2307                                  * all devices are gone.
2308                                  */
2309                                 ahd_linux_free_target(ahd, targ);
2310                         }
2311                 }
2312
2313                 if (ahd->platform_data->irq != AHD_LINUX_NOIRQ)
2314                         free_irq(ahd->platform_data->irq, ahd);
2315                 if (ahd->tags[0] == BUS_SPACE_PIO
2316                  && ahd->bshs[0].ioport != 0)
2317                         release_region(ahd->bshs[0].ioport, 256);
2318                 if (ahd->tags[1] == BUS_SPACE_PIO
2319                  && ahd->bshs[1].ioport != 0)
2320                         release_region(ahd->bshs[1].ioport, 256);
2321                 if (ahd->tags[0] == BUS_SPACE_MEMIO
2322                  && ahd->bshs[0].maddr != NULL) {
2323                         u_long base_addr;
2324
2325                         base_addr = (u_long)ahd->bshs[0].maddr;
2326                         base_addr &= PAGE_MASK;
2327                         iounmap((void *)base_addr);
2328 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
2329                         release_mem_region(ahd->platform_data->mem_busaddr,
2330                                            0x1000);
2331 #endif
2332                 }
2333 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) && \
2334     LINUX_VERSION_CODE  < KERNEL_VERSION(2,5,0)
2335                 /*
2336                  * In 2.4 we detach from the scsi midlayer before the PCI
2337                  * layer invokes our remove callback.  No per-instance
2338                  * detach is provided, so we must reach inside the PCI
2339                  * subsystem's internals and detach our driver manually.
2340                  */
2341                 if (ahd->dev_softc != NULL)
2342                         ahd->dev_softc->driver = NULL;
2343 #endif
2344                 free(ahd->platform_data, M_DEVBUF);
2345         }
2346 }
2347
2348 void
2349 ahd_platform_init(struct ahd_softc *ahd)
2350 {
2351         /*
2352          * Lookup and commit any modified IO Cell options.
2353          */
2354         if (ahd->unit < NUM_ELEMENTS(aic79xx_iocell_info)) {
2355                 struct ahd_linux_iocell_opts *iocell_opts;
2356
2357                 iocell_opts = &aic79xx_iocell_info[ahd->unit];
2358                 if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
2359                         AHD_SET_PRECOMP(ahd, iocell_opts->precomp);
2360                 if (iocell_opts->slewrate != AIC79XX_DEFAULT_SLEWRATE)
2361                         AHD_SET_SLEWRATE(ahd, iocell_opts->slewrate);
2362                 if (iocell_opts->amplitude != AIC79XX_DEFAULT_AMPLITUDE)
2363                         AHD_SET_AMPLITUDE(ahd, iocell_opts->amplitude);
2364         }
2365
2366 }
2367
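     /*
      * Complete any commands still queued for this SCB's device with
      * CAM_REQUEUE_REQ status so the mid-layer will retry them.
      */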
2368 void
2369 ahd_platform_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
2370 {
2371         ahd_platform_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
2372                                 SCB_GET_CHANNEL(ahd, scb),
2373                                 SCB_GET_LUN(scb), SCB_LIST_NULL,
2374                                 ROLE_UNKNOWN, CAM_REQUEUE_REQ);
2375 }
2376
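     /*
      * Switch a device between untagged, basic, and full tagged queuing
      * and recompute its command openings, informing the mid-layer of
      * the new queue depth on 2.5 and later kernels.
      */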
2377 void
2378 ahd_platform_set_tags(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
2379                       ahd_queue_alg alg)
2380 {
2381         struct ahd_linux_device *dev;
2382         int was_queuing;
2383         int now_queuing;
2384
2385         dev = ahd_linux_get_device(ahd, devinfo->channel - 'A',
2386                                    devinfo->target,
2387                                    devinfo->lun, /*alloc*/FALSE);
2388         if (dev == NULL)
2389                 return;
2390         was_queuing = dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED);
2391         switch (alg) {
2392         default:
2393         case AHD_QUEUE_NONE:
2394                 now_queuing = 0;
2395                 break; 
2396         case AHD_QUEUE_BASIC:
2397                 now_queuing = AHD_DEV_Q_BASIC;
2398                 break;
2399         case AHD_QUEUE_TAGGED:
2400                 now_queuing = AHD_DEV_Q_TAGGED;
2401                 break;
2402         }
2403         if ((dev->flags & AHD_DEV_FREEZE_TIL_EMPTY) == 0
2404          && (was_queuing != now_queuing)
2405          && (dev->active != 0)) {
2406                 dev->flags |= AHD_DEV_FREEZE_TIL_EMPTY;
2407                 dev->qfrozen++;
2408         }
2409
2410         dev->flags &= ~(AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED|AHD_DEV_PERIODIC_OTAG);
2411         if (now_queuing) {
2412                 u_int usertags;
2413
2414                 usertags = ahd_linux_user_tagdepth(ahd, devinfo);
2415                 if (!was_queuing) {
2416                         /*
2417                          * Start out aggressively and allow our
2418                          * dynamic queue depth algorithm to take
2419                          * care of the rest.
2420                          */
2421                         dev->maxtags = usertags;
2422                         dev->openings = dev->maxtags - dev->active;
2423                 }
2424                 if (dev->maxtags == 0) {
2425                         /*
2426                          * Queueing is disabled by the user.
2427                          */
2428                         dev->openings = 1;
2429                 } else if (alg == AHD_QUEUE_TAGGED) {
2430                         dev->flags |= AHD_DEV_Q_TAGGED;
2431                         if (aic79xx_periodic_otag != 0)
2432                                 dev->flags |= AHD_DEV_PERIODIC_OTAG;
2433                 } else
2434                         dev->flags |= AHD_DEV_Q_BASIC;
2435         } else {
2436                 /* We can only have one opening. */
2437                 dev->maxtags = 0;
2438                 dev->openings = 1 - dev->active;
2439         }
2440 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2441         if (dev->scsi_device != NULL) {
2442                 switch ((dev->flags & (AHD_DEV_Q_BASIC|AHD_DEV_Q_TAGGED))) {
2443                 case AHD_DEV_Q_BASIC:
2444                         scsi_adjust_queue_depth(dev->scsi_device,
2445                                                 MSG_SIMPLE_TASK,
2446                                                 dev->openings + dev->active);
2447                         break;
2448                 case AHD_DEV_Q_TAGGED:
2449                         scsi_adjust_queue_depth(dev->scsi_device,
2450                                                 MSG_ORDERED_TASK,
2451                                                 dev->openings + dev->active);
2452                         break;
2453                 default:
2454                         /*
2455                          * We allow the OS to queue 2 untagged transactions to
2456                          * us at any time even though we can only execute them
2457                          * serially on the controller/device.  This should
2458                          * remove some latency.
2459                          */
2460                         scsi_adjust_queue_depth(dev->scsi_device,
2461                                                 /*NON-TAGGED*/0,
2462                                                 /*queue depth*/2);
2463                         break;
2464                 }
2465         }
2466 #endif
2467 }
2468
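     /*
      * Complete, with the given status, any commands still sitting on
      * the busy queues of the devices matching the target/lun wildcards.
      * Returns the number of commands completed.
      */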
2469 int
2470 ahd_platform_abort_scbs(struct ahd_softc *ahd, int target, char channel,
2471                         int lun, u_int tag, role_t role, uint32_t status)
2472 {
2473         int targ;
2474         int maxtarg;
2475         int maxlun;
2476         int clun;
2477         int count;
2478
2479         if (tag != SCB_LIST_NULL)
2480                 return (0);
2481
2482         targ = 0;
2483         if (target != CAM_TARGET_WILDCARD) {
2484                 targ = target;
2485                 maxtarg = targ + 1;
2486         } else {
2487                 maxtarg = (ahd->features & AHD_WIDE) ? 16 : 8;
2488         }
2489         clun = 0;
2490         if (lun != CAM_LUN_WILDCARD) {
2491                 clun = lun;
2492                 maxlun = clun + 1;
2493         } else {
2494                 maxlun = AHD_NUM_LUNS;
2495         }
2496
2497         count = 0;
2498         for (; targ < maxtarg; targ++) {
2499
2500                 for (; clun < maxlun; clun++) {
2501                         struct ahd_linux_device *dev;
2502                         struct ahd_busyq *busyq;
2503                         struct ahd_cmd *acmd;
2504
2505                         dev = ahd_linux_get_device(ahd, /*chan*/0, targ,
2506                                                    clun, /*alloc*/FALSE);
2507                         if (dev == NULL)
2508                                 continue;
2509
2510                         busyq = &dev->busyq;
2511                         while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
2512                                 Scsi_Cmnd *cmd;
2513
2514                                 cmd = &acmd_scsi_cmd(acmd);
2515                                 TAILQ_REMOVE(busyq, acmd,
2516                                              acmd_links.tqe);
2517                                 count++;
2518                                 cmd->result = status << 16;
2519                                 ahd_linux_queue_cmd_complete(ahd, cmd);
2520                         }
2521                 }
2522         }
2523
2524         return (count);
2525 }
2526
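     /*
      * Completion queue timer handler: clear the pending timer state and
      * drain the completion queue with the controller lock held.
      */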
2527 static void
2528 ahd_linux_thread_run_complete_queue(struct ahd_softc *ahd)
2529 {
2530         u_long flags;
2531
2532         ahd_lock(ahd, &flags);
2533         del_timer(&ahd->platform_data->completeq_timer);
2534         ahd->platform_data->flags &= ~AHD_RUN_CMPLT_Q_TIMER;
2535         ahd_linux_run_complete_queue(ahd);
2536         ahd_unlock(ahd, &flags);
2537 }
2538
2539 static void
2540 ahd_linux_start_dv(struct ahd_softc *ahd)
2541 {
2542
2543         /*
2544          * Freeze the simq and signal ahd_linux_queue to not let any
2545          * more commands through
2546          */
2547         if ((ahd->platform_data->flags & AHD_DV_ACTIVE) == 0) {
2548 #ifdef AHD_DEBUG
2549                 if (ahd_debug & AHD_SHOW_DV)
2550                         printf("%s: Starting DV\n", ahd_name(ahd));
2551 #endif
2552
2553                 ahd->platform_data->flags |= AHD_DV_ACTIVE;
2554                 ahd_freeze_simq(ahd);
2555
2556                 /* Wake up the DV kthread */
2557                 up(&ahd->platform_data->dv_sem);
2558         }
2559 }
2560
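     /*
      * Domain validation kernel thread.  Sleeps on dv_sem until DV is
      * requested, waits for pending commands to drain and for the SIMQ
      * to be frozen solely for DV, validates each target in turn, and
      * then releases the SIMQ.
      */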
2561 static int
2562 ahd_linux_dv_thread(void *data)
2563 {
2564         struct  ahd_softc *ahd;
2565         int     target;
2566         u_long  s;
2567
2568         ahd = (struct ahd_softc *)data;
2569
2570 #ifdef AHD_DEBUG
2571         if (ahd_debug & AHD_SHOW_DV)
2572                 printf("In DV Thread\n");
2573 #endif
2574
2575         /*
2576          * Complete thread creation.
2577          */
2578         lock_kernel();
2579 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60)
2580         /*
2581          * Don't care about any signals.
2582          */
2583         siginitsetinv(&current->blocked, 0);
2584
2585         daemonize();
2586         sprintf(current->comm, "ahd_dv_%d", ahd->unit);
2587 #else
2588         daemonize("ahd_dv_%d", ahd->unit);
2589         current->flags |= PF_FREEZE;
2590 #endif
2591         unlock_kernel();
2592
2593         while (1) {
2594                 /*
2595                  * Use down_interruptible() rather than down() to
2596                  * avoid inclusion in the load average.
2597                  */
2598                 down_interruptible(&ahd->platform_data->dv_sem);
2599
2600                 /* Check to see if we've been signaled to exit */
2601                 ahd_lock(ahd, &s);
2602                 if ((ahd->platform_data->flags & AHD_DV_SHUTDOWN) != 0) {
2603                         ahd_unlock(ahd, &s);
2604                         break;
2605                 }
2606                 ahd_unlock(ahd, &s);
2607
2608 #ifdef AHD_DEBUG
2609                 if (ahd_debug & AHD_SHOW_DV)
2610                         printf("%s: Beginning Domain Validation\n",
2611                                ahd_name(ahd));
2612 #endif
2613
2614                 /*
2615                  * Wait for any pending commands to drain before proceeding.
2616                  */
2617                 ahd_lock(ahd, &s);
2618                 while (LIST_FIRST(&ahd->pending_scbs) != NULL) {
2619                         ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_EMPTY;
2620                         ahd_unlock(ahd, &s);
2621                         down_interruptible(&ahd->platform_data->dv_sem);
2622                         ahd_lock(ahd, &s);
2623                 }
2624
2625                 /*
2626                  * Wait for the SIMQ to be released so that DV is the
2627                  * only reason the queue is frozen.
2628                  */
2629                 while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
2630                         ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
2631                         ahd_unlock(ahd, &s);
2632                         down_interruptible(&ahd->platform_data->dv_sem);
2633                         ahd_lock(ahd, &s);
2634                 }
2635                 ahd_unlock(ahd, &s);
2636
2637                 for (target = 0; target < AHD_NUM_TARGETS; target++)
2638                         ahd_linux_dv_target(ahd, target);
2639
2640                 ahd_lock(ahd, &s);
2641                 ahd->platform_data->flags &= ~AHD_DV_ACTIVE;
2642                 ahd_unlock(ahd, &s);
2643
2644                 /*
2645                  * Release the SIMQ so that normal commands are
2646                  * allowed to continue on the bus.
2647                  */
2648                 ahd_release_simq(ahd);
2649         }
2650         up(&ahd->platform_data->eh_sem);
2651         return (0);
2652 }
2653
2654 static void
2655 ahd_linux_kill_dv_thread(struct ahd_softc *ahd)
2656 {
2657         u_long s;
2658
2659         ahd_lock(ahd, &s);
2660         if (ahd->platform_data->dv_pid != 0) {
2661                 ahd->platform_data->flags |= AHD_DV_SHUTDOWN;
2662                 ahd_unlock(ahd, &s);
2663                 up(&ahd->platform_data->dv_sem);
2664
2665                 /*
2666                  * Use the eh_sem as an indicator that the
2667                  * dv thread is exiting.  Note that the dv
2668                  * thread must still return after performing
2669                  * the up on our semaphore before it has
2670                  * completely exited this module.  Unfortunately,
2671                  * there seems to be no easy way to wait for the
2672                  * exit of a thread for which you are not the
2673                  * parent (dv threads are parented by init).
2674                  * Cross your fingers...
2675                  */
2676                 down(&ahd->platform_data->eh_sem);
2677
2678                 /*
2679                  * Mark the dv thread as already dead.  This
2680                  * avoids attempting to kill it a second time.
2681                  * This is necessary because we must kill the
2682                  * DV thread before calling ahd_free() in the
2683                  * module shutdown case to avoid bogus locking
2684                  * in the SCSI mid-layer, but ahd_free() is
2685                  * called without killing the DV thread in the
2686                  * instance detach case, so ahd_platform_free()
2687                  * calls us again to verify that the DV thread
2688                  * is dead.
2689                  */
2690                 ahd->platform_data->dv_pid = 0;
2691         } else {
2692                 ahd_unlock(ahd, &s);
2693         }
2694 }
2695
2696 #define AHD_LINUX_DV_INQ_SHORT_LEN      36
2697 #define AHD_LINUX_DV_INQ_LEN            256
2698 #define AHD_LINUX_DV_TIMEOUT            (HZ / 4)
2699
2700 #define AHD_SET_DV_STATE(ahd, targ, newstate) \
2701         ahd_set_dv_state(ahd, targ, newstate, __LINE__)
2702
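     /*
      * Record a DV state transition, counting consecutive retries of the
      * same state.  Called via AHD_SET_DV_STATE() so the caller's line
      * number is available for debug output.
      */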
2703 static __inline void
2704 ahd_set_dv_state(struct ahd_softc *ahd, struct ahd_linux_target *targ,
2705                  ahd_dv_state newstate, u_int line)
2706 {
2707         ahd_dv_state oldstate;
2708
2709         oldstate = targ->dv_state;
2710 #ifdef AHD_DEBUG
2711         if (ahd_debug & AHD_SHOW_DV)
2712                 printf("%s:%d: Going from state %d to state %d\n",
2713                        ahd_name(ahd), line, oldstate, newstate);
2714 #endif
2715
2716         if (oldstate == newstate)
2717                 targ->dv_state_retry++;
2718         else
2719                 targ->dv_state_retry = 0;
2720         targ->dv_state = newstate;
2721 }
2722
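     /*
      * Run the domain validation state machine for a single target using
      * an internally constructed command and scsi_device.
      */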
2723 static void
2724 ahd_linux_dv_target(struct ahd_softc *ahd, u_int target_offset)
2725 {
2726         struct   ahd_devinfo devinfo;
2727         struct   ahd_linux_target *targ;
2728         struct   scsi_cmnd *cmd;
2729         struct   scsi_device *scsi_dev;
2730         struct   scsi_sense_data *sense;
2731         uint8_t *buffer;
2732         u_long   s;
2733         u_int    timeout;
2734         int      echo_size;
2735
2736         sense = NULL;
2737         buffer = NULL;
2738         echo_size = 0;
2739         ahd_lock(ahd, &s);
2740         targ = ahd->platform_data->targets[target_offset];
2741         if (targ == NULL || (targ->flags & AHD_DV_REQUIRED) == 0) {
2742                 ahd_unlock(ahd, &s);
2743                 return;
2744         }
2745         ahd_compile_devinfo(&devinfo, ahd->our_id, targ->target, /*lun*/0,
2746                             targ->channel + 'A', ROLE_INITIATOR);
2747 #ifdef AHD_DEBUG
2748         if (ahd_debug & AHD_SHOW_DV) {
2749                 ahd_print_devinfo(ahd, &devinfo);
2750                 printf("Performing DV\n");
2751         }
2752 #endif
2753
2754         ahd_unlock(ahd, &s);
2755
2756         cmd = malloc(sizeof(struct scsi_cmnd), M_DEVBUF, M_WAITOK);
2757         scsi_dev = malloc(sizeof(struct scsi_device), M_DEVBUF, M_WAITOK);
2758         scsi_dev->host = ahd->platform_data->host;
2759         scsi_dev->id = devinfo.target;
2760         scsi_dev->lun = devinfo.lun;
2761         scsi_dev->channel = devinfo.channel - 'A';
2762         ahd->platform_data->dv_scsi_dev = scsi_dev;
2763
2764         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_SHORT_ASYNC);
2765
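        /*
         * Each pass through this loop builds one DV command for the current
         * state, queues it through ahd_linux_queue() with a private
         * completion routine, waits for the command to finish and for the
         * SIMQ to be released, and then lets ahd_linux_dv_transition()
         * choose the next state.
         */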
2766         while (targ->dv_state != AHD_DV_STATE_EXIT) {
2767                 timeout = AHD_LINUX_DV_TIMEOUT;
2768                 switch (targ->dv_state) {
2769                 case AHD_DV_STATE_INQ_SHORT_ASYNC:
2770                 case AHD_DV_STATE_INQ_ASYNC:
2771                 case AHD_DV_STATE_INQ_ASYNC_VERIFY:
2772                         /*
2773                          * Set things to async narrow to reduce the
2774                          * chance that the INQ will fail.
2775                          */
2776                         ahd_lock(ahd, &s);
2777                         ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
2778                                          AHD_TRANS_GOAL, /*paused*/FALSE);
2779                         ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
2780                                       AHD_TRANS_GOAL, /*paused*/FALSE);
2781                         ahd_unlock(ahd, &s);
2782                         timeout = 10 * HZ;
2783                         targ->flags &= ~AHD_INQ_VALID;
2784                         /* FALLTHROUGH */
2785                 case AHD_DV_STATE_INQ_VERIFY:
2786                 {
2787                         u_int inq_len;
2788
2789                         if (targ->dv_state == AHD_DV_STATE_INQ_SHORT_ASYNC)
2790                                 inq_len = AHD_LINUX_DV_INQ_SHORT_LEN;
2791                         else
2792                                 inq_len = targ->inq_data->additional_length + 5;
2793                         ahd_linux_dv_inq(ahd, cmd, &devinfo, targ, inq_len);
2794                         break;
2795                 }
2796                 case AHD_DV_STATE_TUR:
2797                 case AHD_DV_STATE_BUSY:
2798                         timeout = 5 * HZ;
2799                         ahd_linux_dv_tur(ahd, cmd, &devinfo);
2800                         break;
2801                 case AHD_DV_STATE_REBD:
2802                         ahd_linux_dv_rebd(ahd, cmd, &devinfo, targ);
2803                         break;
2804                 case AHD_DV_STATE_WEB:
2805                         ahd_linux_dv_web(ahd, cmd, &devinfo, targ);
2806                         break;
2807
2808                 case AHD_DV_STATE_REB:
2809                         ahd_linux_dv_reb(ahd, cmd, &devinfo, targ);
2810                         break;
2811
2812                 case AHD_DV_STATE_SU:
2813                         ahd_linux_dv_su(ahd, cmd, &devinfo, targ);
2814                         timeout = 50 * HZ;
2815                         break;
2816
2817                 default:
2818                         ahd_print_devinfo(ahd, &devinfo);
2819                         printf("Unknown DV state %d\n", targ->dv_state);
2820                         goto out;
2821                 }
2822
2823                 /* Queue the command and wait for it to complete */
2824                 /* Abuse eh_timeout in the scsi_cmnd struct for our purposes */
2825                 init_timer(&cmd->eh_timeout);
2826 #ifdef AHD_DEBUG
2827                 if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
2828                         /*
2829                          * All of the printfs during negotiation
2830                          * really slow down the negotiation.
2831                          * Add a bit of time just to be safe.
2832                          */
2833                         timeout += HZ;
2834 #endif
2835                 scsi_add_timer(cmd, timeout, ahd_linux_dv_timeout);
2836                 /*
2837                  * In 2.5.X, it is assumed that all calls from the
2838                  * "midlayer" (which we are emulating) will have the
2839                  * ahd host lock held.  For other kernels, the
2840                  * io_request_lock must be held.
2841                  */
2842 #if AHD_SCSI_HAS_HOST_LOCK != 0
2843                 ahd_lock(ahd, &s);
2844 #else
2845                 spin_lock_irqsave(&io_request_lock, s);
2846 #endif
2847                 ahd_linux_queue(cmd, ahd_linux_dv_complete);
2848 #if AHD_SCSI_HAS_HOST_LOCK != 0
2849                 ahd_unlock(ahd, &s);
2850 #else
2851                 spin_unlock_irqrestore(&io_request_lock, s);
2852 #endif
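                /*
                 * Sleep until ahd_linux_dv_complete() releases dv_cmd_sem
                 * to signal that the DV command has finished.
                 */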
2853                 down_interruptible(&ahd->platform_data->dv_cmd_sem);
2854                 /*
2855                  * Wait for the SIMQ to be released so that DV is the
2856                  * only reason the queue is frozen.
2857                  */
2858                 ahd_lock(ahd, &s);
2859                 while (AHD_DV_SIMQ_FROZEN(ahd) == 0) {
2860                         ahd->platform_data->flags |= AHD_DV_WAIT_SIMQ_RELEASE;
2861                         ahd_unlock(ahd, &s);
2862                         down_interruptible(&ahd->platform_data->dv_sem);
2863                         ahd_lock(ahd, &s);
2864                 }
2865                 ahd_unlock(ahd, &s);
2866
2867                 ahd_linux_dv_transition(ahd, cmd, &devinfo, targ);
2868         }
2869
2870 out:
2871         if ((targ->flags & AHD_INQ_VALID) != 0
2872          && ahd_linux_get_device(ahd, devinfo.channel - 'A',
2873                                  devinfo.target, devinfo.lun,
2874                                  /*alloc*/FALSE) == NULL) {
2875                 /*
2876                  * The DV state machine failed to configure this device.  
2877                  * This is normal if DV is disabled.  Since we have inquiry
2878                  * data, filter it and use the "optimistic" negotiation
2879                  * parameters found in the inquiry string.
2880                  */
2881                 ahd_linux_filter_inquiry(ahd, &devinfo);
2882                 if ((targ->flags & (AHD_BASIC_DV|AHD_ENHANCED_DV)) != 0) {
2883                         ahd_print_devinfo(ahd, &devinfo);
2884                         printf("DV failed to configure device.  "
2885                                "Please file a bug report against "
2886                                "this driver.\n");
2887                 }
2888         }
2889
2890         if (cmd != NULL)
2891                 free(cmd, M_DEVBUF);
2892
2893         if (ahd->platform_data->dv_scsi_dev != NULL) {
2894                 free(ahd->platform_data->dv_scsi_dev, M_DEVBUF);
2895                 ahd->platform_data->dv_scsi_dev = NULL;
2896         }
2897
2898         ahd_lock(ahd, &s);
2899         if (targ->dv_buffer != NULL) {
2900                 free(targ->dv_buffer, M_DEVBUF);
2901                 targ->dv_buffer = NULL;
2902         }
2903         if (targ->dv_buffer1 != NULL) {
2904                 free(targ->dv_buffer1, M_DEVBUF);
2905                 targ->dv_buffer1 = NULL;
2906         }
2907         targ->flags &= ~AHD_DV_REQUIRED;
2908         if (targ->refcount == 0)
2909                 ahd_linux_free_target(ahd, targ);
2910         ahd_unlock(ahd, &s);
2911 }
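
/*
 * A rough sketch of the sequence driven by ahd_linux_dv_target() and
 * ahd_linux_dv_transition(): a short async/narrow INQUIRY confirms the
 * device responds, the full INQUIRY data is fetched and re-verified, and
 * the SPI3 clocking bits then select either "basic" DV (INQ_VERIFY re-reads
 * the inquiry data under the negotiated transfer agreement and compares it)
 * or "enhanced" DV (the REBD/WEB/REB states run a write/read echo buffer
 * pattern test).  A miscompare or qualifying error falls back through
 * ahd_linux_fallback() to a slower agreement and the check is retried.
 */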
2912
2913 static __inline int
2914 ahd_linux_dv_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
2915 {
2916         u_long s;
2917         int retval;
2918
2919         ahd_lock(ahd, &s);
2920         retval = ahd_linux_fallback(ahd, devinfo);
2921         ahd_unlock(ahd, &s);
2922
2923         return (retval);
2924 }
2925
2926 static void
2927 ahd_linux_dv_transition(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
2928                         struct ahd_devinfo *devinfo,
2929                         struct ahd_linux_target *targ)
2930 {
2931         u_int32_t status;
2932
2933         status = aic_error_action(cmd, targ->inq_data,
2934                                   ahd_cmd_get_transaction_status(cmd),
2935                                   ahd_cmd_get_scsi_status(cmd));
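        /*
         * aic_error_action() folds the transaction and SCSI status into a
         * single action code: the SS_MASK portion selects the basic response
         * (SS_NOP, SS_RETRY, SS_TUR, SS_INQ_REFRESH, SS_START, SS_FATAL),
         * SS_ERRMASK carries an errno-style hint such as EBUSY, and the
         * SSQ_* qualifiers (SSQ_FALLBACK, SSQ_DELAY, SSQ_DELAY_RANDOM)
         * request a transfer fallback or a settle delay.
         */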
2936
2937         
2938 #ifdef AHD_DEBUG
2939         if (ahd_debug & AHD_SHOW_DV) {
2940                 ahd_print_devinfo(ahd, devinfo);
2941                 printf("Entering ahd_linux_dv_transition, state= %d, "
2942                        "status= 0x%x, cmd->result= 0x%x\n", targ->dv_state,
2943                        status, cmd->result);
2944         }
2945 #endif
2946
2947         switch (targ->dv_state) {
2948         case AHD_DV_STATE_INQ_SHORT_ASYNC:
2949         case AHD_DV_STATE_INQ_ASYNC:
2950                 switch (status & SS_MASK) {
2951                 case SS_NOP:
2952                 {
2953                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
2954                         break;
2955                 }
2956                 case SS_INQ_REFRESH:
2957                         AHD_SET_DV_STATE(ahd, targ,
2958                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
2959                         break;
2960                 case SS_TUR:
2961                 case SS_RETRY:
2962                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
2963                         if (ahd_cmd_get_transaction_status(cmd)
2964                          == CAM_REQUEUE_REQ)
2965                                 targ->dv_state_retry--;
2966                         if ((status & SS_ERRMASK) == EBUSY)
2967                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
2968                         if (targ->dv_state_retry < 10)
2969                                 break;
2970                         /* FALLTHROUGH */
2971                 default:
2972                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
2973 #ifdef AHD_DEBUG
2974                         if (ahd_debug & AHD_SHOW_DV) {
2975                                 ahd_print_devinfo(ahd, devinfo);
2976                                 printf("Failed DV inquiry, skipping\n");
2977                         }
2978 #endif
2979                         break;
2980                 }
2981                 break;
2982         case AHD_DV_STATE_INQ_ASYNC_VERIFY:
2983                 switch (status & SS_MASK) {
2984                 case SS_NOP:
2985                 {
2986                         u_int xportflags;
2987                         u_int spi3data;
2988
2989                         if (memcmp(targ->inq_data, targ->dv_buffer,
2990                                    AHD_LINUX_DV_INQ_LEN) != 0) {
2991                                 /*
2992                                  * Inquiry data must have changed.
2993                                  * Try from the top again.
2994                                  */
2995                                 AHD_SET_DV_STATE(ahd, targ,
2996                                                  AHD_DV_STATE_INQ_SHORT_ASYNC);
2997                                 break;
2998                         }
2999
3000                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state+1);
3001                         targ->flags |= AHD_INQ_VALID;
3002                         if (ahd_linux_user_dv_setting(ahd) == 0)
3003                                 break;
3004
3005                         xportflags = targ->inq_data->flags;
3006                         if ((xportflags & (SID_Sync|SID_WBus16)) == 0)
3007                                 break;
3008
3009                         spi3data = targ->inq_data->spi3data;
3010                         switch (spi3data & SID_SPI_CLOCK_DT_ST) {
3011                         default:
3012                         case SID_SPI_CLOCK_ST:
3013                                 /* Assume only basic DV is supported. */
3014                                 targ->flags |= AHD_BASIC_DV;
3015                                 break;
3016                         case SID_SPI_CLOCK_DT:
3017                         case SID_SPI_CLOCK_DT_ST:
3018                                 targ->flags |= AHD_ENHANCED_DV;
3019                                 break;
3020                         }
3021                         break;
3022                 }
3023                 case SS_INQ_REFRESH:
3024                         AHD_SET_DV_STATE(ahd, targ,
3025                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
3026                         break;
3027                 case SS_TUR:
3028                 case SS_RETRY:
3029                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3030                         if (ahd_cmd_get_transaction_status(cmd)
3031                          == CAM_REQUEUE_REQ)
3032                                 targ->dv_state_retry--;
3033
3034                         if ((status & SS_ERRMASK) == EBUSY)
3035                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
3036                         if (targ->dv_state_retry < 10)
3037                                 break;
3038                         /* FALLTHROUGH */
3039                 default:
3040                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3041 #ifdef AHD_DEBUG
3042                         if (ahd_debug & AHD_SHOW_DV) {
3043                                 ahd_print_devinfo(ahd, devinfo);
3044                                 printf("Failed DV inquiry, skipping\n");
3045                         }
3046 #endif
3047                         break;
3048                 }
3049                 break;
3050         case AHD_DV_STATE_INQ_VERIFY:
3051                 switch (status & SS_MASK) {
3052                 case SS_NOP:
3053                 {
3054
3055                         if (memcmp(targ->inq_data, targ->dv_buffer,
3056                                    AHD_LINUX_DV_INQ_LEN) == 0) {
3057                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3058                                 break;
3059                         }
3060
3061 #ifdef AHD_DEBUG
3062                         if (ahd_debug & AHD_SHOW_DV) {
3063                                 int i;
3064
3065                                 ahd_print_devinfo(ahd, devinfo);
3066                                 printf("Inquiry buffer mismatch:");
3067                                 for (i = 0; i < AHD_LINUX_DV_INQ_LEN; i++) {
3068                                         if ((i & 0xF) == 0)
3069                                                 printf("\n        ");
3070                                         printf("0x%x:0x%x ",
3071                                                ((uint8_t *)targ->inq_data)[i], 
3072                                                targ->dv_buffer[i]);
3073                                 }
3074                                 printf("\n");
3075                         }
3076 #endif
3077
3078                         if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3079                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3080                                 break;
3081                         }
3082                         /*
3083                          * Do not count "falling back"
3084                          * against our retries.
3085                          */
3086                         targ->dv_state_retry = 0;
3087                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3088                         break;
3089                 }
3090                 case SS_INQ_REFRESH:
3091                         AHD_SET_DV_STATE(ahd, targ,
3092                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
3093                         break;
3094                 case SS_TUR:
3095                 case SS_RETRY:
3096                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3097                         if (ahd_cmd_get_transaction_status(cmd)
3098                          == CAM_REQUEUE_REQ) {
3099                                 targ->dv_state_retry--;
3100                         } else if ((status & SSQ_FALLBACK) != 0) {
3101                                 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3102                                         AHD_SET_DV_STATE(ahd, targ,
3103                                                          AHD_DV_STATE_EXIT);
3104                                         break;
3105                                 }
3106                                 /*
3107                                  * Do not count "falling back"
3108                                  * against our retries.
3109                                  */
3110                                 targ->dv_state_retry = 0;
3111                         } else if ((status & SS_ERRMASK) == EBUSY)
3112                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
3113                         if (targ->dv_state_retry < 10)
3114                                 break;
3115                         /* FALLTHROUGH */
3116                 default:
3117                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3118 #ifdef AHD_DEBUG
3119                         if (ahd_debug & AHD_SHOW_DV) {
3120                                 ahd_print_devinfo(ahd, devinfo);
3121                                 printf("Failed DV inquiry, skipping\n");
3122                         }
3123 #endif
3124                         break;
3125                 }
3126                 break;
3127
3128         case AHD_DV_STATE_TUR:
3129                 switch (status & SS_MASK) {
3130                 case SS_NOP:
3131                         if ((targ->flags & AHD_BASIC_DV) != 0) {
3132                                 ahd_linux_filter_inquiry(ahd, devinfo);
3133                                 AHD_SET_DV_STATE(ahd, targ,
3134                                                  AHD_DV_STATE_INQ_VERIFY);
3135                         } else if ((targ->flags & AHD_ENHANCED_DV) != 0) {
3136                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REBD);
3137                         } else {
3138                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3139                         }
3140                         break;
3141                 case SS_RETRY:
3142                 case SS_TUR:
3143                         if ((status & SS_ERRMASK) == EBUSY) {
3144                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_BUSY);
3145                                 break;
3146                         }
3147                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3148                         if (ahd_cmd_get_transaction_status(cmd)
3149                          == CAM_REQUEUE_REQ) {
3150                                 targ->dv_state_retry--;
3151                         } else if ((status & SSQ_FALLBACK) != 0) {
3152                                 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3153                                         AHD_SET_DV_STATE(ahd, targ,
3154                                                          AHD_DV_STATE_EXIT);
3155                                         break;
3156                                 }
3157                                 /*
3158                                  * Do not count "falling back"
3159                                  * against our retries.
3160                                  */
3161                                 targ->dv_state_retry = 0;
3162                         }
3163                         if (targ->dv_state_retry >= 10) {
3164 #ifdef AHD_DEBUG
3165                                 if (ahd_debug & AHD_SHOW_DV) {
3166                                         ahd_print_devinfo(ahd, devinfo);
3167                                         printf("DV TUR retries exhausted\n");
3168                                 }
3169 #endif
3170                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3171                                 break;
3172                         }
3173                         if (status & SSQ_DELAY)
3174                                 ssleep(1);
3175
3176                         break;
3177                 case SS_START:
3178                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_SU);
3179                         break;
3180                 case SS_INQ_REFRESH:
3181                         AHD_SET_DV_STATE(ahd, targ,
3182                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
3183                         break;
3184                 default:
3185                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3186                         break;
3187                 }
3188                 break;
3189
3190         case AHD_DV_STATE_REBD:
3191                 switch (status & SS_MASK) {
3192                 case SS_NOP:
3193                 {
3194                         uint32_t echo_size;
3195
3196                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
3197                         echo_size = scsi_3btoul(&targ->dv_buffer[1]);
3198                         echo_size &= 0x1FFF;
3199 #ifdef AHD_DEBUG
3200                         if (ahd_debug & AHD_SHOW_DV) {
3201                                 ahd_print_devinfo(ahd, devinfo);
3202                                 printf("Echo buffer size= %d\n", echo_size);
3203                         }
3204 #endif
3205                         if (echo_size == 0) {
3206                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3207                                 break;
3208                         }
3209
3210                         /* Generate the buffer pattern */
3211                         targ->dv_echo_size = echo_size;
3212                         ahd_linux_generate_dv_pattern(targ);
3213                         /*
3214                          * Setup initial negotiation values.
3215                          */
3216                         ahd_linux_filter_inquiry(ahd, devinfo);
3217                         break;
3218                 }
3219                 case SS_INQ_REFRESH:
3220                         AHD_SET_DV_STATE(ahd, targ,
3221                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
3222                         break;
3223                 case SS_RETRY:
3224                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3225                         if (ahd_cmd_get_transaction_status(cmd)
3226                          == CAM_REQUEUE_REQ)
3227                                 targ->dv_state_retry--;
3228                         if (targ->dv_state_retry <= 10)
3229                                 break;
3230 #ifdef AHD_DEBUG
3231                         if (ahd_debug & AHD_SHOW_DV) {
3232                                 ahd_print_devinfo(ahd, devinfo);
3233                                 printf("DV REBD retries exhausted\n");
3234                         }
3235 #endif
3236                         /* FALLTHROUGH */
3237                 case SS_FATAL:
3238                 default:
3239                         /*
3240                          * Setup initial negotiation values
3241                          * and try level 1 DV.
3242                          */
3243                         ahd_linux_filter_inquiry(ahd, devinfo);
3244                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_INQ_VERIFY);
3245                         targ->dv_echo_size = 0;
3246                         break;
3247                 }
3248                 break;
3249
3250         case AHD_DV_STATE_WEB:
3251                 switch (status & SS_MASK) {
3252                 case SS_NOP:
3253                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_REB);
3254                         break;
3255                 case SS_INQ_REFRESH:
3256                         AHD_SET_DV_STATE(ahd, targ,
3257                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
3258                         break;
3259                 case SS_RETRY:
3260                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3261                         if (ahd_cmd_get_transaction_status(cmd)
3262                          == CAM_REQUEUE_REQ) {
3263                                 targ->dv_state_retry--;
3264                         } else if ((status & SSQ_FALLBACK) != 0) {
3265                                 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3266                                         AHD_SET_DV_STATE(ahd, targ,
3267                                                          AHD_DV_STATE_EXIT);
3268                                         break;
3269                                 }
3270                                 /*
3271                                  * Do not count "falling back"
3272                                  * against our retries.
3273                                  */
3274                                 targ->dv_state_retry = 0;
3275                         }
3276                         if (targ->dv_state_retry <= 10)
3277                                 break;
3278 #ifdef AHD_DEBUG
3279                         if (ahd_debug & AHD_SHOW_DV) {
3280                                 ahd_print_devinfo(ahd, devinfo);
3281                                 printf("DV WEB retries exhausted\n");
3282                         }
3283 #endif
3284                         /* FALLTHROUGH */
3285                 default:
3286                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3287                         break;
3288                 }
3289                 break;
3290
3291         case AHD_DV_STATE_REB:
3292                 switch (status & SS_MASK) {
3293                 case SS_NOP:
3294                         if (memcmp(targ->dv_buffer, targ->dv_buffer1,
3295                                    targ->dv_echo_size) != 0) {
3296                                 if (ahd_linux_dv_fallback(ahd, devinfo) != 0)
3297                                         AHD_SET_DV_STATE(ahd, targ,
3298                                                          AHD_DV_STATE_EXIT);
3299                                 else
3300                                         AHD_SET_DV_STATE(ahd, targ,
3301                                                          AHD_DV_STATE_WEB);
3302                                 break;
3303                         }
3304                         
3305                         if (targ->dv_buffer != NULL) {
3306                                 free(targ->dv_buffer, M_DEVBUF);
3307                                 targ->dv_buffer = NULL;
3308                         }
3309                         if (targ->dv_buffer1 != NULL) {
3310                                 free(targ->dv_buffer1, M_DEVBUF);
3311                                 targ->dv_buffer1 = NULL;
3312                         }
3313                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3314                         break;
3315                 case SS_INQ_REFRESH:
3316                         AHD_SET_DV_STATE(ahd, targ,
3317                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
3318                         break;
3319                 case SS_RETRY:
3320                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3321                         if (ahd_cmd_get_transaction_status(cmd)
3322                          == CAM_REQUEUE_REQ) {
3323                                 targ->dv_state_retry--;
3324                         } else if ((status & SSQ_FALLBACK) != 0) {
3325                                 if (ahd_linux_dv_fallback(ahd, devinfo) != 0) {
3326                                         AHD_SET_DV_STATE(ahd, targ,
3327                                                          AHD_DV_STATE_EXIT);
3328                                         break;
3329                                 }
3330                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_WEB);
3331                         }
3332                         if (targ->dv_state_retry <= 10) {
3333                                 if ((status & (SSQ_DELAY_RANDOM|SSQ_DELAY))!= 0)
3334                                         msleep(ahd->our_id*1000/10);
3335                                 break;
3336                         }
3337 #ifdef AHD_DEBUG
3338                         if (ahd_debug & AHD_SHOW_DV) {
3339                                 ahd_print_devinfo(ahd, devinfo);
3340                                 printf("DV REB retries exhausted\n");
3341                         }
3342 #endif
3343                         /* FALLTHROUGH */
3344                 default:
3345                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3346                         break;
3347                 }
3348                 break;
3349
3350         case AHD_DV_STATE_SU:
3351                 switch (status & SS_MASK) {
3352                 case SS_NOP:
3353                 case SS_INQ_REFRESH:
3354                         AHD_SET_DV_STATE(ahd, targ,
3355                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
3356                         break;
3357                 default:
3358                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3359                         break;
3360                 }
3361                 break;
3362
3363         case AHD_DV_STATE_BUSY:
3364                 switch (status & SS_MASK) {
3365                 case SS_NOP:
3366                 case SS_INQ_REFRESH:
3367                         AHD_SET_DV_STATE(ahd, targ,
3368                                          AHD_DV_STATE_INQ_SHORT_ASYNC);
3369                         break;
3370                 case SS_TUR:
3371                 case SS_RETRY:
3372                         AHD_SET_DV_STATE(ahd, targ, targ->dv_state);
3373                         if (ahd_cmd_get_transaction_status(cmd)
3374                          == CAM_REQUEUE_REQ) {
3375                                 targ->dv_state_retry--;
3376                         } else if (targ->dv_state_retry < 60) {
3377                                 if ((status & SSQ_DELAY) != 0)
3378                                         ssleep(1);
3379                         } else {
3380 #ifdef AHD_DEBUG
3381                                 if (ahd_debug & AHD_SHOW_DV) {
3382                                         ahd_print_devinfo(ahd, devinfo);
3383                                         printf("DV BUSY retries exhausted\n");
3384                                 }
3385 #endif
3386                                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3387                         }
3388                         break;
3389                 default:
3390                         AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3391                         break;
3392                 }
3393                 break;
3394
3395         default:
3396                 printf("%s: Invalid DV completion state %d\n", ahd_name(ahd),
3397                        targ->dv_state);
3398                 AHD_SET_DV_STATE(ahd, targ, AHD_DV_STATE_EXIT);
3399                 break;
3400         }
3401 }
3402
3403 static void
3404 ahd_linux_dv_fill_cmd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3405                       struct ahd_devinfo *devinfo)
3406 {
3407         memset(cmd, 0, sizeof(struct scsi_cmnd));
3408         cmd->device = ahd->platform_data->dv_scsi_dev;
3409         cmd->scsi_done = ahd_linux_dv_complete;
3410 }
3411
3412 /*
3413  * Synthesize an inquiry command.  On the return trip, it'll be
3414  * sniffed and the device transfer settings set for us.
3415  */
3416 static void
3417 ahd_linux_dv_inq(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3418                  struct ahd_devinfo *devinfo, struct ahd_linux_target *targ,
3419                  u_int request_length)
3420 {
3421
3422 #ifdef AHD_DEBUG
3423         if (ahd_debug & AHD_SHOW_DV) {
3424                 ahd_print_devinfo(ahd, devinfo);
3425                 printf("Sending INQ\n");
3426         }
3427 #endif
3428         if (targ->inq_data == NULL)
3429                 targ->inq_data = malloc(AHD_LINUX_DV_INQ_LEN,
3430                                         M_DEVBUF, M_WAITOK);
3431         if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC) {
3432                 if (targ->dv_buffer != NULL)
3433                         free(targ->dv_buffer, M_DEVBUF);
3434                 targ->dv_buffer = malloc(AHD_LINUX_DV_INQ_LEN,
3435                                          M_DEVBUF, M_WAITOK);
3436         }
3437
3438         ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3439         cmd->sc_data_direction = SCSI_DATA_READ;
3440         cmd->cmd_len = 6;
3441         cmd->cmnd[0] = INQUIRY;
3442         cmd->cmnd[4] = request_length;
3443         cmd->request_bufflen = request_length;
3444         if (targ->dv_state > AHD_DV_STATE_INQ_ASYNC)
3445                 cmd->request_buffer = targ->dv_buffer;
3446         else
3447                 cmd->request_buffer = targ->inq_data;
3448         memset(cmd->request_buffer, 0, AHD_LINUX_DV_INQ_LEN);
3449 }
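
/*
 * The short asynchronous INQUIRY is limited to 36 bytes and fills
 * targ->inq_data; later passes size the request from the device's reported
 * additional_length, and once past AHD_DV_STATE_INQ_ASYNC the data is
 * captured in targ->dv_buffer so it can be compared byte for byte against
 * the copy held in targ->inq_data.
 */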
3450
3451 static void
3452 ahd_linux_dv_tur(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3453                  struct ahd_devinfo *devinfo)
3454 {
3455
3456 #ifdef AHD_DEBUG
3457         if (ahd_debug & AHD_SHOW_DV) {
3458                 ahd_print_devinfo(ahd, devinfo);
3459                 printf("Sending TUR\n");
3460         }
3461 #endif
3462         /* Do a TUR to clear out any non-fatal transitional state */
3463         ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3464         cmd->sc_data_direction = SCSI_DATA_NONE;
3465         cmd->cmd_len = 6;
3466         cmd->cmnd[0] = TEST_UNIT_READY;
3467 }
3468
3469 #define AHD_REBD_LEN 4
3470
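/*
 * REBD issues READ BUFFER with mode 0x0b to fetch the four byte echo
 * buffer descriptor; the completion handling extracts the echo buffer
 * capacity from it (masked to 13 bits with 0x1FFF).  The WEB and REB
 * routines below then use mode 0x0a WRITE BUFFER/READ BUFFER commands to
 * write the test pattern into the echo buffer and read it back for
 * comparison.
 */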
3471 static void
3472 ahd_linux_dv_rebd(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3473                  struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3474 {
3475
3476 #ifdef AHD_DEBUG
3477         if (ahd_debug & AHD_SHOW_DV) {
3478                 ahd_print_devinfo(ahd, devinfo);
3479                 printf("Sending REBD\n");
3480         }
3481 #endif
3482         if (targ->dv_buffer != NULL)
3483                 free(targ->dv_buffer, M_DEVBUF);
3484         targ->dv_buffer = malloc(AHD_REBD_LEN, M_DEVBUF, M_WAITOK);
3485         ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3486         cmd->sc_data_direction = SCSI_DATA_READ;
3487         cmd->cmd_len = 10;
3488         cmd->cmnd[0] = READ_BUFFER;
3489         cmd->cmnd[1] = 0x0b;
3490         scsi_ulto3b(AHD_REBD_LEN, &cmd->cmnd[6]);
3491         cmd->request_bufflen = AHD_REBD_LEN;
3492         cmd->underflow = cmd->request_bufflen;
3493         cmd->request_buffer = targ->dv_buffer;
3494 }
3495
3496 static void
3497 ahd_linux_dv_web(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3498                  struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3499 {
3500
3501 #ifdef AHD_DEBUG
3502         if (ahd_debug & AHD_SHOW_DV) {
3503                 ahd_print_devinfo(ahd, devinfo);
3504                 printf("Sending WEB\n");
3505         }
3506 #endif
3507         ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3508         cmd->sc_data_direction = SCSI_DATA_WRITE;
3509         cmd->cmd_len = 10;
3510         cmd->cmnd[0] = WRITE_BUFFER;
3511         cmd->cmnd[1] = 0x0a;
3512         scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3513         cmd->request_bufflen = targ->dv_echo_size;
3514         cmd->underflow = cmd->request_bufflen;
3515         cmd->request_buffer = targ->dv_buffer;
3516 }
3517
3518 static void
3519 ahd_linux_dv_reb(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3520                  struct ahd_devinfo *devinfo, struct ahd_linux_target *targ)
3521 {
3522
3523 #ifdef AHD_DEBUG
3524         if (ahd_debug & AHD_SHOW_DV) {
3525                 ahd_print_devinfo(ahd, devinfo);
3526                 printf("Sending REB\n");
3527         }
3528 #endif
3529         ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3530         cmd->sc_data_direction = SCSI_DATA_READ;
3531         cmd->cmd_len = 10;
3532         cmd->cmnd[0] = READ_BUFFER;
3533         cmd->cmnd[1] = 0x0a;
3534         scsi_ulto3b(targ->dv_echo_size, &cmd->cmnd[6]);
3535         cmd->request_bufflen = targ->dv_echo_size;
3536         cmd->underflow = cmd->request_bufflen;
3537         cmd->request_buffer = targ->dv_buffer1;
3538 }
3539
3540 static void
3541 ahd_linux_dv_su(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
3542                 struct ahd_devinfo *devinfo,
3543                 struct ahd_linux_target *targ)
3544 {
3545         u_int le;
3546
3547         le = SID_IS_REMOVABLE(targ->inq_data) ? SSS_LOEJ : 0;
3548
3549 #ifdef AHD_DEBUG
3550         if (ahd_debug & AHD_SHOW_DV) {
3551                 ahd_print_devinfo(ahd, devinfo);
3552                 printf("Sending SU\n");
3553         }
3554 #endif
3555         ahd_linux_dv_fill_cmd(ahd, cmd, devinfo);
3556         cmd->sc_data_direction = SCSI_DATA_NONE;
3557         cmd->cmd_len = 6;
3558         cmd->cmnd[0] = START_STOP_UNIT;
3559         cmd->cmnd[4] = le | SSS_START;
3560 }
3561
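/*
 * Fallback bookkeeping: dv_next_wide_period and dv_next_narrow_period
 * remember the next sync period to attempt at each bus width, while
 * dv_max_width, dv_max_ppr_options and dv_last_ppr_options record the
 * initially negotiated limits.  This lets the routine below alternate
 * between stripping PPR options, slowing the sync rate, and switching bus
 * width, generally moving toward a slower, more conservative agreement.
 */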
3562 static int
3563 ahd_linux_fallback(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3564 {
3565         struct  ahd_linux_target *targ;
3566         struct  ahd_initiator_tinfo *tinfo;
3567         struct  ahd_transinfo *goal;
3568         struct  ahd_tmode_tstate *tstate;
3569         u_int   width;
3570         u_int   period;
3571         u_int   offset;
3572         u_int   ppr_options;
3573         u_int   cur_speed;
3574         u_int   wide_speed;
3575         u_int   narrow_speed;
3576         u_int   fallback_speed;
3577
3578 #ifdef AHD_DEBUG
3579         if (ahd_debug & AHD_SHOW_DV) {
3580                 ahd_print_devinfo(ahd, devinfo);
3581                 printf("Trying to fallback\n");
3582         }
3583 #endif
3584         targ = ahd->platform_data->targets[devinfo->target_offset];
3585         tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
3586                                     devinfo->our_scsiid,
3587                                     devinfo->target, &tstate);
3588         goal = &tinfo->goal;
3589         width = goal->width;
3590         period = goal->period;
3591         offset = goal->offset;
3592         ppr_options = goal->ppr_options;
3593         if (offset == 0)
3594                 period = AHD_ASYNC_XFER_PERIOD;
3595         if (targ->dv_next_narrow_period == 0)
3596                 targ->dv_next_narrow_period = MAX(period, AHD_SYNCRATE_ULTRA2);
3597         if (targ->dv_next_wide_period == 0)
3598                 targ->dv_next_wide_period = period;
3599         if (targ->dv_max_width == 0)
3600                 targ->dv_max_width = width;
3601         if (targ->dv_max_ppr_options == 0)
3602                 targ->dv_max_ppr_options = ppr_options;
3603         if (targ->dv_last_ppr_options == 0)
3604                 targ->dv_last_ppr_options = ppr_options;
3605
3606         cur_speed = aic_calc_speed(width, period, offset, AHD_SYNCRATE_MIN);
3607         wide_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_16_BIT,
3608                                           targ->dv_next_wide_period,
3609                                           MAX_OFFSET, AHD_SYNCRATE_MIN);
3610         narrow_speed = aic_calc_speed(MSG_EXT_WDTR_BUS_8_BIT,
3611                                             targ->dv_next_narrow_period,
3612                                             MAX_OFFSET, AHD_SYNCRATE_MIN);
3613         fallback_speed = aic_calc_speed(width, period+1, offset,
3614                                               AHD_SYNCRATE_MIN);
3615 #ifdef AHD_DEBUG
3616         if (ahd_debug & AHD_SHOW_DV) {
3617                 printf("cur_speed= %d, wide_speed= %d, narrow_speed= %d, "
3618                        "fallback_speed= %d\n", cur_speed, wide_speed,
3619                        narrow_speed, fallback_speed);
3620         }
3621 #endif
3622
3623         if (cur_speed > 160000) {
3624                 /*
3625                  * Paced/DT/IU_REQ only transfer speeds.  All we
3626                  * can do is fallback in terms of syncrate.
3627                  */
3628                 period++;
3629         } else if (cur_speed > 80000) {
3630                 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3631                         /*
3632                          * Try without IU_REQ as it may be confusing
3633                          * an expander.
3634                          */
3635                         ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3636                 } else {
3637                         /*
3638                          * Paced/DT only transfer speeds.  All we
3639                          * can do is fallback in terms of syncrate.
3640                          */
3641                         period++;
3642                         ppr_options = targ->dv_max_ppr_options;
3643                 }
3644         } else if (cur_speed > 3300) {
3645
3646                 /*
3647                  * In this range we have the following
3648                  * options, ordered from highest to
3649                  * lowest desirability:
3650                  *
3651                  * o Wide/DT
3652                  * o Wide/non-DT
3653                  * o Narrow at a potentially higher sync rate.
3654                  *
3655                  * All modes are tested with and without IU_REQ
3656                  * set since using IUs may confuse an expander.
3657                  */
3658                 if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
3659
3660                         ppr_options &= ~MSG_EXT_PPR_IU_REQ;
3661                 } else if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
3662                         /*
3663                          * Try going non-DT.
3664                          */
3665                         ppr_options = targ->dv_max_ppr_options;
3666                         ppr_options &= ~MSG_EXT_PPR_DT_REQ;
3667                 } else if (targ->dv_last_ppr_options != 0) {
3668                         /*
3669                          * Try without QAS or any other PPR options.
3670                          * We may need a non-PPR message to work with
3671                          * an expander.  We look at the "last PPR options"
3672                          * so we will perform this fallback even if the
3673                          * target responded to our PPR negotiation with
3674                          * no option bits set.
3675                          */
3676                         ppr_options = 0;
3677                 } else if (width == MSG_EXT_WDTR_BUS_16_BIT) {
3678                         /*
3679                          * If the next narrow speed is greater than
3680                          * the next wide speed, fallback to narrow.
3681                          * Otherwise fallback to the next DT/Wide setting.
3682                          * The narrow async speed will always be smaller
3683                          * than the wide async speed, so handle this case
3684                          * specifically.
3685                          */
3686                         ppr_options = targ->dv_max_ppr_options;
3687                         if (narrow_speed > fallback_speed
3688                          || period >= AHD_ASYNC_XFER_PERIOD) {
3689                                 targ->dv_next_wide_period = period+1;
3690                                 width = MSG_EXT_WDTR_BUS_8_BIT;
3691                                 period = targ->dv_next_narrow_period;
3692                         } else {
3693                                 period++;
3694                         }
3695                 } else if ((ahd->features & AHD_WIDE) != 0
3696                         && targ->dv_max_width != 0
3697                         && wide_speed >= fallback_speed
3698                         && (targ->dv_next_wide_period <= AHD_ASYNC_XFER_PERIOD
3699                          || period >= AHD_ASYNC_XFER_PERIOD)) {
3700
3701                         /*
3702                          * We are narrow.  Try falling back
3703                          * to the next wide speed with 
3704                          * all supported ppr options set.
3705                          */
3706                         targ->dv_next_narrow_period = period+1;
3707                         width = MSG_EXT_WDTR_BUS_16_BIT;
3708                         period = targ->dv_next_wide_period;
3709                         ppr_options = targ->dv_max_ppr_options;
3710                 } else {
3711                         /* Only narrow fallback is allowed. */
3712                         period++;
3713                         ppr_options = targ->dv_max_ppr_options;
3714                 }
3715         } else {
3716                 return (-1);
3717         }
3718         offset = MAX_OFFSET;
3719         ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_PACED);
3720         ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, FALSE);
3721         if (period == 0) {
3722                 period = 0;
3723                 offset = 0;
3724                 ppr_options = 0;
3725                 if (width == MSG_EXT_WDTR_BUS_8_BIT)
3726                         targ->dv_next_narrow_period = AHD_ASYNC_XFER_PERIOD;
3727                 else
3728                         targ->dv_next_wide_period = AHD_ASYNC_XFER_PERIOD;
3729         }
3730         ahd_set_syncrate(ahd, devinfo, period, offset,
3731                          ppr_options, AHD_TRANS_GOAL, FALSE);
3732         targ->dv_last_ppr_options = ppr_options;
3733         return (0);
3734 }
3735
3736 static void
3737 ahd_linux_dv_timeout(struct scsi_cmnd *cmd)
3738 {
3739         struct  ahd_softc *ahd;
3740         struct  scb *scb;
3741         u_long  flags;
3742
3743         ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
3744         ahd_lock(ahd, &flags);
3745
3746 #ifdef AHD_DEBUG
3747         if (ahd_debug & AHD_SHOW_DV) {
3748                 printf("%s: Timeout while doing DV command %x.\n",
3749                        ahd_name(ahd), cmd->cmnd[0]);
3750                 ahd_dump_card_state(ahd);
3751         }
3752 #endif
3753         
3754         /*
3755          * Guard against "done race".  No action is
3756          * required if we just completed.
3757          */
3758         if ((scb = (struct scb *)cmd->host_scribble) == NULL) {
3759                 ahd_unlock(ahd, &flags);
3760                 return;
3761         }
3762
3763         /*
3764          * Command has not completed.  Mark this
3765          * SCB as having failing status prior to
3766          * resetting the bus, so we get the correct
3767          * error code.
3768          */
3769         if ((scb->flags & SCB_SENSE) != 0)
3770                 ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
3771         else
3772                 ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
3773         ahd_reset_channel(ahd, cmd->device->channel + 'A', /*initiate*/TRUE);
3774
3775         /*
3776          * Add a minimal bus settle delay for devices that are slow to
3777          * respond after bus resets.
3778          */
3779         ahd_freeze_simq(ahd);
3780         init_timer(&ahd->platform_data->reset_timer);
3781         ahd->platform_data->reset_timer.data = (u_long)ahd;
3782         ahd->platform_data->reset_timer.expires = jiffies + HZ / 2;
3783         ahd->platform_data->reset_timer.function =
3784             (ahd_linux_callback_t *)ahd_release_simq;
3785         add_timer(&ahd->platform_data->reset_timer);
3786         if (ahd_linux_next_device_to_run(ahd) != NULL)
3787                 ahd_schedule_runq(ahd);
3788         ahd_linux_run_complete_queue(ahd);
3789         ahd_unlock(ahd, &flags);
3790 }
3791
3792 static void
3793 ahd_linux_dv_complete(struct scsi_cmnd *cmd)
3794 {
3795         struct ahd_softc *ahd;
3796
3797         ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
3798
3799         /* Delete the DV timer before it goes off! */
3800         scsi_delete_timer(cmd);
3801
3802 #ifdef AHD_DEBUG
3803         if (ahd_debug & AHD_SHOW_DV)
3804                 printf("%s:%c:%d: Command completed, status= 0x%x\n",
3805                        ahd_name(ahd), cmd->device->channel, cmd->device->id,
3806                        cmd->result);
3807 #endif
3808
3809         /* Wake up the state machine */
3810         up(&ahd->platform_data->dv_cmd_sem);
3811 }
3812
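/*
 * The echo buffer is filled with, in order: 32 bytes of an incrementing
 * count, 16 bytes alternating 0x0000/0xffff, 16 bytes alternating
 * 0x5555/0xaaaa, and then repeating groups of 0xffff followed by the
 * complement of a walking one bit (0xff 0xff 0xfe 0xff, 0xff 0xff 0xfd
 * 0xff, ...), mixing static and heavily toggling data patterns for the
 * read-back comparison done in AHD_DV_STATE_REB.
 */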
3813 static void
3814 ahd_linux_generate_dv_pattern(struct ahd_linux_target *targ)
3815 {
3816         uint16_t b;
3817         u_int    i;
3818         u_int    j;
3819
3820         if (targ->dv_buffer != NULL)
3821                 free(targ->dv_buffer, M_DEVBUF);
3822         targ->dv_buffer = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3823         if (targ->dv_buffer1 != NULL)
3824                 free(targ->dv_buffer1, M_DEVBUF);
3825         targ->dv_buffer1 = malloc(targ->dv_echo_size, M_DEVBUF, M_WAITOK);
3826
3827         i = 0;
3828
3829         b = 0x0001;
3830         for (j = 0 ; i < targ->dv_echo_size; j++) {
3831                 if (j < 32) {
3832                         /*
3833                          * 32 bytes of sequential numbers.
3834                          */
3835                         targ->dv_buffer[i++] = j & 0xff;
3836                 } else if (j < 48) {
3837                         /*
3838                          * 16 bytes of repeating 0x0000, 0xffff.
3839                          */
3840                         targ->dv_buffer[i++] = (j & 0x02) ? 0xff : 0x00;
3841                 } else if (j < 64) {
3842                         /*
3843                          * 16 bytes of repeating 0x5555, 0xaaaa.
3844                          */
3845                         targ->dv_buffer[i++] = (j & 0x02) ? 0xaa : 0x55;
3846                 } else {
3847                         /*
3848                          * pattern of:
3849                          * patter of:
3850                          *
3851                          *      ~(0x0001 shifted left once more each loop).
3852                          *      ~0x0001 << shifted once in each loop.
3853                          */
3854                         if (j & 0x02) {
3855                                 if (j & 0x01) {
3856                                         targ->dv_buffer[i++] = ~(b >> 8) & 0xff;
3857                                         b <<= 1;
3858                                         if (b == 0x0000)
3859                                                 b = 0x0001;
3860                                 } else {
3861                                         targ->dv_buffer[i++] = (~b & 0xff);
3862                                 }
3863                         } else {
3864                                 targ->dv_buffer[i++] = 0xff;
3865                         }
3866                 }
3867         }
3868 }
3869
3870 static u_int
3871 ahd_linux_user_tagdepth(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
3872 {
3873         static int warned_user;
3874         u_int tags;
3875
3876         tags = 0;
3877         if ((ahd->user_discenable & devinfo->target_mask) != 0) {
3878                 if (ahd->unit >= NUM_ELEMENTS(aic79xx_tag_info)) {
3879
3880                         if (warned_user == 0) {
3881                                 printf(KERN_WARNING
3882 "aic79xx: WARNING: Insufficient tag_info instances\n"
3883 "aic79xx: for installed controllers.  Using defaults\n"
3884 "aic79xx: Please update the aic79xx_tag_info array in\n"
3885 "aic79xx: the aic79xx_osm.c source file.\n");
3886                                 warned_user++;
3887                         }
3888                         tags = AHD_MAX_QUEUE;
3889                 } else {
3890                         adapter_tag_info_t *tag_info;
3891
3892                         tag_info = &aic79xx_tag_info[ahd->unit];
3893                         tags = tag_info->tag_commands[devinfo->target_offset];
3894                         if (tags > AHD_MAX_QUEUE)
3895                                 tags = AHD_MAX_QUEUE;
3896                 }
3897         }
3898         return (tags);
3899 }
3900
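/*
 * A dv setting of zero causes the state machine to skip the basic and
 * enhanced verification phases, a positive value enables them, and a
 * negative (unset) value defers to the CFENABLEDV bit in the SEEPROM
 * bios_control word when a SEEPROM is present; otherwise DV defaults to
 * enabled.
 */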
3901 static u_int
3902 ahd_linux_user_dv_setting(struct ahd_softc *ahd)
3903 {
3904         static int warned_user;
3905         int dv;
3906
3907         if (ahd->unit >= NUM_ELEMENTS(aic79xx_dv_settings)) {
3908
3909                 if (warned_user == 0) {
3910                         printf(KERN_WARNING
3911 "aic79xx: WARNING: Insufficient dv settings instances\n"
3912 "aic79xx: for installed controllers. Using defaults\n"
3913 "aic79xx: Please update the aic79xx_dv_settings array in\n"
3914 "aic79xx: the aic79xx_osm.c source file.\n");
3915                         warned_user++;
3916                 }
3917                 dv = -1;
3918         } else {
3919
3920                 dv = aic79xx_dv_settings[ahd->unit];
3921         }
3922
3923         if (dv < 0) {
3924                 /*
3925                  * Apply the default.
3926                  */
3927                 dv = 1;
3928                 if (ahd->seep_config != 0)
3929                         dv = (ahd->seep_config->bios_control & CFENABLEDV);
3930         }
3931         return (dv);
3932 }
3933
3934 static void
3935 ahd_linux_setup_user_rd_strm_settings(struct ahd_softc *ahd)
3936 {
3937         static  int warned_user;
3938         u_int   rd_strm_mask;
3939         u_int   target_id;
3940
3941         /*
3942          * If we have specific read streaming info for this controller,
3943          * apply it.  Otherwise use the defaults.
3944          */
3945          if (ahd->unit >= NUM_ELEMENTS(aic79xx_rd_strm_info)) {
3946
3947                 if (warned_user == 0) {
3948
3949                         printf(KERN_WARNING
3950 "aic79xx: WARNING: Insufficient rd_strm instances\n"
3951 "aic79xx: for installed controllers. Using defaults\n"
3952 "aic79xx: Please update the aic79xx_rd_strm_info array\n"
3953 "aic79xx: in the aic79xx_osm.c source file.\n");
3954                         warned_user++;
3955                 }
3956                 rd_strm_mask = AIC79XX_CONFIGED_RD_STRM;
3957         } else {
3958
3959                 rd_strm_mask = aic79xx_rd_strm_info[ahd->unit];
3960         }
3961         for (target_id = 0; target_id < 16; target_id++) {
3962                 struct ahd_devinfo devinfo;
3963                 struct ahd_initiator_tinfo *tinfo;
3964                 struct ahd_tmode_tstate *tstate;
3965
3966                 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
3967                                             target_id, &tstate);
3968                 ahd_compile_devinfo(&devinfo, ahd->our_id, target_id,
3969                                     CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR);
3970                 tinfo->user.ppr_options &= ~MSG_EXT_PPR_RD_STRM;
3971                 if ((rd_strm_mask & devinfo.target_mask) != 0)
3972                         tinfo->user.ppr_options |= MSG_EXT_PPR_RD_STRM;
3973         }
3974 }
3975
3976 /*
3977  * Determines the queue depth for a given device.
3978  */
3979 static void
3980 ahd_linux_device_queue_depth(struct ahd_softc *ahd,
3981                              struct ahd_linux_device *dev)
3982 {
3983         struct  ahd_devinfo devinfo;
3984         u_int   tags;
3985
3986         ahd_compile_devinfo(&devinfo,
3987                             ahd->our_id,
3988                             dev->target->target, dev->lun,
3989                             dev->target->channel == 0 ? 'A' : 'B',
3990                             ROLE_INITIATOR);
3991         tags = ahd_linux_user_tagdepth(ahd, &devinfo);
3992         if (tags != 0
3993          && dev->scsi_device != NULL
3994          && dev->scsi_device->tagged_supported != 0) {
3995
3996                 ahd_set_tags(ahd, &devinfo, AHD_QUEUE_TAGGED);
3997                 ahd_print_devinfo(ahd, &devinfo);
3998                 printf("Tagged Queuing enabled.  Depth %d\n", tags);
3999         } else {
4000                 ahd_set_tags(ahd, &devinfo, AHD_QUEUE_NONE);
4001         }
4002 }
4003
4004 static void
4005 ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
4006 {
4007         struct   ahd_cmd *acmd;
4008         struct   scsi_cmnd *cmd;
4009         struct   scb *scb;
4010         struct   hardware_scb *hscb;
4011         struct   ahd_initiator_tinfo *tinfo;
4012         struct   ahd_tmode_tstate *tstate;
4013         u_int    col_idx;
4014         uint16_t mask;
4015
4016         if ((dev->flags & AHD_DEV_ON_RUN_LIST) != 0)
4017                 panic("running device on run list");
4018
4019         while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL
4020             && dev->openings > 0 && dev->qfrozen == 0) {
4021
4022                 /*
4023                  * Schedule us to run later.  The only reason we are not
4024                  * running is because the whole controller Q is frozen.
4025                  */
4026                 if (ahd->platform_data->qfrozen != 0
4027                  && AHD_DV_SIMQ_FROZEN(ahd) == 0) {
4028
4029                         TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
4030                                           dev, links);
4031                         dev->flags |= AHD_DEV_ON_RUN_LIST;
4032                         return;
4033                 }
4034
4035                 cmd = &acmd_scsi_cmd(acmd);
4036
4037                 /*
4038                  * Get an scb to use.
4039                  */
4040                 tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
4041                                             cmd->device->id, &tstate);
4042                 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
4043                  || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
4044                         col_idx = AHD_NEVER_COL_IDX;
4045                 } else {
4046                         col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
4047                                                     cmd->device->lun);
4048                 }
4049                 if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
4050                         TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
4051                                          dev, links);
4052                         dev->flags |= AHD_DEV_ON_RUN_LIST;
4053                         ahd->flags |= AHD_RESOURCE_SHORTAGE;
4054                         return;
4055                 }
4056                 TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
4057                 scb->io_ctx = cmd;
4058                 scb->platform_data->dev = dev;
4059                 hscb = scb->hscb;
4060                 cmd->host_scribble = (char *)scb;
4061
4062                 /*
4063                  * Fill out basics of the HSCB.
4064                  */
4065                 hscb->control = 0;
4066                 hscb->scsiid = BUILD_SCSIID(ahd, cmd);
4067                 hscb->lun = cmd->device->lun;
4068                 scb->hscb->task_management = 0;
4069                 mask = SCB_GET_TARGET_MASK(ahd, scb);
4070
4071                 if ((ahd->user_discenable & mask) != 0)
4072                         hscb->control |= DISCENB;
4073
4074                 if (AHD_DV_CMD(cmd) != 0)
4075                         scb->flags |= SCB_SILENT;
4076
4077                 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
4078                         scb->flags |= SCB_PACKETIZED;
4079
4080                 if ((tstate->auto_negotiate & mask) != 0) {
4081                         scb->flags |= SCB_AUTO_NEGOTIATE;
4082                         scb->hscb->control |= MK_MESSAGE;
4083                 }
4084
4085                 if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
4086 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
4087                         int     msg_bytes;
4088                         uint8_t tag_msgs[2];
4089
4090                         msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
4091                         if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
4092                                 hscb->control |= tag_msgs[0];
4093                                 if (tag_msgs[0] == MSG_ORDERED_TASK)
4094                                         dev->commands_since_idle_or_otag = 0;
4095                         } else
4096 #endif
4097                         if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
4098                          && (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
4099                                 hscb->control |= MSG_ORDERED_TASK;
4100                                 dev->commands_since_idle_or_otag = 0;
4101                         } else {
4102                                 hscb->control |= MSG_SIMPLE_TASK;
4103                         }
4104                 }
4105
4106                 hscb->cdb_len = cmd->cmd_len;
4107                 memcpy(hscb->shared_data.idata.cdb, cmd->cmnd, hscb->cdb_len);
4108
4109                 scb->sg_count = 0;
4110                 ahd_set_residual(scb, 0);
4111                 ahd_set_sense_residual(scb, 0);
4112                 if (cmd->use_sg != 0) {
4113                         void    *sg;
4114                         struct   scatterlist *cur_seg;
4115                         u_int    nseg;
4116                         int      dir;
4117
4118                         cur_seg = (struct scatterlist *)cmd->request_buffer;
4119                         dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
4120                         nseg = pci_map_sg(ahd->dev_softc, cur_seg,
4121                                           cmd->use_sg, dir);
4122                         scb->platform_data->xfer_len = 0;
4123                         for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) {
4124                                 bus_addr_t addr;
4125                                 bus_size_t len;
4126
4127                                 addr = sg_dma_address(cur_seg);
4128                                 len = sg_dma_len(cur_seg);
4129                                 scb->platform_data->xfer_len += len;
4130                                 sg = ahd_sg_setup(ahd, scb, sg, addr, len,
4131                                                   /*last*/nseg == 1);
4132                         }
4133                 } else if (cmd->request_bufflen != 0) {
4134                         void *sg;
4135                         bus_addr_t addr;
4136                         int dir;
4137
4138                         sg = scb->sg_list;
4139                         dir = scsi_to_pci_dma_dir(cmd->sc_data_direction);
4140                         addr = pci_map_single(ahd->dev_softc,
4141                                               cmd->request_buffer,
4142                                               cmd->request_bufflen, dir);
4143                         scb->platform_data->xfer_len = cmd->request_bufflen;
4144                         scb->platform_data->buf_busaddr = addr;
4145                         sg = ahd_sg_setup(ahd, scb, sg, addr,
4146                                           cmd->request_bufflen, /*last*/TRUE);
4147                 }
4148
4149                 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
4150                 dev->openings--;
4151                 dev->active++;
4152                 dev->commands_issued++;
4153
4154                 /* Update the error counting bucket and dump if needed */
4155                 if (dev->target->cmds_since_error) {
4156                         dev->target->cmds_since_error++;
4157                         if (dev->target->cmds_since_error >
4158                             AHD_LINUX_ERR_THRESH)
4159                                 dev->target->cmds_since_error = 0;
4160                 }
4161
4162                 if ((dev->flags & AHD_DEV_PERIODIC_OTAG) != 0)
4163                         dev->commands_since_idle_or_otag++;
4164                 scb->flags |= SCB_ACTIVE;
4165                 ahd_queue_scb(ahd, scb);
4166         }
4167 }
4168
4169 /*
4170  * SCSI controller interrupt handler.
4171  */
4172 irqreturn_t
4173 ahd_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
4174 {
4175         struct  ahd_softc *ahd;
4176         u_long  flags;
4177         int     ours;
4178
4179         ahd = (struct ahd_softc *) dev_id;
4180         ahd_lock(ahd, &flags); 
4181         ours = ahd_intr(ahd);
4182         if (ahd_linux_next_device_to_run(ahd) != NULL)
4183                 ahd_schedule_runq(ahd);
4184         ahd_linux_run_complete_queue(ahd);
4185         ahd_unlock(ahd, &flags);
4186         return IRQ_RETVAL(ours);
4187 }
4188
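/*
 * Flush any commands sitting on the completion queue back to the
 * SCSI mid-layer.
 */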
4189 void
4190 ahd_platform_flushwork(struct ahd_softc *ahd)
4191 {
4192
4193         while (ahd_linux_run_complete_queue(ahd) != NULL)
4194                 ;
4195 }
4196
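/*
 * Allocate and initialize per-target state for the given channel and
 * target id.
 */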
4197 static struct ahd_linux_target*
4198 ahd_linux_alloc_target(struct ahd_softc *ahd, u_int channel, u_int target)
4199 {
4200         struct ahd_linux_target *targ;
4201
4202         targ = malloc(sizeof(*targ), M_DEVBUF, M_NOWAIT);
4203         if (targ == NULL)
4204                 return (NULL);
4205         memset(targ, 0, sizeof(*targ));
4206         targ->channel = channel;
4207         targ->target = target;
4208         targ->ahd = ahd;
4209         targ->flags = AHD_DV_REQUIRED;
4210         ahd->platform_data->targets[target] = targ;
4211         return (targ);
4212 }
4213
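/*
 * Tear down per-target state allocated by ahd_linux_alloc_target().
 */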
4214 static void
4215 ahd_linux_free_target(struct ahd_softc *ahd, struct ahd_linux_target *targ)
4216 {
4217         struct ahd_devinfo devinfo;
4218         struct ahd_initiator_tinfo *tinfo;
4219         struct ahd_tmode_tstate *tstate;
4220         u_int our_id;
4221         u_int target_offset;
4222         char channel;
4223
4224         /*
4225          * Force a negotiation to async/narrow on any
4226          * future command to this device unless a bus
4227          * reset occurs between now and that command.
4228          */
4229         channel = 'A' + targ->channel;
4230         our_id = ahd->our_id;
4231         target_offset = targ->target;
4232         tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
4233                                     targ->target, &tstate);
4234         ahd_compile_devinfo(&devinfo, our_id, targ->target, CAM_LUN_WILDCARD,
4235                             channel, ROLE_INITIATOR);
4236         ahd_set_syncrate(ahd, &devinfo, 0, 0, 0,
4237                          AHD_TRANS_GOAL, /*paused*/FALSE);
4238         ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
4239                       AHD_TRANS_GOAL, /*paused*/FALSE);
4240         ahd_update_neg_request(ahd, &devinfo, tstate, tinfo, AHD_NEG_ALWAYS);
4241         ahd->platform_data->targets[target_offset] = NULL;
4242         if (targ->inq_data != NULL)
4243                 free(targ->inq_data, M_DEVBUF);
4244         if (targ->dv_buffer != NULL)
4245                 free(targ->dv_buffer, M_DEVBUF);
4246         if (targ->dv_buffer1 != NULL)
4247                 free(targ->dv_buffer1, M_DEVBUF);
4248         free(targ, M_DEVBUF);
4249 }
4250
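/*
 * Allocate per-lun device state.  New devices start unconfigured and
 * untagged with a single opening.
 */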
4251 static struct ahd_linux_device*
4252 ahd_linux_alloc_device(struct ahd_softc *ahd,
4253                  struct ahd_linux_target *targ, u_int lun)
4254 {
4255         struct ahd_linux_device *dev;
4256
4257         dev = malloc(sizeof(*dev), M_DEVBUF, M_NOWAIT);
4258         if (dev == NULL)
4259                 return (NULL);
4260         memset(dev, 0, sizeof(*dev));
4261         init_timer(&dev->timer);
4262         TAILQ_INIT(&dev->busyq);
4263         dev->flags = AHD_DEV_UNCONFIGURED;
4264         dev->lun = lun;
4265         dev->target = targ;
4266
4267         /*
4268          * We start out life using untagged
4269          * transactions of which we allow one.
4270          */
4271         dev->openings = 1;
4272
4273         /*
4274          * Set maxtags to 0.  This will be changed if we
4275          * later determine that we are dealing with
4276          * a tagged queuing capable device.
4277          */
4278         dev->maxtags = 0;
4279         
4280         targ->refcount++;
4281         targ->devices[lun] = dev;
4282         return (dev);
4283 }
4284
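/*
 * Release per-lun device state.  The parent target is freed once its
 * last device is gone and DV is no longer required.
 */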
4285 static void
4286 ahd_linux_free_device(struct ahd_softc *ahd, struct ahd_linux_device *dev)
4287 {
4288         struct ahd_linux_target *targ;
4289
4290         del_timer(&dev->timer);
4291         targ = dev->target;
4292         targ->devices[dev->lun] = NULL;
4293         free(dev, M_DEVBUF);
4294         targ->refcount--;
4295         if (targ->refcount == 0
4296          && (targ->flags & AHD_DV_REQUIRED) == 0)
4297                 ahd_linux_free_target(ahd, targ);
4298 }
4299
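/*
 * Report asynchronous events from the core driver (transfer
 * negotiation changes, bus device resets, and bus resets) to the OS.
 */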
4300 void
4301 ahd_send_async(struct ahd_softc *ahd, char channel,
4302                u_int target, u_int lun, ac_code code, void *arg)
4303 {
4304         switch (code) {
4305         case AC_TRANSFER_NEG:
4306         {
4307                 char    buf[80];
4308                 struct  ahd_linux_target *targ;
4309                 struct  info_str info;
4310                 struct  ahd_initiator_tinfo *tinfo;
4311                 struct  ahd_tmode_tstate *tstate;
4312
4313                 info.buffer = buf;
4314                 info.length = sizeof(buf);
4315                 info.offset = 0;
4316                 info.pos = 0;
4317                 tinfo = ahd_fetch_transinfo(ahd, channel, ahd->our_id,
4318                                             target, &tstate);
4319
4320                 /*
4321                  * Don't bother reporting results while
4322                  * negotiations are still pending.
4323                  */
4324                 if (tinfo->curr.period != tinfo->goal.period
4325                  || tinfo->curr.width != tinfo->goal.width
4326                  || tinfo->curr.offset != tinfo->goal.offset
4327                  || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
4328                         if (bootverbose == 0)
4329                                 break;
4330
4331                 /*
4332                  * Don't bother reporting results that
4333                  * are identical to those last reported.
4334                  */
4335                 targ = ahd->platform_data->targets[target];
4336                 if (targ == NULL)
4337                         break;
4338                 if (tinfo->curr.period == targ->last_tinfo.period
4339                  && tinfo->curr.width == targ->last_tinfo.width
4340                  && tinfo->curr.offset == targ->last_tinfo.offset
4341                  && tinfo->curr.ppr_options == targ->last_tinfo.ppr_options)
4342                         if (bootverbose == 0)
4343                                 break;
4344
4345                 targ->last_tinfo.period = tinfo->curr.period;
4346                 targ->last_tinfo.width = tinfo->curr.width;
4347                 targ->last_tinfo.offset = tinfo->curr.offset;
4348                 targ->last_tinfo.ppr_options = tinfo->curr.ppr_options;
4349
4350                 printf("(%s:%c:", ahd_name(ahd), channel);
4351                 if (target == CAM_TARGET_WILDCARD)
4352                         printf("*): ");
4353                 else
4354                         printf("%d): ", target);
4355                 ahd_format_transinfo(&info, &tinfo->curr);
4356                 if (info.pos < info.length)
4357                         *info.buffer = '\0';
4358                 else
4359                         buf[info.length - 1] = '\0';
4360                 printf("%s", buf);
4361                 break;
4362         }
4363         case AC_SENT_BDR:
4364         {
4365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
4366                 WARN_ON(lun != CAM_LUN_WILDCARD);
4367                 scsi_report_device_reset(ahd->platform_data->host,
4368                                          channel - 'A', target);
4369 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
4370                 Scsi_Device *scsi_dev;
4371
4372                 /*
4373                  * Find the SCSI device associated with this
4374                  * request and indicate that a UA is expected.
4375                  */
4376                 for (scsi_dev = ahd->platform_data->host->host_queue;
4377                      scsi_dev != NULL; scsi_dev = scsi_dev->next) {
4378                         if (channel - 'A' == scsi_dev->channel
4379                          && target == scsi_dev->id
4380                          && (lun == CAM_LUN_WILDCARD
4381                           || lun == scsi_dev->lun)) {
4382                                 scsi_dev->was_reset = 1;
4383                                 scsi_dev->expecting_cc_ua = 1;
4384                         }
4385                 }
4386 #endif
4387                 break;
4388         }
4389         case AC_BUS_RESET:
4390 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
4391                 if (ahd->platform_data->host != NULL) {
4392                         scsi_report_bus_reset(ahd->platform_data->host,
4393                                               channel - 'A');
4394                 }
4395 #endif
4396                 break;
4397         default:
4398                 panic("ahd_send_async: Unexpected async event");
4399         }
4400 }
4401
4402 /*
4403  * Calls the higher level scsi done function and frees the scb.
4404  */
4405 void
4406 ahd_done(struct ahd_softc *ahd, struct scb *scb)
4407 {
4408         Scsi_Cmnd *cmd;
4409         struct    ahd_linux_device *dev;
4410
4411         if ((scb->flags & SCB_ACTIVE) == 0) {
4412                 printf("SCB %d done'd twice\n", SCB_GET_TAG(scb));
4413                 ahd_dump_card_state(ahd);
4414                 panic("Stopping for safety");
4415         }
4416         LIST_REMOVE(scb, pending_links);
4417         cmd = scb->io_ctx;
4418         dev = scb->platform_data->dev;
4419         dev->active--;
4420         dev->openings++;
4421         if ((cmd->result & (CAM_DEV_QFRZN << 16)) != 0) {
4422                 cmd->result &= ~(CAM_DEV_QFRZN << 16);
4423                 dev->qfrozen--;
4424         }
4425         ahd_linux_unmap_scb(ahd, scb);
4426
4427         /*
4428          * Guard against stale sense data.
4429          * The Linux mid-layer assumes that sense
4430          * was retrieved anytime the first byte of
4431          * the sense buffer looks "sane".
4432          */
4433         cmd->sense_buffer[0] = 0;
4434         if (ahd_get_transaction_status(scb) == CAM_REQ_INPROG) {
4435                 uint32_t amount_xferred;
4436
4437                 amount_xferred =
4438                     ahd_get_transfer_length(scb) - ahd_get_residual(scb);
4439                 if ((scb->flags & SCB_TRANSMISSION_ERROR) != 0) {
4440 #ifdef AHD_DEBUG
4441                         if ((ahd_debug & AHD_SHOW_MISC) != 0) {
4442                                 ahd_print_path(ahd, scb);
4443                                 printf("Set CAM_UNCOR_PARITY\n");
4444                         }
4445 #endif
4446                         ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
4447 #ifdef AHD_REPORT_UNDERFLOWS
4448                 /*
4449                  * This code is disabled by default as some
4450                  * clients of the SCSI system do not properly
4451                  * initialize the underflow parameter.  This
4452                  * results in spurious termination of commands
4453                  * that complete as expected (e.g. underflow is
4454                  * allowed as the command can return variable amounts
4455                  * of data).
4456                  */
4457                 } else if (amount_xferred < scb->io_ctx->underflow) {
4458                         u_int i;
4459
4460                         ahd_print_path(ahd, scb);
4461                         printf("CDB:");
4462                         for (i = 0; i < scb->io_ctx->cmd_len; i++)
4463                                 printf(" 0x%x", scb->io_ctx->cmnd[i]);
4464                         printf("\n");
4465                         ahd_print_path(ahd, scb);
4466                         printf("Saw underflow (%ld of %ld bytes). "
4467                                "Treated as error\n",
4468                                 ahd_get_residual(scb),
4469                                 ahd_get_transfer_length(scb));
4470                         ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
4471 #endif
4472                 } else {
4473                         ahd_set_transaction_status(scb, CAM_REQ_CMP);
4474                 }
4475         } else if (ahd_get_transaction_status(scb) == CAM_SCSI_STATUS_ERROR) {
4476                 ahd_linux_handle_scsi_status(ahd, dev, scb);
4477         } else if (ahd_get_transaction_status(scb) == CAM_SEL_TIMEOUT) {
4478                 dev->flags |= AHD_DEV_UNCONFIGURED;
4479                 if (AHD_DV_CMD(cmd) == FALSE)
4480                         dev->target->flags &= ~AHD_DV_REQUIRED;
4481         }
4482         /*
4483          * Start DV for devices that require it assuming the first command
4484          * sent does not result in a selection timeout.
4485          */
4486         if (ahd_get_transaction_status(scb) != CAM_SEL_TIMEOUT
4487          && (dev->target->flags & AHD_DV_REQUIRED) != 0)
4488                 ahd_linux_start_dv(ahd);
4489
4490         if (dev->openings == 1
4491          && ahd_get_transaction_status(scb) == CAM_REQ_CMP
4492          && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
4493                 dev->tag_success_count++;
4494         /*
4495          * Some devices deal with temporary internal resource
4496          * shortages by returning queue full.  When the queue
4497          * full occurs, we throttle back.  Slowly try to get
4498          * back to our previous queue depth.
4499          */
4500         if ((dev->openings + dev->active) < dev->maxtags
4501          && dev->tag_success_count > AHD_TAG_SUCCESS_INTERVAL) {
4502                 dev->tag_success_count = 0;
4503                 dev->openings++;
4504         }
4505
4506         if (dev->active == 0)
4507                 dev->commands_since_idle_or_otag = 0;
4508
4509         if (TAILQ_EMPTY(&dev->busyq)) {
4510                 if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
4511                  && dev->active == 0
4512                  && (dev->flags & AHD_DEV_TIMER_ACTIVE) == 0)
4513                         ahd_linux_free_device(ahd, dev);
4514         } else if ((dev->flags & AHD_DEV_ON_RUN_LIST) == 0) {
4515                 TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq, dev, links);
4516                 dev->flags |= AHD_DEV_ON_RUN_LIST;
4517         }
4518
4519         if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
4520                 printf("Recovery SCB completes\n");
4521                 if (ahd_get_transaction_status(scb) == CAM_BDR_SENT
4522                  || ahd_get_transaction_status(scb) == CAM_REQ_ABORTED)
4523                         ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
4524                 if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
4525                         scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
4526                         up(&ahd->platform_data->eh_sem);
4527                 }
4528         }
4529
4530         ahd_free_scb(ahd, scb);
4531         ahd_linux_queue_cmd_complete(ahd, cmd);
4532
4533         if ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_EMPTY) != 0
4534          && LIST_FIRST(&ahd->pending_scbs) == NULL) {
4535                 ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_EMPTY;
4536                 up(&ahd->platform_data->dv_sem);
4537         }
4538 }
4539
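/*
 * Handle non-zero SCSI status returns: copy sense data back to the
 * mid-layer command and throttle our queue depth on queue full or
 * busy conditions.
 */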
4540 static void
4541 ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
4542                              struct ahd_linux_device *dev, struct scb *scb)
4543 {
4544         struct  ahd_devinfo devinfo;
4545
4546         ahd_compile_devinfo(&devinfo,
4547                             ahd->our_id,
4548                             dev->target->target, dev->lun,
4549                             dev->target->channel == 0 ? 'A' : 'B',
4550                             ROLE_INITIATOR);
4551         
4552         /*
4553          * We don't currently trust the mid-layer to
4554          * properly deal with queue full or busy.  So,
4555          * when one occurs, we tell the mid-layer to
4556          * unconditionally requeue the command to us
4557          * so that we can retry it ourselves.  We also
4558          * implement our own throttling mechanism so
4559          * we don't clobber the device with too many
4560          * commands.
4561          */
4562         switch (ahd_get_scsi_status(scb)) {
4563         default:
4564                 break;
4565         case SCSI_STATUS_CHECK_COND:
4566         case SCSI_STATUS_CMD_TERMINATED:
4567         {
4568                 Scsi_Cmnd *cmd;
4569
4570                 /*
4571                  * Copy sense information to the OS's cmd
4572                  * structure if it is available.
4573                  */
4574                 cmd = scb->io_ctx;
4575                 if ((scb->flags & (SCB_SENSE|SCB_PKT_SENSE)) != 0) {
4576                         struct scsi_status_iu_header *siu;
4577                         u_int sense_size;
4578                         u_int sense_offset;
4579
4580                         if (scb->flags & SCB_SENSE) {
4581                                 sense_size = MIN(sizeof(struct scsi_sense_data)
4582                                                - ahd_get_sense_residual(scb),
4583                                                  sizeof(cmd->sense_buffer));
4584                                 sense_offset = 0;
4585                         } else {
4586                                 /*
4587                                  * Copy only the sense data into the provided
4588                                  * buffer.
4589                                  */
4590                                 siu = (struct scsi_status_iu_header *)
4591                                     scb->sense_data;
4592                                 sense_size = MIN(scsi_4btoul(siu->sense_length),
4593                                                 sizeof(cmd->sense_buffer));
4594                                 sense_offset = SIU_SENSE_OFFSET(siu);
4595                         }
4596
4597                         memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
4598                         memcpy(cmd->sense_buffer,
4599                                ahd_get_sense_buf(ahd, scb)
4600                                + sense_offset, sense_size);
4601                         cmd->result |= (DRIVER_SENSE << 24);
4602
4603 #ifdef AHD_DEBUG
4604                         if (ahd_debug & AHD_SHOW_SENSE) {
4605                                 int i;
4606
4607                                 printf("Copied %d bytes of sense data at %d:",
4608                                        sense_size, sense_offset);
4609                                 for (i = 0; i < sense_size; i++) {
4610                                         if ((i & 0xF) == 0)
4611                                                 printf("\n");
4612                                         printf("0x%x ", cmd->sense_buffer[i]);
4613                                 }
4614                                 printf("\n");
4615                         }
4616 #endif
4617                 }
4618                 break;
4619         }
4620         case SCSI_STATUS_QUEUE_FULL:
4621         {
4622                 /*
4623                  * By the time the core driver has returned this
4624                  * command, all other commands that were queued
4625                  * to us but not the device have been returned.
4626                  * This ensures that dev->active is equal to
4627                  * the number of commands actually queued to
4628                  * the device.
4629                  */
4630                 dev->tag_success_count = 0;
4631                 if (dev->active != 0) {
4632                         /*
4633                          * Drop our opening count to the number
4634                          * of commands currently outstanding.
4635                          */
4636                         dev->openings = 0;
4637 #ifdef AHD_DEBUG
4638                         if ((ahd_debug & AHD_SHOW_QFULL) != 0) {
4639                                 ahd_print_path(ahd, scb);
4640                                 printf("Dropping tag count to %d\n",
4641                                        dev->active);
4642                         }
4643 #endif
4644                         if (dev->active == dev->tags_on_last_queuefull) {
4645
4646                                 dev->last_queuefull_same_count++;
4647                                 /*
4648                                  * If we repeatedly see a queue full
4649                                  * at the same queue depth, this
4650                                  * device has a fixed number of tag
4651                                  * slots.  Lock in this tag depth
4652                                  * so we stop seeing queue fulls from
4653                                  * this device.
4654                                  */
4655                                 if (dev->last_queuefull_same_count
4656                                  == AHD_LOCK_TAGS_COUNT) {
4657                                         dev->maxtags = dev->active;
4658                                         ahd_print_path(ahd, scb);
4659                                         printf("Locking max tag count at %d\n",
4660                                                dev->active);
4661                                 }
4662                         } else {
4663                                 dev->tags_on_last_queuefull = dev->active;
4664                                 dev->last_queuefull_same_count = 0;
4665                         }
4666                         ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
4667                         ahd_set_scsi_status(scb, SCSI_STATUS_OK);
4668                         ahd_platform_set_tags(ahd, &devinfo,
4669                                      (dev->flags & AHD_DEV_Q_BASIC)
4670                                    ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
4671                         break;
4672                 }
4673                 /*
4674                  * Drop down to a single opening, and treat this
4675                  * as if the target returned BUSY SCSI status.
4676                  */
4677                 dev->openings = 1;
4678                 ahd_platform_set_tags(ahd, &devinfo,
4679                              (dev->flags & AHD_DEV_Q_BASIC)
4680                            ? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
4681                 ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
4682                 /* FALLTHROUGH */
4683         }
4684         case SCSI_STATUS_BUSY:
4685                 /*
4686                  * Set a short timer to defer sending commands for
4687                  * a bit since Linux will not delay in this case.
4688                  */
4689                 if ((dev->flags & AHD_DEV_TIMER_ACTIVE) != 0) {
4690                         printf("%s:%c:%d: Device Timer still active during "
4691                                "busy processing\n", ahd_name(ahd),
4692                                'A' + dev->target->channel, dev->target->target);
4693                         break;
4694                 }
4695                 dev->flags |= AHD_DEV_TIMER_ACTIVE;
4696                 dev->qfrozen++;
4697                 init_timer(&dev->timer);
4698                 dev->timer.data = (u_long)dev;
4699                 dev->timer.expires = jiffies + (HZ/2);
4700                 dev->timer.function = ahd_linux_dev_timed_unfreeze;
4701                 add_timer(&dev->timer);
4702                 break;
4703         }
4704 }
4705
4706 static void
4707 ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, Scsi_Cmnd *cmd)
4708 {
4709         /*
4710          * Typically, the complete queue has very few entries
4711          * queued to it before the queue is emptied by
4712          * ahd_linux_run_complete_queue, so sorting the entries
4713          * by generation number should be inexpensive.
4714          * We perform the sort so that commands that complete
4715          * with an error are retuned in the order origionally
4716          * with an error are returned in the order originally
4717          * are performed in order.  The underlying ahd routines do
4718          * not guarantee the order that aborted commands will be
4719          * returned to us.
4720          */
4721         struct ahd_completeq *completeq;
4722         struct ahd_cmd *list_cmd;
4723         struct ahd_cmd *acmd;
4724
4725         /*
4726          * Map CAM error codes into Linux Error codes.  We
4727          * avoid the conversion so that the DV code has the
4728          * full error information available when making
4729          * state change decisions.
4730          */
4731         if (AHD_DV_CMD(cmd) == FALSE) {
4732                 uint32_t status;
4733                 u_int new_status;
4734
4735                 status = ahd_cmd_get_transaction_status(cmd);
4736                 if (status != CAM_REQ_CMP) {
4737                         struct ahd_linux_device *dev;
4738                         struct ahd_devinfo devinfo;
4739                         cam_status cam_status;
4740                         uint32_t action;
4741                         u_int scsi_status;
4742
4743                         dev = ahd_linux_get_device(ahd, cmd->device->channel,
4744                                                    cmd->device->id,
4745                                                    cmd->device->lun,
4746                                                    /*alloc*/FALSE);
4747
4748                         if (dev == NULL)
4749                                 goto no_fallback;
4750
4751                         ahd_compile_devinfo(&devinfo,
4752                                             ahd->our_id,
4753                                             dev->target->target, dev->lun,
4754                                             dev->target->channel == 0 ? 'A':'B',
4755                                             ROLE_INITIATOR);
4756
4757                         scsi_status = ahd_cmd_get_scsi_status(cmd);
4758                         cam_status = ahd_cmd_get_transaction_status(cmd);
4759                         action = aic_error_action(cmd, dev->target->inq_data,
4760                                                   cam_status, scsi_status);
4761                         if ((action & SSQ_FALLBACK) != 0) {
4762
4763                                 /* Update stats */
4764                                 dev->target->errors_detected++;
4765                                 if (dev->target->cmds_since_error == 0)
4766                                         dev->target->cmds_since_error++;
4767                                 else {
4768                                         dev->target->cmds_since_error = 0;
4769                                         ahd_linux_fallback(ahd, &devinfo);
4770                                 }
4771                         }
4772                 }
4773 no_fallback:
4774                 switch (status) {
4775                 case CAM_REQ_INPROG:
4776                 case CAM_REQ_CMP:
4777                 case CAM_SCSI_STATUS_ERROR:
4778                         new_status = DID_OK;
4779                         break;
4780                 case CAM_REQ_ABORTED:
4781                         new_status = DID_ABORT;
4782                         break;
4783                 case CAM_BUSY:
4784                         new_status = DID_BUS_BUSY;
4785                         break;
4786                 case CAM_REQ_INVALID:
4787                 case CAM_PATH_INVALID:
4788                         new_status = DID_BAD_TARGET;
4789                         break;
4790                 case CAM_SEL_TIMEOUT:
4791                         new_status = DID_NO_CONNECT;
4792                         break;
4793                 case CAM_SCSI_BUS_RESET:
4794                 case CAM_BDR_SENT:
4795                         new_status = DID_RESET;
4796                         break;
4797                 case CAM_UNCOR_PARITY:
4798                         new_status = DID_PARITY;
4799                         break;
4800                 case CAM_CMD_TIMEOUT:
4801                         new_status = DID_TIME_OUT;
4802                         break;
4803                 case CAM_UA_ABORT:
4804                 case CAM_REQ_CMP_ERR:
4805                 case CAM_AUTOSENSE_FAIL:
4806                 case CAM_NO_HBA:
4807                 case CAM_DATA_RUN_ERR:
4808                 case CAM_UNEXP_BUSFREE:
4809                 case CAM_SEQUENCE_FAIL:
4810                 case CAM_CCB_LEN_ERR:
4811                 case CAM_PROVIDE_FAIL:
4812                 case CAM_REQ_TERMIO:
4813                 case CAM_UNREC_HBA_ERROR:
4814                 case CAM_REQ_TOO_BIG:
4815                         new_status = DID_ERROR;
4816                         break;
4817                 case CAM_REQUEUE_REQ:
4818                         /*
4819                          * If we want the request requeued, make sure there
4820                          * are sufficient retries.  In the old scsi error code,
4821                          * we used to be able to specify a result code that
4822                          * bypassed the retry count.  Now we must use this
4823                          * hack.  We also "fake" a check condition with
4824                          * a sense code of ABORTED COMMAND.  This seems to
4825                          * evoke a retry even if this command is being sent
4826                          * via the eh thread.  Ick!  Ick!  Ick!
4827                          */
4828                         if (cmd->retries > 0)
4829                                 cmd->retries--;
4830                         new_status = DID_OK;
4831                         ahd_cmd_set_scsi_status(cmd, SCSI_STATUS_CHECK_COND);
4832                         cmd->result |= (DRIVER_SENSE << 24);
4833                         memset(cmd->sense_buffer, 0,
4834                                sizeof(cmd->sense_buffer));
4835                         cmd->sense_buffer[0] = SSD_ERRCODE_VALID
4836                                              | SSD_CURRENT_ERROR;
4837                         cmd->sense_buffer[2] = SSD_KEY_ABORTED_COMMAND;
4838                         break;
4839                 default:
4840                         /* We should never get here */
4841                         new_status = DID_ERROR;
4842                         break;
4843                 }
4844
4845                 ahd_cmd_set_transaction_status(cmd, new_status);
4846         }
4847
4848         completeq = &ahd->platform_data->completeq;
4849         list_cmd = TAILQ_FIRST(completeq);
4850         acmd = (struct ahd_cmd *)cmd;
4851         while (list_cmd != NULL
4852             && acmd_scsi_cmd(list_cmd).serial_number
4853              < acmd_scsi_cmd(acmd).serial_number)
4854                 list_cmd = TAILQ_NEXT(list_cmd, acmd_links.tqe);
4855         if (list_cmd != NULL)
4856                 TAILQ_INSERT_BEFORE(list_cmd, acmd, acmd_links.tqe);
4857         else
4858                 TAILQ_INSERT_TAIL(completeq, acmd, acmd_links.tqe);
4859 }
4860
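/*
 * Filter our transfer negotiation goals against the capabilities
 * reported in the device's inquiry data.
 */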
4861 static void
4862 ahd_linux_filter_inquiry(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
4863 {
4864         struct  scsi_inquiry_data *sid;
4865         struct  ahd_initiator_tinfo *tinfo;
4866         struct  ahd_transinfo *user;
4867         struct  ahd_transinfo *goal;
4868         struct  ahd_transinfo *curr;
4869         struct  ahd_tmode_tstate *tstate;
4870         struct  ahd_linux_device *dev;
4871         u_int   width;
4872         u_int   period;
4873         u_int   offset;
4874         u_int   ppr_options;
4875         u_int   trans_version;
4876         u_int   prot_version;
4877
4878         /*
4879          * Determine if this lun actually exists.  If so,
4880          * hold on to its corresponding device structure.
4881          * If not, make sure we release the device and
4882          * don't bother processing the rest of this inquiry
4883          * command.
4884          */
4885         dev = ahd_linux_get_device(ahd, devinfo->channel - 'A',
4886                                    devinfo->target, devinfo->lun,
4887                                    /*alloc*/TRUE);
4888
4889         sid = (struct scsi_inquiry_data *)dev->target->inq_data;
4890         if (SID_QUAL(sid) == SID_QUAL_LU_CONNECTED) {
4891
4892                 dev->flags &= ~AHD_DEV_UNCONFIGURED;
4893         } else {
4894                 dev->flags |= AHD_DEV_UNCONFIGURED;
4895                 return;
4896         }
4897
4898         /*
4899          * Update our notion of this device's transfer
4900          * negotiation capabilities.
4901          */
4902         tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
4903                                     devinfo->our_scsiid,
4904                                     devinfo->target, &tstate);
4905         user = &tinfo->user;
4906         goal = &tinfo->goal;
4907         curr = &tinfo->curr;
4908         width = user->width;
4909         period = user->period;
4910         offset = user->offset;
4911         ppr_options = user->ppr_options;
4912         trans_version = user->transport_version;
4913         prot_version = MIN(user->protocol_version, SID_ANSI_REV(sid));
4914
4915         /*
4916          * Only attempt SPI3/4 once we've verified that
4917          * the device claims to support SPI3/4 features.
4918          */
4919         if (prot_version < SCSI_REV_2)
4920                 trans_version = SID_ANSI_REV(sid);
4921         else
4922                 trans_version = SCSI_REV_2;
4923
4924         if ((sid->flags & SID_WBus16) == 0)
4925                 width = MSG_EXT_WDTR_BUS_8_BIT;
4926         if ((sid->flags & SID_Sync) == 0) {
4927                 period = 0;
4928                 offset = 0;
4929                 ppr_options = 0;
4930         }
4931         if ((sid->spi3data & SID_SPI_QAS) == 0)
4932                 ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
4933         if ((sid->spi3data & SID_SPI_CLOCK_DT) == 0)
4934                 ppr_options &= MSG_EXT_PPR_QAS_REQ;
4935         if ((sid->spi3data & SID_SPI_IUS) == 0)
4936                 ppr_options &= (MSG_EXT_PPR_DT_REQ
4937                               | MSG_EXT_PPR_QAS_REQ);
4938
4939         if (prot_version > SCSI_REV_2
4940          && ppr_options != 0)
4941                 trans_version = user->transport_version;
4942
4943         ahd_validate_width(ahd, /*tinfo limit*/NULL, &width, ROLE_UNKNOWN);
4944         ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
4945         ahd_validate_offset(ahd, /*tinfo limit*/NULL, period,
4946                             &offset, width, ROLE_UNKNOWN);
4947         if (offset == 0 || period == 0) {
4948                 period = 0;
4949                 offset = 0;
4950                 ppr_options = 0;
4951         }
4952         /* Apply our filtered user settings. */
4953         curr->transport_version = trans_version;
4954         curr->protocol_version = prot_version;
4955         ahd_set_width(ahd, devinfo, width, AHD_TRANS_GOAL, /*paused*/FALSE);
4956         ahd_set_syncrate(ahd, devinfo, period, offset, ppr_options,
4957                          AHD_TRANS_GOAL, /*paused*/FALSE);
4958 }
4959
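/*
 * Freeze the SIMQ: block new requests from the mid-layer and requeue
 * any commands we have not yet sent to the controller.
 */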
4960 void
4961 ahd_freeze_simq(struct ahd_softc *ahd)
4962 {
4963         ahd->platform_data->qfrozen++;
4964         if (ahd->platform_data->qfrozen == 1) {
4965                 scsi_block_requests(ahd->platform_data->host);
4966                 ahd_platform_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
4967                                         CAM_LUN_WILDCARD, SCB_LIST_NULL,
4968                                         ROLE_INITIATOR, CAM_REQUEUE_REQ);
4969         }
4970 }
4971
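/*
 * Drop one hold on the SIMQ and unblock the mid-layer once the freeze
 * count reaches zero.
 */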
4972 void
4973 ahd_release_simq(struct ahd_softc *ahd)
4974 {
4975         u_long s;
4976         int    unblock_reqs;
4977
4978         unblock_reqs = 0;
4979         ahd_lock(ahd, &s);
4980         if (ahd->platform_data->qfrozen > 0)
4981                 ahd->platform_data->qfrozen--;
4982         if (ahd->platform_data->qfrozen == 0) {
4983                 unblock_reqs = 1;
4984         }
4985         if (AHD_DV_SIMQ_FROZEN(ahd)
4986          && ((ahd->platform_data->flags & AHD_DV_WAIT_SIMQ_RELEASE) != 0)) {
4987                 ahd->platform_data->flags &= ~AHD_DV_WAIT_SIMQ_RELEASE;
4988                 up(&ahd->platform_data->dv_sem);
4989         }
4990         ahd_schedule_runq(ahd);
4991         ahd_unlock(ahd, &s);
4992         /*
4993          * There is still a race here.  The mid-layer
4994          * should keep its own freeze count and use
4995          * a bottom half handler to run the queues
4996          * so we can unblock with our own lock held.
4997          */
4998         if (unblock_reqs)
4999                 scsi_unblock_requests(ahd->platform_data->host);
5000 }
5001
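/*
 * Timer callback used during error recovery to guarantee that the
 * eh_sem semaphore is eventually released.
 */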
5002 static void
5003 ahd_linux_sem_timeout(u_long arg)
5004 {
5005         struct  scb *scb;
5006         struct  ahd_softc *ahd;
5007         u_long  s;
5008
5009         scb = (struct scb *)arg;
5010         ahd = scb->ahd_softc;
5011         ahd_lock(ahd, &s);
5012         if ((scb->platform_data->flags & AHD_SCB_UP_EH_SEM) != 0) {
5013                 scb->platform_data->flags &= ~AHD_SCB_UP_EH_SEM;
5014                 up(&ahd->platform_data->eh_sem);
5015         }
5016         ahd_unlock(ahd, &s);
5017 }
5018
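/*
 * Timer callback that unfreezes a device queue after a busy condition
 * and restarts command submission.
 */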
5019 static void
5020 ahd_linux_dev_timed_unfreeze(u_long arg)
5021 {
5022         struct ahd_linux_device *dev;
5023         struct ahd_softc *ahd;
5024         u_long s;
5025
5026         dev = (struct ahd_linux_device *)arg;
5027         ahd = dev->target->ahd;
5028         ahd_lock(ahd, &s);
5029         dev->flags &= ~AHD_DEV_TIMER_ACTIVE;
5030         if (dev->qfrozen > 0)
5031                 dev->qfrozen--;
5032         if (dev->qfrozen == 0
5033          && (dev->flags & AHD_DEV_ON_RUN_LIST) == 0)
5034                 ahd_linux_run_device_queue(ahd, dev);
5035         if ((dev->flags & AHD_DEV_UNCONFIGURED) != 0
5036          && dev->active == 0)
5037                 ahd_linux_free_device(ahd, dev);
5038         ahd_unlock(ahd, &s);
5039 }
5040
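/*
 * Dump the platform specific portion of the controller state: the
 * number of commands waiting on each device queue.
 */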
5041 void
5042 ahd_platform_dump_card_state(struct ahd_softc *ahd)
5043 {
5044         struct ahd_linux_device *dev;
5045         int target;
5046         int maxtarget;
5047         int lun;
5048         int i;
5049
5050         maxtarget = (ahd->features & AHD_WIDE) ? 15 : 7;
5051         for (target = 0; target <= maxtarget; target++) {
5052
5053                 for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
5054                         struct ahd_cmd *acmd;
5055
5056                         dev = ahd_linux_get_device(ahd, 0, target,
5057                                                    lun, /*alloc*/FALSE);
5058                         if (dev == NULL)
5059                                 continue;
5060
5061                         printf("DevQ(%d:%d:%d): ", 0, target, lun);
5062                         i = 0;
5063                         TAILQ_FOREACH(acmd, &dev->busyq, acmd_links.tqe) {
5064                                 if (i++ > AHD_SCB_MAX)
5065                                         break;
5066                         }
5067                         printf("%d waiting\n", i);
5068                 }
5069         }
5070 }
5071
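/*
 * Module entry point.  Register the driver template with the SCSI
 * mid-layer.
 */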
5072 static int __init
5073 ahd_linux_init(void)
5074 {
5075 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
5076         return (ahd_linux_detect(&aic79xx_driver_template) ? 0 : -ENODEV);
5077 #else
5078         scsi_register_module(MODULE_SCSI_HA, &aic79xx_driver_template);
5079         if (aic79xx_driver_template.present == 0) {
5080                 scsi_unregister_module(MODULE_SCSI_HA,
5081                                        &aic79xx_driver_template);
5082                 return (-ENODEV);
5083         }
5084
5085         return (0);
5086 #endif
5087 }
5088
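/*
 * Module exit point.  Stop the DV threads and unregister the driver.
 */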
5089 static void __exit
5090 ahd_linux_exit(void)
5091 {
5092         struct ahd_softc *ahd;
5093         u_long l;
5094
5095         /*
5096          * Shut down DV threads before going into the SCSI mid-layer.
5097          * This avoids situations where the mid-layer locks the entire
5098          * kernel so that waiting for our DV threads to exit leads
5099          * to deadlock.
5100          */
5101         ahd_list_lock(&l);
5102         TAILQ_FOREACH(ahd, &ahd_tailq, links) {
5103
5104                 ahd_linux_kill_dv_thread(ahd);
5105         }
5106         ahd_list_unlock(&l);
5107 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
5108         /*
5109          * In 2.4 we have to unregister from the PCI core _after_
5110          * unregistering from the scsi midlayer to avoid dangling
5111          * references.
5112          */
5113         scsi_unregister_module(MODULE_SCSI_HA, &aic79xx_driver_template);
5114 #endif
5115         ahd_linux_pci_exit();
5116 }
5117
5118 module_init(ahd_linux_init);
5119 module_exit(ahd_linux_exit);