Merge to Fedora kernel-2.6.7-1.441
[linux-2.6.git] / drivers / scsi / scsi_lib.c
1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/completion.h>
13 #include <linux/kernel.h>
14 #include <linux/mempool.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/pci.h>
18
19 #include <scsi/scsi_driver.h>
20 #include <scsi/scsi_host.h>
21 #include "scsi.h"
22
23 #include "scsi_priv.h"
24 #include "scsi_logging.h"
25
26
27 #define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
28 #define SG_MEMPOOL_SIZE         32
29
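/*
 * Scatter-gather tables are allocated from a small set of mempools, one
 * per power-of-two segment count (8, 16, 32, ...).  A command draws its
 * table from the smallest pool that can hold all of its segments, so a
 * large table is only reserved when it is actually needed.
 */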
30 struct scsi_host_sg_pool {
31         size_t          size;
32         char            *name; 
33         kmem_cache_t    *slab;
34         mempool_t       *pool;
35 };
36
37 #if (SCSI_MAX_PHYS_SEGMENTS < 32)
38 #error SCSI_MAX_PHYS_SEGMENTS is too small
39 #endif
40
41 #define SP(x) { x, "sgpool-" #x } 
42 struct scsi_host_sg_pool scsi_sg_pools[] = { 
43         SP(8),
44         SP(16),
45         SP(32),
46 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
47         SP(64),
48 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
49         SP(128),
50 #if (SCSI_MAX_PHYS_SEGMENTS > 128)
51         SP(256),
52 #if (SCSI_MAX_PHYS_SEGMENTS > 256)
53 #error SCSI_MAX_PHYS_SEGMENTS is too large
54 #endif
55 #endif
56 #endif
57 #endif
58 };      
59 #undef SP
60
61
62 /*
63  * Function:    scsi_insert_special_req()
64  *
65  * Purpose:     Insert pre-formed request into request queue.
66  *
67  * Arguments:   sreq    - request that is ready to be queued.
68  *              at_head - boolean.  True if we should insert at head
69  *                        of queue, false if we should insert at tail.
70  *
71  * Lock status: Assumed that lock is not held upon entry.
72  *
73  * Returns:     Nothing
74  *
75  * Notes:       This function is called from character device and from
76  *              ioctl types of functions where the caller knows exactly
77  *              what SCSI command needs to be issued.   The idea is that
78  *              we merely inject the command into the queue (at the head
79  *              for now), and then call the queue request function to actually
80  *              process it.
81  */
82 int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
83 {
84         /*
85          * Because users of this function are apt to reuse requests with no
86          * modification, we have to sanitise the request flags here
87          */
88         sreq->sr_request->flags &= ~REQ_DONTPREP;
89         blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
90                            at_head, sreq, 0);
91         return 0;
92 }
93
94 /*
95  * Function:    scsi_queue_insert()
96  *
97  * Purpose:     Insert a command in the midlevel queue.
98  *
99  * Arguments:   cmd    - command that we are adding to queue.
100  *              reason - why we are inserting command to queue.
101  *
102  * Lock status: Assumed that lock is not held upon entry.
103  *
104  * Returns:     Nothing.
105  *
106  * Notes:       We do this for one of two cases.  Either the host is busy
107  *              and it cannot accept any more commands for the time being,
108  *              or the device returned QUEUE_FULL and can accept no more
109  *              commands.
110  * Notes:       This could be called either from an interrupt context or a
111  *              normal process context.
112  */
113 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
114 {
115         struct Scsi_Host *host = cmd->device->host;
116         struct scsi_device *device = cmd->device;
117
118         SCSI_LOG_MLQUEUE(1,
119                  printk("Inserting command %p into mlqueue\n", cmd));
120
121         /*
122          * We are inserting the command into the ml queue.  First, we
123          * cancel the timer, so it doesn't time out.
124          */
125         scsi_delete_timer(cmd);
126
127         /*
128          * Next, set the appropriate busy bit for the device/host.
129          *
130          * If the host/device isn't busy, assume that something actually
131          * completed, and that we should be able to queue a command now.
132          *
133          * Note that the prior mid-layer assumption that any host could
134          * always queue at least one command is now broken.  The mid-layer
135          * will implement a user specifiable stall (see
136          * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
137          * if a command is requeued with no other commands outstanding
138          * either for the device or for the host.
139          */
140         if (reason == SCSI_MLQUEUE_HOST_BUSY)
141                 host->host_blocked = host->max_host_blocked;
142         else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
143                 device->device_blocked = device->max_device_blocked;
144
145         /*
146          * Register the fact that we own the thing for now.
147          */
148         cmd->state = SCSI_STATE_MLQUEUE;
149         cmd->owner = SCSI_OWNER_MIDLEVEL;
150
151         /*
152          * Decrement the counters, since these commands are no longer
153          * active on the host/device.
154          */
155         scsi_device_unbusy(device);
156
157         /*
158          * Insert this command at the head of the queue for its device.
159          * It will go before all other commands that are already in the queue.
160          *
161          * NOTE: there is magic here about the way the queue is plugged if
162          * we have no outstanding commands.
163          * 
164          * Although this *doesn't* plug the queue, it does call the request
165          * function.  The SCSI request function detects the blocked condition
166          * and plugs the queue appropriately.
167          */
168         blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
169         return 0;
170 }
171
172 /*
173  * Function:    scsi_do_req
174  *
175  * Purpose:     Queue a SCSI request
176  *
177  * Arguments:   sreq      - command descriptor.
178  *              cmnd      - actual SCSI command to be performed.
179  *              buffer    - data buffer.
180  *              bufflen   - size of data buffer.
181  *              done      - completion function to be run.
182  *              timeout   - how long to let it run before timeout.
183  *              retries   - number of retries we allow.
184  *
185  * Lock status: No locks held upon entry.
186  *
187  * Returns:     Nothing.
188  *
189  * Notes:       This function is only used for queueing requests for things
190  *              like ioctls and character device requests - this is because
191  *              we essentially just inject a request into the queue for the
192  *              device.
193  *
194  *              In order to support the scsi_device_quiesce function, we
195  *              now inject requests on the *head* of the device queue
196  *              rather than the tail.
197  */
198 void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
199                  void *buffer, unsigned bufflen,
200                  void (*done)(struct scsi_cmnd *),
201                  int timeout, int retries)
202 {
203         /*
204          * If the upper level driver is reusing these things, then
205          * we should release the low-level block now.  Another one will
206          * be allocated later when this request is getting queued.
207          */
208         __scsi_release_request(sreq);
209
210         /*
211          * Our own function scsi_done (which marks the host as not busy,
212          * disables the timeout counter, etc) will be called by us or by the
213          * scsi_hosts[host].queuecommand() function; it in turn arranges for
214          * the high level driver's completion function to be called.
215          */
216         memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
217         sreq->sr_bufflen = bufflen;
218         sreq->sr_buffer = buffer;
219         sreq->sr_allowed = retries;
220         sreq->sr_done = done;
221         sreq->sr_timeout_per_command = timeout;
222
223         if (sreq->sr_cmd_len == 0)
224                 sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
225
226         /*
227          * head injection *required* here otherwise quiesce won't work
228          */
229         scsi_insert_special_req(sreq, 1);
230 }
231  
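/*
 * Completion callback used by scsi_wait_req(): mark the request as done,
 * release any block layer tag it holds, and wake the thread sleeping on
 * the completion embedded in the request.
 */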
232 static void scsi_wait_done(struct scsi_cmnd *cmd)
233 {
234         struct request *req = cmd->request;
235         struct request_queue *q = cmd->device->request_queue;
236         unsigned long flags;
237
238         req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */
239
240         spin_lock_irqsave(q->queue_lock, flags);
241         if (blk_rq_tagged(req))
242                 blk_queue_end_tag(q, req);
243         spin_unlock_irqrestore(q->queue_lock, flags);
244
245         if (req->waiting)
246                 complete(req->waiting);
247 }
248
249 void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
250                    unsigned bufflen, int timeout, int retries)
251 {
252         DECLARE_COMPLETION(wait);
253         
254         sreq->sr_request->waiting = &wait;
255         sreq->sr_request->rq_status = RQ_SCSI_BUSY;
256         scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
257                         timeout, retries);
258         wait_for_completion(&wait);
259         sreq->sr_request->waiting = NULL;
260         if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
261                 sreq->sr_result |= (DRIVER_ERROR << 24);
262
263         __scsi_release_request(sreq);
264 }
265
266 /*
267  * Function:    scsi_init_cmd_errh()
268  *
269  * Purpose:     Initialize cmd fields related to error handling.
270  *
271  * Arguments:   cmd     - command that is ready to be queued.
272  *
273  * Returns:     1 (the function currently has no failure path)
274  *
275  * Notes:       This function has the job of initializing a number of
276  *              fields related to error handling.   Typically this will
277  *              be called once for each command, as required.
278  */
279 static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
280 {
281         cmd->owner = SCSI_OWNER_MIDLEVEL;
282         cmd->serial_number = 0;
283         cmd->serial_number_at_timeout = 0;
284         cmd->abort_reason = 0;
285
286         memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
287
288         if (cmd->cmd_len == 0)
289                 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
290
291         /*
292          * We need saved copies of a number of fields - this is because
293          * error handling may need to overwrite these with different values
294          * to run different commands, and once error handling is complete,
295          * we will need to restore these values prior to running the actual
296          * command.
297          */
298         cmd->old_use_sg = cmd->use_sg;
299         cmd->old_cmd_len = cmd->cmd_len;
300         cmd->sc_old_data_direction = cmd->sc_data_direction;
301         cmd->old_underflow = cmd->underflow;
302         memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
303         cmd->buffer = cmd->request_buffer;
304         cmd->bufflen = cmd->request_bufflen;
305         cmd->internal_timeout = NORMAL_TIMEOUT;
306         cmd->abort_reason = 0;
307
308         return 1;
309 }
310
311 /*
312  * Function:   scsi_setup_cmd_retry()
313  *
314  * Purpose:    Restore the command state for a retry
315  *
316  * Arguments:  cmd      - command to be restored
317  *
318  * Returns:    Nothing
319  *
320  * Notes:      Immediately prior to retrying a command, we need
321  *             to restore certain fields that we saved above.
322  */
323 void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
324 {
325         memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
326         cmd->request_buffer = cmd->buffer;
327         cmd->request_bufflen = cmd->bufflen;
328         cmd->use_sg = cmd->old_use_sg;
329         cmd->cmd_len = cmd->old_cmd_len;
330         cmd->sc_data_direction = cmd->sc_old_data_direction;
331         cmd->underflow = cmd->old_underflow;
332 }
333
334 void scsi_device_unbusy(struct scsi_device *sdev)
335 {
336         struct Scsi_Host *shost = sdev->host;
337         unsigned long flags;
338
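        /*
         * Note the lock handoff below: interrupts stay disabled while we
         * swap the host lock for the device lock; the flags saved when
         * taking the host lock are restored when the device lock is dropped.
         */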
339         spin_lock_irqsave(shost->host_lock, flags);
340         shost->host_busy--;
341         if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
342                      shost->host_failed))
343                 scsi_eh_wakeup(shost);
344         spin_unlock(shost->host_lock);
345         spin_lock(&sdev->sdev_lock);
346         sdev->device_busy--;
347         spin_unlock_irqrestore(&sdev->sdev_lock, flags);
348 }
349
350 /*
351  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
352  * and call blk_run_queue for all the scsi_devices on the target -
353  * including current_sdev first.
354  *
355  * Called with *no* scsi locks held.
356  */
357 static void scsi_single_lun_run(struct scsi_device *current_sdev)
358 {
359         struct Scsi_Host *shost = current_sdev->host;
360         struct scsi_device *sdev, *tmp;
361         unsigned long flags;
362
363         spin_lock_irqsave(shost->host_lock, flags);
364         current_sdev->sdev_target->starget_sdev_user = NULL;
365         spin_unlock_irqrestore(shost->host_lock, flags);
366
367         /*
368          * Call blk_run_queue for all LUNs on the target, starting with
369          * current_sdev. We race with others (to set starget_sdev_user),
370          * but in most cases, we will be first. Ideally, each LU on the
371          * target would get some limited time or requests on the target.
372          */
373         blk_run_queue(current_sdev->request_queue);
374
375         spin_lock_irqsave(shost->host_lock, flags);
376         if (current_sdev->sdev_target->starget_sdev_user)
377                 goto out;
378         list_for_each_entry_safe(sdev, tmp, &current_sdev->same_target_siblings,
379                         same_target_siblings) {
380                 if (scsi_device_get(sdev))
381                         continue;
382
383                 spin_unlock_irqrestore(shost->host_lock, flags);
384                 blk_run_queue(sdev->request_queue);
385                 spin_lock_irqsave(shost->host_lock, flags);
386         
387                 scsi_device_put(sdev);
388         }
389  out:
390         spin_unlock_irqrestore(shost->host_lock, flags);
391 }
392
393 /*
394  * Function:    scsi_run_queue()
395  *
396  * Purpose:     Select a proper request queue to serve next
397  *
398  * Arguments:   q       - last request's queue
399  *
400  * Returns:     Nothing
401  *
402  * Notes:       The previous command was completely finished, start
403  *              a new one if possible.
404  */
405 static void scsi_run_queue(struct request_queue *q)
406 {
407         struct scsi_device *sdev = q->queuedata;
408         struct Scsi_Host *shost = sdev->host;
409         unsigned long flags;
410
411         if (sdev->single_lun)
412                 scsi_single_lun_run(sdev);
413
414         spin_lock_irqsave(shost->host_lock, flags);
415         while (!list_empty(&shost->starved_list) &&
416                !shost->host_blocked && !shost->host_self_blocked &&
417                 !((shost->can_queue > 0) &&
418                   (shost->host_busy >= shost->can_queue))) {
419                 /*
420                  * As long as shost is accepting commands and we have
421                  * starved queues, call blk_run_queue. scsi_request_fn
422                  * drops the queue_lock and can add us back to the
423                  * starved_list.
424                  *
425                  * host_lock protects the starved_list and starved_entry.
426                  * scsi_request_fn must get the host_lock before checking
427                  * or modifying starved_list or starved_entry.
428                  */
429                 sdev = list_entry(shost->starved_list.next,
430                                           struct scsi_device, starved_entry);
431                 list_del_init(&sdev->starved_entry);
432                 spin_unlock_irqrestore(shost->host_lock, flags);
433
434                 blk_run_queue(sdev->request_queue);
435
436                 spin_lock_irqsave(shost->host_lock, flags);
437                 if (unlikely(!list_empty(&sdev->starved_entry)))
438                         /*
439                          * sdev lost a race, and was put back on the
440                          * starved list. This is unlikely but without this
441                          * in theory we could loop forever.
442                          */
443                         break;
444         }
445         spin_unlock_irqrestore(shost->host_lock, flags);
446
447         blk_run_queue(q);
448 }
449
450 /*
451  * Function:    scsi_requeue_command()
452  *
453  * Purpose:     Handle post-processing of completed commands.
454  *
455  * Arguments:   q       - queue to operate on
456  *              cmd     - command that may need to be requeued.
457  *
458  * Returns:     Nothing
459  *
460  * Notes:       After command completion, there may be blocks left
461  *              over which weren't finished by the previous command;
462  *              this can be for a number of reasons - the main one is
463  *              I/O errors in the middle of the request, in which case
464  *              we need to request the blocks that come after the bad
465  *              sector.
466  */
467 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
468 {
469         cmd->request->flags &= ~REQ_DONTPREP;
470         blk_insert_request(q, cmd->request, 1, cmd, 1);
471
472         scsi_run_queue(q);
473 }
474
475 void scsi_next_command(struct scsi_cmnd *cmd)
476 {
477         struct request_queue *q = cmd->device->request_queue;
478
479         scsi_put_command(cmd);
480         scsi_run_queue(q);
481 }
482
483 void scsi_run_host_queues(struct Scsi_Host *shost)
484 {
485         struct scsi_device *sdev;
486
487         shost_for_each_device(sdev, shost)
488                 scsi_run_queue(sdev->request_queue);
489 }
490
491 /*
492  * Function:    scsi_end_request()
493  *
494  * Purpose:     Post-processing of completed commands called from interrupt
495  *              handler or a bottom-half handler.
496  *
497  * Arguments:   cmd      - command that is complete.
498  *              uptodate - 1 if I/O indicates success, 0 for I/O error.
499  *              bytes    - number of bytes of I/O we want to mark complete.
500  *              requeue  - indicates whether we should requeue leftovers;
501  *                         if so, the remainder is re-inserted at the head
502  *                         of the queue and the queue is run again.
503  *
504  * Lock status: Assumed that lock is not held upon entry.
505  *
506  * Returns:     Nothing
507  *
508  * Notes:       This is called for block device requests in order to
509  *              mark some number of bytes as complete.
510  * 
511  *              We are guaranteeing that the request queue will be goosed
512  *              at some point during this call.
513  */
514 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
515                                           int bytes, int requeue)
516 {
517         request_queue_t *q = cmd->device->request_queue;
518         struct request *req = cmd->request;
519         unsigned long flags;
520
521         /*
522          * If there are blocks left over at the end, set up the command
523          * to queue the remainder of them.
524          */
525         if (end_that_request_chunk(req, uptodate, bytes)) {
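                /* what is left of the request, in bytes (hard_nr_sectors is in 512-byte units) */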
526                 int leftover = (req->hard_nr_sectors << 9);
527
528                 if (blk_pc_request(req))
529                         leftover = req->data_len;
530
531                 /* kill remainder if no retries */
532                 if (!uptodate && blk_noretry_request(req))
533                         end_that_request_chunk(req, 0, leftover);
534                 else {
535                         if (requeue)
536                                 /*
537                                  * Bleah.  Leftovers again.  Stick the
538                                  * leftovers in the front of the
539                                  * queue, and goose the queue again.
540                                  */
541                                 scsi_requeue_command(q, cmd);
542
543                         return cmd;
544                 }
545         }
546
547         add_disk_randomness(req->rq_disk);
548
549         spin_lock_irqsave(q->queue_lock, flags);
550         if (blk_rq_tagged(req))
551                 blk_queue_end_tag(q, req);
552         end_that_request_last(req);
553         spin_unlock_irqrestore(q->queue_lock, flags);
554
555         /*
556          * This will goose the queue request function at the end, so we don't
557          * need to worry about launching another command.
558          */
559         scsi_next_command(cmd);
560         return NULL;
561 }
562
563 static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
564 {
565         struct scsi_host_sg_pool *sgp;
566         struct scatterlist *sgl;
567
568         BUG_ON(!cmd->use_sg);
569
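        /*
         * Pick the smallest sg mempool that can hold use_sg entries and
         * remember its index in sglist_len so scsi_free_sgtable() can
         * return the table to the same pool.
         */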
570         switch (cmd->use_sg) {
571         case 1 ... 8:
572                 cmd->sglist_len = 0;
573                 break;
574         case 9 ... 16:
575                 cmd->sglist_len = 1;
576                 break;
577         case 17 ... 32:
578                 cmd->sglist_len = 2;
579                 break;
580 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
581         case 33 ... 64:
582                 cmd->sglist_len = 3;
583                 break;
584 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
585         case 65 ... 128:
586                 cmd->sglist_len = 4;
587                 break;
588 #if (SCSI_MAX_PHYS_SEGMENTS  > 128)
589         case 129 ... 256:
590                 cmd->sglist_len = 5;
591                 break;
592 #endif
593 #endif
594 #endif
595         default:
596                 return NULL;
597         }
598
599         sgp = scsi_sg_pools + cmd->sglist_len;
600         sgl = mempool_alloc(sgp->pool, gfp_mask);
601         if (sgl)
602                 memset(sgl, 0, sgp->size);
603         return sgl;
604 }
605
606 static void scsi_free_sgtable(struct scatterlist *sgl, int index)
607 {
608         struct scsi_host_sg_pool *sgp;
609
610         BUG_ON(index >= SG_MEMPOOL_NR);
611
612         sgp = scsi_sg_pools + index;
613         mempool_free(sgl, sgp->pool);
614 }
615
616 /*
617  * Function:    scsi_release_buffers()
618  *
619  * Purpose:     Release buffers allocated for a command we are bailing out on.
620  *
621  * Arguments:   cmd     - command that we are bailing.
622  *
623  * Lock status: Assumed that no lock is held upon entry.
624  *
625  * Returns:     Nothing
626  *
627  * Notes:       In the event that an upper level driver rejects a
628  *              command, we must release resources allocated during
629  *              the __init_io() function.  Primarily this would involve
630  *              the scsi_init_io() function.  Primarily this would involve
631  *              buffers.
632  */
633 static void scsi_release_buffers(struct scsi_cmnd *cmd)
634 {
635         struct request *req = cmd->request;
636
637         /*
638          * Free up any indirection buffers we allocated for DMA purposes. 
639          */
640         if (cmd->use_sg)
641                 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
642         else if (cmd->request_buffer != req->buffer)
643                 kfree(cmd->request_buffer);
644
645         /*
646          * Zero these out.  They now point to freed memory, and it is
647          * dangerous to hang onto the pointers.
648          */
649         cmd->buffer  = NULL;
650         cmd->bufflen = 0;
651         cmd->request_buffer = NULL;
652         cmd->request_bufflen = 0;
653 }
654
655 /*
656  * Function:    scsi_io_completion()
657  *
658  * Purpose:     Completion processing for block device I/O requests.
659  *
660  * Arguments:   cmd   - command that is finished.
661  *
662  * Lock status: Assumed that no lock is held upon entry.
663  *
664  * Returns:     Nothing
665  *
666  * Notes:       This function is matched in terms of capabilities to
667  *              the function that created the scatter-gather list.
668  *              In other words, if there are no bounce buffers
669  *              (the normal case for most drivers), we don't need
670  *              the logic to deal with cleaning up afterwards.
671  *
672  *              We must do one of several things here:
673  *
674  *              a) Call scsi_end_request.  This will finish off the
675  *                 specified number of sectors.  If we are done, the
676  *                 command block will be released, and the queue
677  *                 function will be goosed.  If we are not done, then
678  *                 scsi_end_request will directly goose the queue.
679  *
680  *              b) We can just use scsi_requeue_command() here.  This would
681  *                 be used if we just wanted to retry, for example.
682  */
683 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
684                         unsigned int block_bytes)
685 {
686         int result = cmd->result;
687         int this_count = cmd->bufflen;
688         request_queue_t *q = cmd->device->request_queue;
689         struct request *req = cmd->request;
690         int clear_errors = 1;
691
692         /*
693          * Free up any indirection buffers we allocated for DMA purposes. 
694          * For the case of a READ, we need to copy the data out of the
695          * bounce buffer and into the real buffer.
696          */
697         if (cmd->use_sg)
698                 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
699         else if (cmd->buffer != req->buffer) {
700                 if (rq_data_dir(req) == READ) {
701                         unsigned long flags;
702                         char *to = bio_kmap_irq(req->bio, &flags);
703                         memcpy(to, cmd->buffer, cmd->bufflen);
704                         bio_kunmap_irq(to, &flags);
705                 }
706                 kfree(cmd->buffer);
707         }
708
709         if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
710                 req->errors = (driver_byte(result) & DRIVER_SENSE) ?
711                               (CHECK_CONDITION << 1) : (result & 0xff);
712                 if (result) {
713                         clear_errors = 0;
714                         if (cmd->sense_buffer[0] & 0x70) {
715                                 int len = 8 + cmd->sense_buffer[7];
716
717                                 if (len > SCSI_SENSE_BUFFERSIZE)
718                                         len = SCSI_SENSE_BUFFERSIZE;
719                                 memcpy(req->sense, cmd->sense_buffer,  len);
720                                 req->sense_len = len;
721                         }
722                 } else
723                         req->data_len -= cmd->bufflen;
724         }
725
726         /*
727          * Zero these out.  They now point to freed memory, and it is
728          * dangerous to hang onto the pointers.
729          */
730         cmd->buffer  = NULL;
731         cmd->bufflen = 0;
732         cmd->request_buffer = NULL;
733         cmd->request_bufflen = 0;
734
735         /*
736          * Next deal with any sectors which we were able to correctly
737          * handle.
738          */
739         if (good_bytes >= 0) {
740                 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
741                                               req->nr_sectors, good_bytes));
742                 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
743
744                 if (clear_errors)
745                         req->errors = 0;
746                 /*
747                  * If multiple sectors are requested in one buffer, then
748                  * they will have been finished off by the first command.
749                  * If not, then we have a multi-buffer command.
750                  *
751                  * If block_bytes != 0, it means we had a medium error
752                  * of some sort, and that we want to mark some number of
753                  * sectors as not uptodate.  Thus we want to inhibit
754                  * requeueing right here - we will requeue down below
755                  * when we handle the bad sectors.
756                  */
757                 cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
758
759                 /*
760                  * If the command completed without error, then either finish off the
761                  * rest of the command, or start a new one.
762                  */
763                 if (result == 0 || cmd == NULL ) {
764                         return;
765                 }
766         }
767         /*
768          * Now, if we were good little boys and girls, Santa left us a request
769          * sense buffer.  We can extract information from this, so we
770          * can choose a block to remap, etc.
771          */
772         if (driver_byte(result) != 0) {
773                 if ((cmd->sense_buffer[0] & 0x7f) == 0x70) {
774                         /*
775                          * If the device is in the process of becoming ready,
776                          * retry.
777                          */
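                        /* fixed-format sense: bytes 12/13 are ASC/ASCQ; 0x04/0x01 is "becoming ready" */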
778                         if (cmd->sense_buffer[12] == 0x04 &&
779                             cmd->sense_buffer[13] == 0x01) {
780                                 scsi_requeue_command(q, cmd);
781                                 return;
782                         }
783                         if ((cmd->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
784                                 if (cmd->device->removable) {
785                                         /* detected disc change.  set a bit 
786                                          * and quietly refuse further access.
787                                          */
788                                         cmd->device->changed = 1;
789                                         cmd = scsi_end_request(cmd, 0,
790                                                         this_count, 1);
791                                         return;
792                                 } else {
793                                         /*
794                                          * Must have been a power glitch, or a
795                                          * bus reset.  Could not have been a
796                                          * media change, so we just retry the
797                                          * request and see what happens.
798                                          */
799                                         scsi_requeue_command(q, cmd);
800                                         return;
801                                 }
802                         }
803                 }
804                 /*
805                  * If we had an ILLEGAL REQUEST returned, then we may have
806                  * performed an unsupported command.  The only thing this
807                  * should be would be a ten byte read where only a six byte
808                  * read was supported.  Also, on a system where READ CAPACITY
809                  * failed, we may have read past the end of the disk.
810                  */
811
812                 switch (cmd->sense_buffer[2]) {
813                 case ILLEGAL_REQUEST:
814                         if (cmd->device->use_10_for_rw &&
815                             (cmd->cmnd[0] == READ_10 ||
816                              cmd->cmnd[0] == WRITE_10)) {
817                                 cmd->device->use_10_for_rw = 0;
818                                 /*
819                                  * This will cause a retry with a 6-byte
820                                  * command.
821                                  */
822                                 scsi_requeue_command(q, cmd);
823                                 result = 0;
824                         } else {
825                                 cmd = scsi_end_request(cmd, 0, this_count, 1);
826                                 return;
827                         }
828                         break;
829                 case NOT_READY:
830                         printk(KERN_INFO "Device %s not ready.\n",
831                                req->rq_disk ? req->rq_disk->disk_name : "");
832                         cmd = scsi_end_request(cmd, 0, this_count, 1);
833                         return;
834                         break;
835                 case MEDIUM_ERROR:
836                 case VOLUME_OVERFLOW:
837                         printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
838                                cmd->device->host->host_no, (int) cmd->device->channel,
839                                (int) cmd->device->id, (int) cmd->device->lun);
840                         print_command(cmd->data_cmnd);
841                         print_sense("", cmd);
842                         cmd = scsi_end_request(cmd, 0, block_bytes, 1);
843                         return;
844                 default:
845                         break;
846                 }
847         }                       /* driver byte != 0 */
848         if (host_byte(result) == DID_RESET) {
849                 /*
850                  * Third party bus reset or reset for error
851                  * recovery reasons.  Just retry the request
852                  * and see what happens.  
853                  */
854                 scsi_requeue_command(q, cmd);
855                 return;
856         }
857         if (result) {
858                 printk("SCSI error : <%d %d %d %d> return code = 0x%x\n",
859                        cmd->device->host->host_no,
860                        cmd->device->channel,
861                        cmd->device->id,
862                        cmd->device->lun, result);
863
864                 if (driver_byte(result) & DRIVER_SENSE)
865                         print_sense("", cmd);
866                 /*
867                  * Mark a single buffer as not uptodate.  Queue the remainder.
868                  * We sometimes get this cruft in the event that a medium error
869                  * isn't properly reported.
870                  */
871                 block_bytes = req->hard_cur_sectors << 9;
872                 if (!block_bytes)
873                         block_bytes = req->data_len;
874                 cmd = scsi_end_request(cmd, 0, block_bytes, 1);
875         }
876 }
877
878 /*
879  * Function:    scsi_init_io()
880  *
881  * Purpose:     SCSI I/O initialize function.
882  *
883  * Arguments:   cmd   - Command descriptor we wish to initialize
884  *
885  * Returns:     0 on success
886  *              BLKPREP_DEFER if the failure is retryable
887  *              BLKPREP_KILL if the failure is fatal
888  */
889 static int scsi_init_io(struct scsi_cmnd *cmd)
890 {
891         struct request     *req = cmd->request;
892         struct scatterlist *sgpnt;
893         int                count;
894
895         /*
896          * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
897          */
898         if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
899                 cmd->request_bufflen = req->data_len;
900                 cmd->request_buffer = req->data;
901                 req->buffer = req->data;
902                 cmd->use_sg = 0;
903                 return 0;
904         }
905
906         /*
907          * we used to not use scatter-gather for single segment request,
908          * but now we do (it makes highmem I/O easier to support without
909          * kmapping pages)
910          */
911         cmd->use_sg = req->nr_phys_segments;
912
913         /*
914          * if sg table allocation fails, requeue request later.
915          */
916         sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
917         if (unlikely(!sgpnt)) {
918                 req->flags |= REQ_SPECIAL;
919                 return BLKPREP_DEFER;
920         }
921
922         cmd->request_buffer = (char *) sgpnt;
923         cmd->request_bufflen = req->nr_sectors << 9;
924         if (blk_pc_request(req))
925                 cmd->request_bufflen = req->data_len;
926         req->buffer = NULL;
927
928         /* 
929          * Next, walk the list, and fill in the addresses and sizes of
930          * each segment.
931          */
932         count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
933
934         /*
935          * mapped well, send it off
936          */
937         if (likely(count <= cmd->use_sg)) {
938                 cmd->use_sg = count;
939                 return 0;
940         }
941
942         printk(KERN_ERR "Incorrect number of segments after building list\n");
943         printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
944         printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
945                         req->current_nr_sectors);
946
947         /* release the command and kill it */
948         scsi_release_buffers(cmd);
949         scsi_put_command(cmd);
950         return BLKPREP_KILL;
951 }
952
953 static int scsi_prep_fn(struct request_queue *q, struct request *req)
954 {
955         struct scsi_device *sdev = q->queuedata;
956         struct scsi_cmnd *cmd;
957         int specials_only = 0;
958
959         /*
960          * Just check to see if the device is online.  If it isn't, we
961          * refuse to process any commands.  The device must be brought
962          * online before trying any recovery commands
963          */
964         if (unlikely(!scsi_device_online(sdev))) {
965                 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
966                        sdev->host->host_no, sdev->id, sdev->lun);
967                 return BLKPREP_KILL;
968         }
969         if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
970                 /* OK, we're not in a running state; don't prep
971                  * user commands */
972                 if (sdev->sdev_state == SDEV_DEL) {
973                         /* Device is fully deleted, no commands
974                          * at all allowed down */
975                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
976                                sdev->host->host_no, sdev->id, sdev->lun);
977                         return BLKPREP_KILL;
978                 }
979                 /* OK, we only allow special commands (i.e. not
980                  * user initiated ones) */
981                 specials_only = sdev->sdev_state;
982         }
983
984         /*
985          * Find the actual device driver associated with this command.
986          * The SPECIAL requests are things like character device or
987          * ioctls, which did not originate from ll_rw_blk.  Note that
988          * the special field is also used to indicate the cmd for
989          * the remainder of a partially fulfilled request that can 
990          * come up when there is a medium error.  We have to treat
991          * these two cases differently.  We differentiate by looking
992          * at request->cmd, as this tells us the real story.
993          */
994         if (req->flags & REQ_SPECIAL) {
995                 struct scsi_request *sreq = req->special;
996
997                 if (sreq->sr_magic == SCSI_REQ_MAGIC) {
998                         cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
999                         if (unlikely(!cmd))
1000                                 goto defer;
1001                         scsi_init_cmd_from_req(cmd, sreq);
1002                 } else
1003                         cmd = req->special;
1004         } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1005
1006                 if(unlikely(specials_only)) {
1007                         if(specials_only == SDEV_QUIESCE)
1008                                 return BLKPREP_DEFER;
1009                         
1010                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
1011                                sdev->host->host_no, sdev->id, sdev->lun);
1012                         return BLKPREP_KILL;
1013                 }
1014                         
1015                         
1016                 /*
1017                  * Now try and find a command block that we can use.
1018                  */
1019                 if (!req->special) {
1020                         cmd = scsi_get_command(sdev, GFP_ATOMIC);
1021                         if (unlikely(!cmd))
1022                                 goto defer;
1023                 } else
1024                         cmd = req->special;
1025                 
1026                 /* pull a tag out of the request if we have one */
1027                 cmd->tag = req->tag;
1028         } else {
1029                 blk_dump_rq_flags(req, "SCSI bad req");
1030                 return BLKPREP_KILL;
1031         }
1032         
1033         /* note the overloading of req->special.  When the tag
1034          * is active it always means cmd.  If the tag goes
1035          * back for re-queueing, it may be reset */
1036         req->special = cmd;
1037         cmd->request = req;
1038         
1039         /*
1040          * FIXME: drop the lock here because the functions below
1041          * expect to be called without the queue lock held.  Also,
1042          * previously, we dequeued the request before dropping the
1043          * lock.  We hope REQ_STARTED prevents anything untoward from
1044          * happening now.
1045          */
1046         if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1047                 struct scsi_driver *drv;
1048                 int ret;
1049
1050                 /*
1051                  * This will do a couple of things:
1052                  *  1) Fill in the actual SCSI command.
1053                  *  2) Fill in any other upper-level specific fields
1054                  * (timeout).
1055                  *
1056                  * If this returns 0, it means that the request failed
1057                  * (reading past end of disk, reading offline device,
1058                  * etc).   This won't actually talk to the device, but
1059                  * some kinds of consistency checking may cause the     
1060                  * request to be rejected immediately.
1061                  */
1062
1063                 /* 
1064                  * This sets up the scatter-gather table (allocating if
1065                  * required).
1066                  */
1067                 ret = scsi_init_io(cmd);
1068                 if (ret)        /* BLKPREP_KILL return also releases the command */
1069                         return ret;
1070                 
1071                 /*
1072                  * Initialize the actual SCSI command for this request.
1073                  */
1074                 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1075                 if (unlikely(!drv->init_command(cmd))) {
1076                         scsi_release_buffers(cmd);
1077                         scsi_put_command(cmd);
1078                         return BLKPREP_KILL;
1079                 }
1080         }
1081
1082         /*
1083          * The request is now prepped, no need to come back here
1084          */
1085         req->flags |= REQ_DONTPREP;
1086         return BLKPREP_OK;
1087
1088  defer:
1089         /* If we defer, elv_next_request() returns NULL, but the queue
1090          * must be restarted later, so plug it here if no outstanding
1091          * command will do that for us when it completes. */
1092         if (sdev->device_busy == 0)
1093                 blk_plug_device(q);
1094         return BLKPREP_DEFER;
1095 }
1096
1097 /*
1098  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1099  * return 0.
1100  *
1101  * Called with the queue_lock held.
1102  */
1103 static inline int scsi_dev_queue_ready(struct request_queue *q,
1104                                   struct scsi_device *sdev)
1105 {
1106         if (sdev->device_busy >= sdev->queue_depth)
1107                 return 0;
1108         if (sdev->device_busy == 0 && sdev->device_blocked) {
1109                 /*
1110                  * unblock after device_blocked iterates to zero
1111                  */
1112                 if (--sdev->device_blocked == 0) {
1113                         SCSI_LOG_MLQUEUE(3,
1114                                 printk("scsi%d (%d:%d) unblocking device at"
1115                                        " zero depth\n", sdev->host->host_no,
1116                                        sdev->id, sdev->lun));
1117                 } else {
1118                         blk_plug_device(q);
1119                         return 0;
1120                 }
1121         }
1122         if (sdev->device_blocked)
1123                 return 0;
1124
1125         return 1;
1126 }
1127
1128 /*
1129  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1130  * return 0. We must end up running the queue again whenever 0 is
1131  * returned, else IO can hang.
1132  *
1133  * Called with host_lock held.
1134  */
1135 static inline int scsi_host_queue_ready(struct request_queue *q,
1136                                    struct Scsi_Host *shost,
1137                                    struct scsi_device *sdev)
1138 {
1139         if (test_bit(SHOST_RECOVERY, &shost->shost_state))
1140                 return 0;
1141         if (shost->host_busy == 0 && shost->host_blocked) {
1142                 /*
1143                  * unblock after host_blocked iterates to zero
1144                  */
1145                 if (--shost->host_blocked == 0) {
1146                         SCSI_LOG_MLQUEUE(3,
1147                                 printk("scsi%d unblocking host at zero depth\n",
1148                                         shost->host_no));
1149                 } else {
1150                         blk_plug_device(q);
1151                         return 0;
1152                 }
1153         }
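        /*
         * If the host cannot take another command right now, park this
         * device on the starved list; scsi_run_queue() will kick its queue
         * again once the host frees up.
         */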
1154         if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1155             shost->host_blocked || shost->host_self_blocked) {
1156                 if (list_empty(&sdev->starved_entry))
1157                         list_add_tail(&sdev->starved_entry, &shost->starved_list);
1158                 return 0;
1159         }
1160
1161         /* We're OK to process the command, so we can't be starved */
1162         if (!list_empty(&sdev->starved_entry))
1163                 list_del_init(&sdev->starved_entry);
1164
1165         return 1;
1166 }
1167
1168 /*
1169  * Function:    scsi_request_fn()
1170  *
1171  * Purpose:     Main strategy routine for SCSI.
1172  *
1173  * Arguments:   q       - Pointer to actual queue.
1174  *
1175  * Returns:     Nothing
1176  *
1177  * Lock status: IO request lock assumed to be held when called.
1178  */
1179 static void scsi_request_fn(struct request_queue *q)
1180 {
1181         struct scsi_device *sdev = q->queuedata;
1182         struct Scsi_Host *shost = sdev->host;
1183         struct scsi_cmnd *cmd;
1184         struct request *req;
1185
1186         if(!get_device(&sdev->sdev_gendev))
1187                 /* We must be tearing the block queue down already */
1188                 return;
1189
1190         /*
1191          * To start with, we keep looping until the queue is empty, or until
1192          * the host is no longer able to accept any more requests.
1193          */
1194         while (!blk_queue_plugged(q)) {
1195                 int rtn;
1196                 /*
1197                  * get next queueable request.  We do this early to make sure
1198                  * that the request is fully prepared even if we cannot 
1199                  * accept it.
1200                  */
1201                 req = elv_next_request(q);
1202                 if (!req || !scsi_dev_queue_ready(q, sdev))
1203                         break;
1204
1205                 if (unlikely(!scsi_device_online(sdev))) {
1206                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1207                                sdev->host->host_no, sdev->id, sdev->lun);
1208                         blkdev_dequeue_request(req);
1209                         req->flags |= REQ_QUIET;
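                        /* fail the whole request without ever sending it to the device */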
1210                         while (end_that_request_first(req, 0, req->nr_sectors))
1211                                 ;
1212                         end_that_request_last(req);
1213                         continue;
1214                 }
1215
1216
1217                 /*
1218                  * Remove the request from the request list.
1219                  */
1220                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1221                         blkdev_dequeue_request(req);
1222                 sdev->device_busy++;
1223
1224                 spin_unlock(q->queue_lock);
1225                 spin_lock(shost->host_lock);
1226
1227                 if (!scsi_host_queue_ready(q, shost, sdev))
1228                         goto not_ready;
1229                 if (sdev->single_lun) {
1230                         if (sdev->sdev_target->starget_sdev_user &&
1231                             sdev->sdev_target->starget_sdev_user != sdev)
1232                                 goto not_ready;
1233                         sdev->sdev_target->starget_sdev_user = sdev;
1234                 }
1235                 shost->host_busy++;
1236
1237                 /*
1238                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1239                  *              take the lock again.
1240                  */
1241                 spin_unlock_irq(shost->host_lock);
1242
1243                 cmd = req->special;
1244                 if (unlikely(cmd == NULL)) {
1245                         printk(KERN_CRIT "impossible request in %s.\n"
1246                                          "please mail a stack trace to "
1247                                          "linux-scsi@vger.kernel.org",
1248                                          __FUNCTION__);
1249                         BUG();
1250                 }
1251
1252                 /*
1253                  * Finally, initialize any error handling parameters, and set up
1254                  * the timers for timeouts.
1255                  */
1256                 scsi_init_cmd_errh(cmd);
1257
1258                 /*
1259                  * Dispatch the command to the low-level driver.
1260                  */
1261                 rtn = scsi_dispatch_cmd(cmd);
1262                 spin_lock_irq(q->queue_lock);
1263                 if(rtn) {
1264                         /* we're refusing the command; because of
1265                          * the way locks get dropped, we need to 
1266                          * check here if plugging is required */
1267                         if(sdev->device_busy == 0)
1268                                 blk_plug_device(q);
1269
1270                         break;
1271                 }
1272         }
1273
1274         goto out;
1275
1276  not_ready:
1277         spin_unlock_irq(shost->host_lock);
1278
1279         /*
1280          * lock q, handle tag, requeue req, and decrement device_busy. We
1281          * must return with queue_lock held.
1282          *
1283          * Decrementing device_busy without checking it is OK, as all such
1284          * cases (host limits or settings) should run the queue at some
1285          * later time.
1286          */
1287         spin_lock_irq(q->queue_lock);
1288         blk_requeue_request(q, req);
1289         sdev->device_busy--;
1290         if(sdev->device_busy == 0)
1291                 blk_plug_device(q);
1292  out:
1293         /* must be careful here...if we trigger the ->remove() function
1294          * we cannot be holding the q lock */
1295         spin_unlock_irq(q->queue_lock);
1296         put_device(&sdev->sdev_gendev);
1297         spin_lock_irq(q->queue_lock);
1298 }
1299
1300 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1301 {
1302         struct device *host_dev;
1303
1304         if (shost->unchecked_isa_dma)
1305                 return BLK_BOUNCE_ISA;
1306
1307         host_dev = scsi_get_device(shost);
1308         if (PCI_DMA_BUS_IS_PHYS && host_dev && host_dev->dma_mask)
1309                 return *host_dev->dma_mask;
1310
1311         /*
1312          * Platforms with virtual-DMA translation
1313          * hardware have no practical limit.
1314          */
1315         return BLK_BOUNCE_ANY;
1316 }
1317
1318 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1319 {
1320         struct Scsi_Host *shost = sdev->host;
1321         struct request_queue *q;
1322
1323         q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
1324         if (!q)
1325                 return NULL;
1326
1327         blk_queue_prep_rq(q, scsi_prep_fn);
1328
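        /* propagate the host adapter's segment and DMA limits to the block layer */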
1329         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1330         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1331         blk_queue_max_sectors(q, shost->max_sectors);
1332         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1333         blk_queue_segment_boundary(q, shost->dma_boundary);
1334  
1335         if (!shost->use_clustering)
1336                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1337         return q;
1338 }
1339
1340 void scsi_free_queue(struct request_queue *q)
1341 {
1342         blk_cleanup_queue(q);
1343 }
1344
1345 /*
1346  * Function:    scsi_block_requests()
1347  *
1348  * Purpose:     Utility function used by low-level drivers to prevent further
1349  *              commands from being queued to the device.
1350  *
1351  * Arguments:   shost       - Host in question
1352  *
1353  * Returns:     Nothing
1354  *
1355  * Lock status: No locks are assumed held.
1356  *
1357  * Notes:       There is no timer nor any other means by which the requests
1358  *              get unblocked other than the low-level driver calling
1359  *              scsi_unblock_requests().
1360  */
1361 void scsi_block_requests(struct Scsi_Host *shost)
1362 {
1363         shost->host_self_blocked = 1;
1364 }
1365
1366 /*
1367  * Function:    scsi_unblock_requests()
1368  *
1369  * Purpose:     Utility function used by low-level drivers to allow further
1370  *              commands to be queued to the device.
1371  *
1372  * Arguments:   shost       - Host in question
1373  *
1374  * Returns:     Nothing
1375  *
1376  * Lock status: No locks are assumed held.
1377  *
1378  * Notes:       There is no timer nor any other means by which the requests
1379  *              get unblocked other than the low-level driver calling
1380  *              scsi_unblock_requests().
1381  *
1382  *              This is done as an API function so that changes to the
1383  *              internals of the scsi mid-layer won't require wholesale
1384  *              changes to drivers that use this feature.
1385  */
1386 void scsi_unblock_requests(struct Scsi_Host *shost)
1387 {
1388         shost->host_self_blocked = 0;
1389         scsi_run_host_queues(shost);
1390 }
1391
1392 int __init scsi_init_queue(void)
1393 {
1394         int i;
1395
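        /* create one slab cache and mempool for each entry in scsi_sg_pools[] */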
1396         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1397                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1398                 int size = sgp->size * sizeof(struct scatterlist);
1399
1400                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1401                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
1402                 if (!sgp->slab) {
1403                         printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1404                                         sgp->name);
1405                 }
1406
1407                 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1408                                 mempool_alloc_slab, mempool_free_slab,
1409                                 sgp->slab);
1410                 if (!sgp->pool) {
1411                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1412                                         sgp->name);
1413                 }
1414         }
1415
1416         return 0;
1417 }
1418
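/*
 * Function:    scsi_exit_queue()
 *
 * Purpose:     Tear down the scatterlist mempools and slab caches created
 *              by scsi_init_queue().
 */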
1419 void scsi_exit_queue(void)
1420 {
1421         int i;
1422
1423         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1424                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1425                 mempool_destroy(sgp->pool);
1426                 kmem_cache_destroy(sgp->slab);
1427         }
1428 }
1429 /**
1430  *      __scsi_mode_sense - issue a mode sense, falling back from a
1431  *              ten byte to a six byte command if necessary.
1432  *      @sreq:  SCSI request to fill in with the MODE_SENSE
1433  *      @dbd:   set if mode sense will disable block descriptors in the return
1434  *      @modepage: mode page being requested
1435  *      @buffer: request buffer (may not be smaller than eight bytes)
1436  *      @len:   length of request buffer.
1437  *      @timeout: command timeout
1438  *      @retries: number of retries before failing
1439  *      @data: returns a structure abstracting the mode header data
1440  *
1441  *      Returns the SCSI result of the command (check it with
1442  *      scsi_status_is_good()); on success the header offset (4 for a
1443  *      six byte, 8 for a ten byte command) is in @data->header_length.
1444  **/
1445 int
1446 __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1447                   unsigned char *buffer, int len, int timeout, int retries,
1448                   struct scsi_mode_data *data) {
1449         unsigned char cmd[12];
1450         int use_10_for_ms;
1451         int header_length;
1452
1453         memset(data, 0, sizeof(*data));
1454         memset(&cmd[0], 0, 12);
1455         cmd[1] = dbd & 0x18;    /* allows DBD and LLBAA bits */
1456         cmd[2] = modepage;
1457
1458  retry:
1459         use_10_for_ms = sreq->sr_device->use_10_for_ms;
1460
1461         if (use_10_for_ms) {
1462                 if (len < 8)
1463                         len = 8;
1464
1465                 cmd[0] = MODE_SENSE_10;
1466                 cmd[8] = len;
1467                 header_length = 8;
1468         } else {
1469                 if (len < 4)
1470                         len = 4;
1471
1472                 cmd[0] = MODE_SENSE;
1473                 cmd[4] = len;
1474                 header_length = 4;
1475         }
1476
1477         sreq->sr_cmd_len = 0;
1478         sreq->sr_sense_buffer[0] = 0;
1479         sreq->sr_sense_buffer[2] = 0;
1480         sreq->sr_data_direction = DMA_FROM_DEVICE;
1481
1482         memset(buffer, 0, len);
1483
1484         scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);
1485
1486         /* This code looks awful: what it's doing is making sure an
1487          * ILLEGAL REQUEST sense return identifies the actual command
1488          * byte as the problem.  MODE_SENSE commands can return
1489          * ILLEGAL REQUEST if the code page isn't supported */
1490         if (use_10_for_ms && ! scsi_status_is_good(sreq->sr_result) &&
1491             (driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
1492             sreq->sr_sense_buffer[2] == ILLEGAL_REQUEST &&
1493             (sreq->sr_sense_buffer[4] & 0x40) == 0x40 &&
1494             sreq->sr_sense_buffer[5] == 0 &&
1495             sreq->sr_sense_buffer[6] == 0 ) {
1496                 sreq->sr_device->use_10_for_ms = 0;
1497                 goto retry;
1498         }
1499
1500         if(scsi_status_is_good(sreq->sr_result)) {
1501                 data->header_length = header_length;
1502                 if(use_10_for_ms) {
1503                         data->length = buffer[0]*256 + buffer[1] + 2;
1504                         data->medium_type = buffer[2];
1505                         data->device_specific = buffer[3];
1506                         data->longlba = buffer[4] & 0x01;
1507                         data->block_descriptor_length = buffer[6]*256
1508                                 + buffer[7];
1509                 } else {
1510                         data->length = buffer[0] + 1;
1511                         data->medium_type = buffer[1];
1512                         data->device_specific = buffer[2];
1513                         data->block_descriptor_length = buffer[3];
1514                 }
1515         }
1516
1517         return sreq->sr_result;
1518 }
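
/*
 * For reference, the mode parameter header layouts decoded above:
 *
 *	MODE SENSE(6), 4 byte header:
 *		byte 0		mode data length
 *		byte 1		medium type
 *		byte 2		device-specific parameter
 *		byte 3		block descriptor length
 *
 *	MODE SENSE(10), 8 byte header:
 *		bytes 0-1	mode data length
 *		byte 2		medium type
 *		byte 3		device-specific parameter
 *		byte 4, bit 0	LONGLBA
 *		bytes 6-7	block descriptor length
 */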
1519
1520 /**
1521  *      scsi_mode_sense - issue a mode sense, falling back from a
1522  *              ten byte to a six byte command if necessary.
1523  *      @sdev:  scsi device to send command to.
1524  *      @dbd:   set if mode sense will disable block descriptors in the return
1525  *      @modepage: mode page being requested
1526  *      @buffer: request buffer (may not be smaller than eight bytes)
1527  *      @len:   length of request buffer.
1528  *      @timeout: command timeout
1529  *      @retries: number of retries before failing
1530  *
1531  *      Returns -1 if no request could be allocated, otherwise the
1532  *      SCSI result of the command (check it with scsi_status_is_good());
1533  *      on success the header offset is placed in @data->header_length.
1534  **/
1535 int
1536 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1537                 unsigned char *buffer, int len, int timeout, int retries,
1538                 struct scsi_mode_data *data)
1539 {
1540         struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1541         int ret;
1542
1543         if (!sreq)
1544                 return -1;
1545
1546         ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
1547                                 timeout, retries, data);
1548
1549         scsi_release_request(sreq);
1550
1551         return ret;
1552 }
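
/*
 * Illustrative sketch: a caller fetching the caching mode page (0x08) with
 * scsi_mode_sense() and using the returned scsi_mode_data to locate the
 * page data past the header and any block descriptors.  The buffer size,
 * timeout and retry count below are arbitrary example values.
 */
#if 0
static int example_wce_enabled(struct scsi_device *sdev)
{
	unsigned char buffer[64];
	struct scsi_mode_data data;
	unsigned char *page;
	int res;

	res = scsi_mode_sense(sdev, 0, 0x08 /* caching page */, buffer,
			      sizeof(buffer), 5 * HZ, 3, &data);
	if (!scsi_status_is_good(res))
		return -EIO;

	/* mode page data follows the header and any block descriptors */
	page = buffer + data.header_length + data.block_descriptor_length;
	return (page[2] & 0x04) ? 1 : 0;	/* WCE bit */
}
#endif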
1553
1554 /**
1555  *      scsi_device_set_state - Take the given device through the device
1556  *              state model.
1557  *      @sdev:  scsi device to change the state of.
1558  *      @state: state to change to.
1559  *
1560  *      Returns zero if successful or an error if the requested
1561  *      transition is illegal.
1562  **/
1563 int
1564 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1565 {
1566         enum scsi_device_state oldstate = sdev->sdev_state;
1567
1568         if (state == oldstate)
1569                 return 0;
1570
1571         switch (state) {
1572         case SDEV_CREATED:
1573                 /* There are no legal states that come back to
1574                  * created.  This is the manually initialised start
1575                  * state */
1576                 goto illegal;
1577                         
1578         case SDEV_RUNNING:
1579                 switch (oldstate) {
1580                 case SDEV_CREATED:
1581                 case SDEV_OFFLINE:
1582                 case SDEV_QUIESCE:
1583                         break;
1584                 default:
1585                         goto illegal;
1586                 }
1587                 break;
1588
1589         case SDEV_QUIESCE:
1590                 switch (oldstate) {
1591                 case SDEV_RUNNING:
1592                 case SDEV_OFFLINE:
1593                         break;
1594                 default:
1595                         goto illegal;
1596                 }
1597                 break;
1598
1599         case SDEV_OFFLINE:
1600                 switch (oldstate) {
1601                 case SDEV_CREATED:
1602                 case SDEV_RUNNING:
1603                 case SDEV_QUIESCE:
1604                         break;
1605                 default:
1606                         goto illegal;
1607                 }
1608                 break;
1609
1610         case SDEV_CANCEL:
1611                 switch (oldstate) {
1612                 case SDEV_CREATED:
1613                 case SDEV_RUNNING:
1614                 case SDEV_OFFLINE:
1615                         break;
1616                 default:
1617                         goto illegal;
1618                 }
1619                 break;
1620
1621         case SDEV_DEL:
1622                 switch (oldstate) {
1623                 case SDEV_CANCEL:
1624                         break;
1625                 default:
1626                         goto illegal;
1627                 }
1628                 break;
1629
1630         }
1631         sdev->sdev_state = state;
1632         return 0;
1633
1634  illegal:
1635         dev_printk(KERN_ERR, &sdev->sdev_gendev,
1636                    "Illegal state transition %s->%s\n",
1637                    scsi_device_state_name(oldstate),
1638                    scsi_device_state_name(state));
1639         WARN_ON(1);
1640         return -EINVAL;
1641 }
1642 EXPORT_SYMBOL(scsi_device_set_state);
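
/*
 * For reference, the transitions accepted above ("-" marks the trivial
 * same-state case, which returns 0 before the switch):
 *
 *	from \ to	CREATED	RUNNING	QUIESCE	OFFLINE	CANCEL	DEL
 *	CREATED		-	yes	no	yes	yes	no
 *	RUNNING		no	-	yes	yes	yes	no
 *	QUIESCE		no	yes	-	yes	no	no
 *	OFFLINE		no	yes	yes	-	yes	no
 *	CANCEL		no	no	no	no	-	yes
 *	DEL		no	no	no	no	no	-
 */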
1643
1644 /**
1645  *      scsi_device_quiesce - Block user issued commands.
1646  *      @sdev:  scsi device to quiesce.
1647  *
1648  *      This works by trying to transition to the SDEV_QUIESCE state
1649  *      (which must be a legal transition).  When the device is in this
1650  *      state, only special requests will be accepted, all others will
1651  *      be deferred.  Since special requests may also be requeued requests,
1652  *      a successful return doesn't guarantee the device will be 
1653  *      totally quiescent.
1654  *
1655  *      Must be called with user context, may sleep.
1656  *
1657  *      Returns zero if successful or an error if not.
1658  **/
1659 int
1660 scsi_device_quiesce(struct scsi_device *sdev)
1661 {
1662         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1663         if (err)
1664                 return err;
1665
1666         scsi_run_queue(sdev->request_queue);
1667         while (sdev->device_busy) {
1668                 schedule_timeout(HZ/5);
1669                 scsi_run_queue(sdev->request_queue);
1670         }
1671         return 0;
1672 }
1673 EXPORT_SYMBOL(scsi_device_quiesce);
1674
1675 /**
1676  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1677  *      @sdev:  scsi device to resume.
1678  *
1679  *      Moves the device from quiesced back to running and restarts the
1680  *      queues.
1681  *
1682  *      Must be called with user context, may sleep.
1683  **/
1684 void
1685 scsi_device_resume(struct scsi_device *sdev)
1686 {
1687         if(scsi_device_set_state(sdev, SDEV_RUNNING))
1688                 return;
1689         scsi_run_queue(sdev->request_queue);
1690 }
1691 EXPORT_SYMBOL(scsi_device_resume);
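
/*
 * Illustrative sketch: the intended pairing of scsi_device_quiesce() and
 * scsi_device_resume() around work that must not compete with normal I/O.
 * example_special_requests() is a hypothetical routine issuing commands
 * via the special-request path.
 */
#if 0
static int example_maintenance(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);	/* defer ordinary commands */
	if (err)
		return err;

	err = example_special_requests(sdev);	/* hypothetical work */

	scsi_device_resume(sdev);		/* restart the queue */
	return err;
}
#endif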
1692