[linux-2.6.git] / drivers/scsi/scsi_lib.c  (patch-2_6_7-vs1_9_1_12)
1 /*
2  *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
3  *
4  *  SCSI queueing library.
5  *      Initial versions: Eric Youngdale (eric@andante.org).
6  *                        Based upon conversations with large numbers
7  *                        of people at Linux Expo.
8  */
9
10 #include <linux/bio.h>
11 #include <linux/blkdev.h>
12 #include <linux/completion.h>
13 #include <linux/kernel.h>
14 #include <linux/mempool.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/pci.h>
18
19 #include <scsi/scsi_driver.h>
20 #include <scsi/scsi_host.h>
21 #include "scsi.h"
22
23 #include "scsi_priv.h"
24 #include "scsi_logging.h"
25
26
27 #define SG_MEMPOOL_NR           (sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
28 #define SG_MEMPOOL_SIZE         32
29
30 struct scsi_host_sg_pool {
31         size_t          size;
32         char            *name; 
33         kmem_cache_t    *slab;
34         mempool_t       *pool;
35 };
36
37 #if (SCSI_MAX_PHYS_SEGMENTS < 32)
38 #error SCSI_MAX_PHYS_SEGMENTS is too small
39 #endif
40
41 #define SP(x) { x, "sgpool-" #x } 
42 struct scsi_host_sg_pool scsi_sg_pools[] = { 
43         SP(8),
44         SP(16),
45         SP(32),
46 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
47         SP(64),
48 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
49         SP(128),
50 #if (SCSI_MAX_PHYS_SEGMENTS > 128)
51         SP(256),
52 #if (SCSI_MAX_PHYS_SEGMENTS > 256)
53 #error SCSI_MAX_PHYS_SEGMENTS is too large
54 #endif
55 #endif
56 #endif
57 #endif
58 };      
59 #undef SP
60
61
62 /*
63  * Function:    scsi_insert_special_req()
64  *
65  * Purpose:     Insert pre-formed request into request queue.
66  *
67  * Arguments:   sreq    - request that is ready to be queued.
68  *              at_head - boolean.  True if we should insert at head
69  *                        of queue, false if we should insert at tail.
70  *
71  * Lock status: Assumed that lock is not held upon entry.
72  *
73  * Returns:     Nothing
74  *
75  * Notes:       This function is called from character device and from
76  *              ioctl types of functions where the caller knows exactly
77  *              what SCSI command needs to be issued.   The idea is that
78  *              we merely inject the command into the queue (at the head
79  *              for now), and then call the queue request function to actually
80  *              process it.
81  */
82 int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
83 {
84         /*
85          * Because users of this function are apt to reuse requests with no
86          * modification, we have to sanitise the request flags here
87          */
88         sreq->sr_request->flags &= ~REQ_DONTPREP;
89         blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
90                            at_head, sreq, 0);
91         return 0;
92 }
93
94 /*
95  * Function:    scsi_queue_insert()
96  *
97  * Purpose:     Insert a command in the midlevel queue.
98  *
99  * Arguments:   cmd    - command that we are adding to queue.
100  *              reason - why we are inserting command to queue.
101  *
102  * Lock status: Assumed that lock is not held upon entry.
103  *
104  * Returns:     Nothing.
105  *
106  * Notes:       We do this for one of two cases.  Either the host is busy
107  *              and it cannot accept any more commands for the time being,
108  *              or the device returned QUEUE_FULL and can accept no more
109  *              commands.
110  * Notes:       This could be called either from an interrupt context or a
111  *              normal process context.
112  */
113 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
114 {
115         struct Scsi_Host *host = cmd->device->host;
116         struct scsi_device *device = cmd->device;
117
118         SCSI_LOG_MLQUEUE(1,
119                  printk("Inserting command %p into mlqueue\n", cmd));
120
121         /*
122          * We are inserting the command into the ml queue.  First, we
123          * cancel the timer, so it doesn't time out.
124          */
125         scsi_delete_timer(cmd);
126
127         /*
128          * Next, set the appropriate busy bit for the device/host.
129          *
130          * If the host/device isn't busy, assume that something actually
131          * completed, and that we should be able to queue a command now.
132          *
133          * Note that the prior mid-layer assumption that any host could
134          * always queue at least one command is now broken.  The mid-layer
135          * will implement a user specifiable stall (see
136          * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
137          * if a command is requeued with no other commands outstanding
138          * either for the device or for the host.
139          */
140         if (reason == SCSI_MLQUEUE_HOST_BUSY)
141                 host->host_blocked = host->max_host_blocked;
142         else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
143                 device->device_blocked = device->max_device_blocked;
144
145         /*
146          * Register the fact that we own the thing for now.
147          */
148         cmd->state = SCSI_STATE_MLQUEUE;
149         cmd->owner = SCSI_OWNER_MIDLEVEL;
150
151         /*
152          * Decrement the counters, since these commands are no longer
153          * active on the host/device.
154          */
155         scsi_device_unbusy(device);
156
157         /*
158          * Insert this command at the head of the queue for its device.
159          * It will go before all other commands that are already in the queue.
160          *
161          * NOTE: there is magic here about the way the queue is plugged if
162          * we have no outstanding commands.
163          * 
164          * Although this *doesn't* plug the queue, it does call the request
165          * function.  The SCSI request function detects the blocked condition
166          * and plugs the queue appropriately.
167          */
168         blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
169         return 0;
170 }
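/*
 * Illustrative sketch of how a command normally reaches scsi_queue_insert():
 * a low-level driver's queuecommand() method refuses the command with one of
 * the SCSI_MLQUEUE_* reason codes, and it typically ends up back here via
 * scsi_dispatch_cmd() with the matching reason.  example_queuecommand() and
 * adapter_is_saturated() below are hypothetical names used only for the
 * sketch.
 *
 *	static int example_queuecommand(struct scsi_cmnd *cmd,
 *					void (*done)(struct scsi_cmnd *))
 *	{
 *		if (adapter_is_saturated())
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *		return 0;
 *	}
 */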
171
172 /*
173  * Function:    scsi_do_req
174  *
175  * Purpose:     Queue a SCSI request
176  *
177  * Arguments:   sreq      - command descriptor.
178  *              cmnd      - actual SCSI command to be performed.
179  *              buffer    - data buffer.
180  *              bufflen   - size of data buffer.
181  *              done      - completion function to be run.
182  *              timeout   - how long to let it run before timeout.
183  *              retries   - number of retries we allow.
184  *
185  * Lock status: No locks held upon entry.
186  *
187  * Returns:     Nothing.
188  *
189  * Notes:       This function is only used for queueing requests for things
190  *              like ioctls and character device requests - this is because
191  *              we essentially just inject a request into the queue for the
192  *              device.
193  *
194  *              In order to support the scsi_device_quiesce function, we
195  *              now inject requests on the *head* of the device queue
196  *              rather than the tail.
197  */
198 void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
199                  void *buffer, unsigned bufflen,
200                  void (*done)(struct scsi_cmnd *),
201                  int timeout, int retries)
202 {
203         /*
204          * If the upper level driver is reusing these things, then
205          * we should release the low-level block now.  Another one will
206          * be allocated later when this request is getting queued.
207          */
208         __scsi_release_request(sreq);
209
210         /*
211          * Our own function scsi_done (which marks the host as not busy,
212          * disables the timeout counter, etc) will be called by us or by the
213          * scsi_hosts[host].queuecommand() function; it in turn calls
214          * the completion function supplied by the high level driver.
215          */
216         memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
217         sreq->sr_bufflen = bufflen;
218         sreq->sr_buffer = buffer;
219         sreq->sr_allowed = retries;
220         sreq->sr_done = done;
221         sreq->sr_timeout_per_command = timeout;
222
223         if (sreq->sr_cmd_len == 0)
224                 sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
225
226         /*
227          * head injection *required* here otherwise quiesce won't work
228          */
229         scsi_insert_special_req(sreq, 1);
230 }
231  
232 static void scsi_wait_done(struct scsi_cmnd *cmd)
233 {
234         struct request *req = cmd->request;
235         struct request_queue *q = cmd->device->request_queue;
236         unsigned long flags;
237
238         req->rq_status = RQ_SCSI_DONE;  /* Busy, but indicate request done */
239
240         spin_lock_irqsave(q->queue_lock, flags);
241         if (blk_rq_tagged(req))
242                 blk_queue_end_tag(q, req);
243         spin_unlock_irqrestore(q->queue_lock, flags);
244
245         if (req->waiting)
246                 complete(req->waiting);
247 }
248
249 void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
250                    unsigned bufflen, int timeout, int retries)
251 {
252         DECLARE_COMPLETION(wait);
253         
254         sreq->sr_request->waiting = &wait;
255         sreq->sr_request->rq_status = RQ_SCSI_BUSY;
256         scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
257                         timeout, retries);
258         generic_unplug_device(sreq->sr_device->request_queue);
259         wait_for_completion(&wait);
260         sreq->sr_request->waiting = NULL;
261         if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
262                 sreq->sr_result |= (DRIVER_ERROR << 24);
263
264         __scsi_release_request(sreq);
265 }
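/*
 * Illustrative sketch of a typical scsi_wait_req() caller (compare
 * __scsi_mode_sense() later in this file).  It assumes an already allocated
 * struct scsi_request *sreq for the target device; only the fields consumed
 * by scsi_do_req()/scsi_wait_req() are shown.
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *
 *	sreq->sr_cmd_len = 0;
 *	sreq->sr_data_direction = DMA_NONE;
 *	scsi_wait_req(sreq, cmd, NULL, 0, 30 * HZ, 3);
 *	if (scsi_status_is_good(sreq->sr_result))
 *		printk("device is ready\n");
 */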
266
267 /*
268  * Function:    scsi_init_cmd_errh()
269  *
270  * Purpose:     Initialize cmd fields related to error handling.
271  *
272  * Arguments:   cmd     - command that is ready to be queued.
273  *
274  * Returns:     1
275  *
276  * Notes:       This function has the job of initializing a number of
277  *              fields related to error handling.   Typically this will
278  *              be called once for each command, as required.
279  */
280 static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
281 {
282         cmd->owner = SCSI_OWNER_MIDLEVEL;
283         cmd->serial_number = 0;
284         cmd->serial_number_at_timeout = 0;
285         cmd->abort_reason = 0;
286
287         memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
288
289         if (cmd->cmd_len == 0)
290                 cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
291
292         /*
293          * We need saved copies of a number of fields - this is because
294          * error handling may need to overwrite these with different values
295          * to run different commands, and once error handling is complete,
296          * we will need to restore these values prior to running the actual
297          * command.
298          */
299         cmd->old_use_sg = cmd->use_sg;
300         cmd->old_cmd_len = cmd->cmd_len;
301         cmd->sc_old_data_direction = cmd->sc_data_direction;
302         cmd->old_underflow = cmd->underflow;
303         memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
304         cmd->buffer = cmd->request_buffer;
305         cmd->bufflen = cmd->request_bufflen;
306         cmd->internal_timeout = NORMAL_TIMEOUT;
307         cmd->abort_reason = 0;
308
309         return 1;
310 }
311
312 /*
313  * Function:   scsi_setup_cmd_retry()
314  *
315  * Purpose:    Restore the command state for a retry
316  *
317  * Arguments:  cmd      - command to be restored
318  *
319  * Returns:    Nothing
320  *
321  * Notes:      Immediately prior to retrying a command, we need
322  *             to restore certain fields that we saved above.
323  */
324 void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
325 {
326         memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
327         cmd->request_buffer = cmd->buffer;
328         cmd->request_bufflen = cmd->bufflen;
329         cmd->use_sg = cmd->old_use_sg;
330         cmd->cmd_len = cmd->old_cmd_len;
331         cmd->sc_data_direction = cmd->sc_old_data_direction;
332         cmd->underflow = cmd->old_underflow;
333 }
334
335 void scsi_device_unbusy(struct scsi_device *sdev)
336 {
337         struct Scsi_Host *shost = sdev->host;
338         unsigned long flags;
339
340         spin_lock_irqsave(shost->host_lock, flags);
341         shost->host_busy--;
342         if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
343                      shost->host_failed))
344                 scsi_eh_wakeup(shost);
345         spin_unlock(shost->host_lock);
346         spin_lock(&sdev->sdev_lock);
347         sdev->device_busy--;
348         spin_unlock_irqrestore(&sdev->sdev_lock, flags);
349 }
350
351 /*
352  * Called for single_lun devices on IO completion. Clear starget_sdev_user,
353  * and call blk_run_queue for all the scsi_devices on the target -
354  * including current_sdev first.
355  *
356  * Called with *no* scsi locks held.
357  */
358 static void scsi_single_lun_run(struct scsi_device *current_sdev)
359 {
360         struct Scsi_Host *shost = current_sdev->host;
361         struct scsi_device *sdev, *tmp;
362         unsigned long flags;
363
364         spin_lock_irqsave(shost->host_lock, flags);
365         current_sdev->sdev_target->starget_sdev_user = NULL;
366         spin_unlock_irqrestore(shost->host_lock, flags);
367
368         /*
369          * Call blk_run_queue for all LUNs on the target, starting with
370          * current_sdev. We race with others (to set starget_sdev_user),
371          * but in most cases, we will be first. Ideally, each LU on the
372          * target would get some limited time or requests on the target.
373          */
374         blk_run_queue(current_sdev->request_queue);
375
376         spin_lock_irqsave(shost->host_lock, flags);
377         if (current_sdev->sdev_target->starget_sdev_user)
378                 goto out;
379         list_for_each_entry_safe(sdev, tmp, &current_sdev->same_target_siblings,
380                         same_target_siblings) {
381                 if (scsi_device_get(sdev))
382                         continue;
383
384                 spin_unlock_irqrestore(shost->host_lock, flags);
385                 blk_run_queue(sdev->request_queue);
386                 spin_lock_irqsave(shost->host_lock, flags);
387         
388                 scsi_device_put(sdev);
389         }
390  out:
391         spin_unlock_irqrestore(shost->host_lock, flags);
392 }
393
394 /*
395  * Function:    scsi_run_queue()
396  *
397  * Purpose:     Select a proper request queue to serve next
398  *
399  * Arguments:   q       - last request's queue
400  *
401  * Returns:     Nothing
402  *
403  * Notes:       The previous command was completely finished, start
404  *              a new one if possible.
405  */
406 static void scsi_run_queue(struct request_queue *q)
407 {
408         struct scsi_device *sdev = q->queuedata;
409         struct Scsi_Host *shost = sdev->host;
410         unsigned long flags;
411
412         if (sdev->single_lun)
413                 scsi_single_lun_run(sdev);
414
415         spin_lock_irqsave(shost->host_lock, flags);
416         while (!list_empty(&shost->starved_list) &&
417                !shost->host_blocked && !shost->host_self_blocked &&
418                 !((shost->can_queue > 0) &&
419                   (shost->host_busy >= shost->can_queue))) {
420                 /*
421                  * As long as shost is accepting commands and we have
422                  * starved queues, call blk_run_queue. scsi_request_fn
423                  * drops the queue_lock and can add us back to the
424                  * starved_list.
425                  *
426                  * host_lock protects the starved_list and starved_entry.
427                  * scsi_request_fn must get the host_lock before checking
428                  * or modifying starved_list or starved_entry.
429                  */
430                 sdev = list_entry(shost->starved_list.next,
431                                           struct scsi_device, starved_entry);
432                 list_del_init(&sdev->starved_entry);
433                 spin_unlock_irqrestore(shost->host_lock, flags);
434
435                 blk_run_queue(sdev->request_queue);
436
437                 spin_lock_irqsave(shost->host_lock, flags);
438                 if (unlikely(!list_empty(&sdev->starved_entry)))
439                         /*
440                          * sdev lost a race, and was put back on the
441                          * starved list. This is unlikely but without this
442                          * in theory we could loop forever.
443                          */
444                         break;
445         }
446         spin_unlock_irqrestore(shost->host_lock, flags);
447
448         blk_run_queue(q);
449 }
450
451 /*
452  * Function:    scsi_requeue_command()
453  *
454  * Purpose:     Handle post-processing of completed commands.
455  *
456  * Arguments:   q       - queue to operate on
457  *              cmd     - command that may need to be requeued.
458  *
459  * Returns:     Nothing
460  *
461  * Notes:       After command completion, there may be blocks left
462  *              over that weren't finished by the previous command;
463  *              this can be for a number of reasons - the main one is
464  *              I/O errors in the middle of the request, in which case
465  *              we need to request the blocks that come after the bad
466  *              sector.
467  */
468 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
469 {
470         cmd->request->flags &= ~REQ_DONTPREP;
471         blk_insert_request(q, cmd->request, 1, cmd, 1);
472
473         scsi_run_queue(q);
474 }
475
476 void scsi_next_command(struct scsi_cmnd *cmd)
477 {
478         struct request_queue *q = cmd->device->request_queue;
479
480         scsi_put_command(cmd);
481         scsi_run_queue(q);
482 }
483
484 void scsi_run_host_queues(struct Scsi_Host *shost)
485 {
486         struct scsi_device *sdev;
487
488         shost_for_each_device(sdev, shost)
489                 scsi_run_queue(sdev->request_queue);
490 }
491
492 /*
493  * Function:    scsi_end_request()
494  *
495  * Purpose:     Post-processing of completed commands called from interrupt
496  *              handler or a bottom-half handler.
497  *
498  * Arguments:   cmd      - command that is complete.
499  *              uptodate - 1 if I/O indicates success, 0 for I/O error.
500  *              bytes    - number of bytes of I/O to mark as completed.
501  *              requeue  - indicates whether we should requeue any
502  *                         leftovers; if not, the command is returned to
503  *                         the caller so it can deal with them.
504  *
505  * Lock status: Assumed that lock is not held upon entry.
506  *
507  * Returns:     Nothing
508  *
509  * Notes:       This is called for block device requests in order to
510  *              mark some number of sectors as complete.
511  * 
512  *              We are guaranteeing that the request queue will be goosed
513  *              at some point during this call.
514  */
515 static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
516                                           int bytes, int requeue)
517 {
518         request_queue_t *q = cmd->device->request_queue;
519         struct request *req = cmd->request;
520         unsigned long flags;
521
522         /*
523          * If there are blocks left over at the end, set up the command
524          * to queue the remainder of them.
525          */
526         if (end_that_request_chunk(req, uptodate, bytes)) {
527                 int leftover = (req->hard_nr_sectors << 9);
528
529                 if (blk_pc_request(req))
530                         leftover = req->data_len;
531
532                 /* kill remainder if no retries */
533                 if (!uptodate && blk_noretry_request(req))
534                         end_that_request_chunk(req, 0, leftover);
535                 else {
536                         if (requeue)
537                                 /*
538                                  * Bleah.  Leftovers again.  Stick the
539                                  * leftovers in the front of the
540                                  * queue, and goose the queue again.
541                                  */
542                                 scsi_requeue_command(q, cmd);
543
544                         return cmd;
545                 }
546         }
547
548         add_disk_randomness(req->rq_disk);
549
550         spin_lock_irqsave(q->queue_lock, flags);
551         if (blk_rq_tagged(req))
552                 blk_queue_end_tag(q, req);
553         end_that_request_last(req);
554         spin_unlock_irqrestore(q->queue_lock, flags);
555
556         /*
557          * This will goose the queue request function at the end, so we don't
558          * need to worry about launching another command.
559          */
560         scsi_next_command(cmd);
561         return NULL;
562 }
563
564 static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
565 {
566         struct scsi_host_sg_pool *sgp;
567         struct scatterlist *sgl;
568
569         BUG_ON(!cmd->use_sg);
570
571         switch (cmd->use_sg) {
572         case 1 ... 8:
573                 cmd->sglist_len = 0;
574                 break;
575         case 9 ... 16:
576                 cmd->sglist_len = 1;
577                 break;
578         case 17 ... 32:
579                 cmd->sglist_len = 2;
580                 break;
581 #if (SCSI_MAX_PHYS_SEGMENTS > 32)
582         case 33 ... 64:
583                 cmd->sglist_len = 3;
584                 break;
585 #if (SCSI_MAX_PHYS_SEGMENTS > 64)
586         case 65 ... 128:
587                 cmd->sglist_len = 4;
588                 break;
589 #if (SCSI_MAX_PHYS_SEGMENTS  > 128)
590         case 129 ... 256:
591                 cmd->sglist_len = 5;
592                 break;
593 #endif
594 #endif
595 #endif
596         default:
597                 return NULL;
598         }
599
600         sgp = scsi_sg_pools + cmd->sglist_len;
601         sgl = mempool_alloc(sgp->pool, gfp_mask);
602         if (sgl)
603                 memset(sgl, 0, sgp->size);
604         return sgl;
605 }
606
607 static void scsi_free_sgtable(struct scatterlist *sgl, int index)
608 {
609         struct scsi_host_sg_pool *sgp;
610
611         BUG_ON(index >= SG_MEMPOOL_NR);
612
613         sgp = scsi_sg_pools + index;
614         mempool_free(sgl, sgp->pool);
615 }
616
617 /*
618  * Function:    scsi_release_buffers()
619  *
620  * Purpose:     Completion processing for block device I/O requests.
621  *
622  * Arguments:   cmd     - command that we are bailing.
623  *
624  * Lock status: Assumed that no lock is held upon entry.
625  *
626  * Returns:     Nothing
627  *
628  * Notes:       In the event that an upper level driver rejects a
629  *              command, we must release resources allocated during
630  *              the __init_io() function.  Primarily this would involve
631  *              the scatter-gather table, and potentially any bounce
632  *              buffers.
633  */
634 static void scsi_release_buffers(struct scsi_cmnd *cmd)
635 {
636         struct request *req = cmd->request;
637
638         /*
639          * Free up any indirection buffers we allocated for DMA purposes. 
640          */
641         if (cmd->use_sg)
642                 scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
643         else if (cmd->request_buffer != req->buffer)
644                 kfree(cmd->request_buffer);
645
646         /*
647          * Zero these out.  They now point to freed memory, and it is
648          * dangerous to hang onto the pointers.
649          */
650         cmd->buffer  = NULL;
651         cmd->bufflen = 0;
652         cmd->request_buffer = NULL;
653         cmd->request_bufflen = 0;
654 }
655
656 /*
657  * Function:    scsi_io_completion()
658  *
659  * Purpose:     Completion processing for block device I/O requests.
660  *
661  * Arguments:   cmd   - command that is finished.
662  *
663  * Lock status: Assumed that no lock is held upon entry.
664  *
665  * Returns:     Nothing
666  *
667  * Notes:       This function is matched in terms of capabilities to
668  *              the function that created the scatter-gather list.
669  *              In other words, if there are no bounce buffers
670  *              (the normal case for most drivers), we don't need
671  *              the logic to deal with cleaning up afterwards.
672  *
673  *              We must do one of several things here:
674  *
675  *              a) Call scsi_end_request.  This will finish off the
676  *                 specified number of sectors.  If we are done, the
677  *                 command block will be released, and the queue
678  *                 function will be goosed.  If we are not done, then
679  *                 scsi_end_request will directly goose the queue.
680  *
681  *              b) We can just use scsi_requeue_command() here.  This would
682  *                 be used if we just wanted to retry, for example.
683  */
684 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
685                         unsigned int block_bytes)
686 {
687         int result = cmd->result;
688         int this_count = cmd->bufflen;
689         request_queue_t *q = cmd->device->request_queue;
690         struct request *req = cmd->request;
691         int clear_errors = 1;
692
693         /*
694          * Free up any indirection buffers we allocated for DMA purposes. 
695          * For the case of a READ, we need to copy the data out of the
696          * bounce buffer and into the real buffer.
697          */
698         if (cmd->use_sg)
699                 scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
700         else if (cmd->buffer != req->buffer) {
701                 if (rq_data_dir(req) == READ) {
702                         unsigned long flags;
703                         char *to = bio_kmap_irq(req->bio, &flags);
704                         memcpy(to, cmd->buffer, cmd->bufflen);
705                         bio_kunmap_irq(to, &flags);
706                 }
707                 kfree(cmd->buffer);
708         }
709
710         if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
711                 req->errors = (driver_byte(result) & DRIVER_SENSE) ?
712                               (CHECK_CONDITION << 1) : (result & 0xff);
713                 if (result) {
714                         clear_errors = 0;
715                         if (cmd->sense_buffer[0] & 0x70) {
716                                 int len = 8 + cmd->sense_buffer[7];
717
718                                 if (len > SCSI_SENSE_BUFFERSIZE)
719                                         len = SCSI_SENSE_BUFFERSIZE;
720                                 memcpy(req->sense, cmd->sense_buffer,  len);
721                                 req->sense_len = len;
722                         }
723                 } else
724                         req->data_len -= cmd->bufflen;
725         }
726
727         /*
728          * Zero these out.  They now point to freed memory, and it is
729          * dangerous to hang onto the pointers.
730          */
731         cmd->buffer  = NULL;
732         cmd->bufflen = 0;
733         cmd->request_buffer = NULL;
734         cmd->request_bufflen = 0;
735
736         /*
737          * Next deal with any sectors which we were able to correctly
738          * handle.
739          */
740         if (good_bytes >= 0) {
741                 SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
742                                               req->nr_sectors, good_bytes));
743                 SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));
744
745                 if (clear_errors)
746                         req->errors = 0;
747                 /*
748                  * If multiple sectors are requested in one buffer, then
749                  * they will have been finished off by the first command.
750                  * If not, then we have a multi-buffer command.
751                  *
752                  * If block_bytes != 0, it means we had a medium error
753                  * of some sort, and that we want to mark some number of
754                  * sectors as not uptodate.  Thus we want to inhibit
755                  * requeueing right here - we will requeue down below
756                  * when we handle the bad sectors.
757                  */
758                 cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
759
760                 /*
761                  * If the command completed without error, then either finish off the
762                  * rest of the command, or start a new one.
763                  */
764                 if (result == 0 || cmd == NULL ) {
765                         return;
766                 }
767         }
768         /*
769          * Now, if we were good little boys and girls, Santa left us a request
770          * sense buffer.  We can extract information from this, so we
771          * can choose a block to remap, etc.
772          */
773         if (driver_byte(result) != 0) {
774                 if ((cmd->sense_buffer[0] & 0x7f) == 0x70) {
775                         /*
776                          * If the device is in the process of becoming ready,
777                          * retry.
778                          */
779                         if (cmd->sense_buffer[12] == 0x04 &&
780                             cmd->sense_buffer[13] == 0x01) {
781                                 scsi_requeue_command(q, cmd);
782                                 return;
783                         }
784                         if ((cmd->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
785                                 if (cmd->device->removable) {
786                                         /* detected disc change.  set a bit 
787                                          * and quietly refuse further access.
788                                          */
789                                         cmd->device->changed = 1;
790                                         cmd = scsi_end_request(cmd, 0,
791                                                         this_count, 1);
792                                         return;
793                                 } else {
794                                         /*
795                                         * Must have been a power glitch, or a
796                                         * bus reset.  Could not have been a
797                                         * media change, so we just retry the
798                                         * request and see what happens.  
799                                         */
800                                         scsi_requeue_command(q, cmd);
801                                         return;
802                                 }
803                         }
804                 }
805                 /*
806                  * If we had an ILLEGAL REQUEST returned, then we may have
807                  * performed an unsupported command.  The only thing this
808                  * should be would be a ten byte read where only a six byte
809                  * read was supported.  Also, on a system where READ CAPACITY
810                  * failed, we may have read past the end of the disk.
811                  */
812
813                 switch (cmd->sense_buffer[2]) {
814                 case ILLEGAL_REQUEST:
815                         if (cmd->device->use_10_for_rw &&
816                             (cmd->cmnd[0] == READ_10 ||
817                              cmd->cmnd[0] == WRITE_10)) {
818                                 cmd->device->use_10_for_rw = 0;
819                                 /*
820                                  * This will cause a retry with a 6-byte
821                                  * command.
822                                  */
823                                 scsi_requeue_command(q, cmd);
824                                 result = 0;
825                         } else {
826                                 cmd = scsi_end_request(cmd, 0, this_count, 1);
827                                 return;
828                         }
829                         break;
830                 case NOT_READY:
831                         printk(KERN_INFO "Device %s not ready.\n",
832                                req->rq_disk ? req->rq_disk->disk_name : "");
833                         cmd = scsi_end_request(cmd, 0, this_count, 1);
834                         return;
835                         break;
836                 case MEDIUM_ERROR:
837                 case VOLUME_OVERFLOW:
838                         printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
839                                cmd->device->host->host_no, (int) cmd->device->channel,
840                                (int) cmd->device->id, (int) cmd->device->lun);
841                         print_command(cmd->data_cmnd);
842                         print_sense("", cmd);
843                         cmd = scsi_end_request(cmd, 0, block_bytes, 1);
844                         return;
845                 default:
846                         break;
847                 }
848         }                       /* driver byte != 0 */
849         if (host_byte(result) == DID_RESET) {
850                 /*
851                  * Third party bus reset or reset for error
852                  * recovery reasons.  Just retry the request
853                  * and see what happens.  
854                  */
855                 scsi_requeue_command(q, cmd);
856                 return;
857         }
858         if (result) {
859                 printk("SCSI error : <%d %d %d %d> return code = 0x%x\n",
860                        cmd->device->host->host_no,
861                        cmd->device->channel,
862                        cmd->device->id,
863                        cmd->device->lun, result);
864
865                 if (driver_byte(result) & DRIVER_SENSE)
866                         print_sense("", cmd);
867                 /*
868                  * Mark a single buffer as not uptodate.  Queue the remainder.
869                  * We sometimes get this cruft in the event that a medium error
870                  * isn't properly reported.
871                  */
872                 block_bytes = req->hard_cur_sectors << 9;
873                 if (!block_bytes)
874                         block_bytes = req->data_len;
875                 cmd = scsi_end_request(cmd, 0, block_bytes, 1);
876         }
877 }
878
879 /*
880  * Function:    scsi_init_io()
881  *
882  * Purpose:     SCSI I/O initialize function.
883  *
884  * Arguments:   cmd   - Command descriptor we wish to initialize
885  *
886  * Returns:     0 on success
887  *              BLKPREP_DEFER if the failure is retryable
888  *              BLKPREP_KILL if the failure is fatal
889  */
890 static int scsi_init_io(struct scsi_cmnd *cmd)
891 {
892         struct request     *req = cmd->request;
893         struct scatterlist *sgpnt;
894         int                count;
895
896         /*
897          * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
898          */
899         if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
900                 cmd->request_bufflen = req->data_len;
901                 cmd->request_buffer = req->data;
902                 req->buffer = req->data;
903                 cmd->use_sg = 0;
904                 return 0;
905         }
906
907         /*
908          * we used to not use scatter-gather for single segment requests,
909          * but now we do (it makes highmem I/O easier to support without
910          * kmapping pages)
911          */
912         cmd->use_sg = req->nr_phys_segments;
913
914         /*
915          * if sg table allocation fails, requeue request later.
916          */
917         sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
918         if (unlikely(!sgpnt)) {
919                 req->flags |= REQ_SPECIAL;
920                 return BLKPREP_DEFER;
921         }
922
923         cmd->request_buffer = (char *) sgpnt;
924         cmd->request_bufflen = req->nr_sectors << 9;
925         if (blk_pc_request(req))
926                 cmd->request_bufflen = req->data_len;
927         req->buffer = NULL;
928
929         /* 
930          * Next, walk the list, and fill in the addresses and sizes of
931          * each segment.
932          */
933         count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
934
935         /*
936          * mapped well, send it off
937          */
938         if (likely(count <= cmd->use_sg)) {
939                 cmd->use_sg = count;
940                 return 0;
941         }
942
943         printk(KERN_ERR "Incorrect number of segments after building list\n");
944         printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
945         printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
946                         req->current_nr_sectors);
947
948         /* release the command and kill it */
949         scsi_release_buffers(cmd);
950         scsi_put_command(cmd);
951         return BLKPREP_KILL;
952 }
953
954 static int scsi_prep_fn(struct request_queue *q, struct request *req)
955 {
956         struct scsi_device *sdev = q->queuedata;
957         struct scsi_cmnd *cmd;
958         int specials_only = 0;
959
960         /*
961          * Just check to see if the device is online.  If it isn't, we
962          * refuse to process any commands.  The device must be brought
963          * online before trying any recovery commands
964          */
965         if (unlikely(!scsi_device_online(sdev))) {
966                 printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
967                        sdev->host->host_no, sdev->id, sdev->lun);
968                 return BLKPREP_KILL;
969         }
970         if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
971                 /* OK, we're not in a running state; don't prep
972                  * user commands */
973                 if (sdev->sdev_state == SDEV_DEL) {
974                         /* Device is fully deleted, no commands
975                          * at all allowed down */
976                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
977                                sdev->host->host_no, sdev->id, sdev->lun);
978                         return BLKPREP_KILL;
979                 }
980                 /* OK, we only allow special commands (i.e. not
981                  * user initiated ones) */
982                 specials_only = sdev->sdev_state;
983         }
984
985         /*
986          * Find the actual device driver associated with this command.
987          * The SPECIAL requests are things like character device or
988          * ioctls, which did not originate from ll_rw_blk.  Note that
989          * the special field is also used to indicate the cmd for
990          * the remainder of a partially fulfilled request that can 
991          * come up when there is a medium error.  We have to treat
992          * these two cases differently.  We differentiate by looking
993          * at request->cmd, as this tells us the real story.
994          */
995         if (req->flags & REQ_SPECIAL) {
996                 struct scsi_request *sreq = req->special;
997
998                 if (sreq->sr_magic == SCSI_REQ_MAGIC) {
999                         cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
1000                         if (unlikely(!cmd))
1001                                 goto defer;
1002                         scsi_init_cmd_from_req(cmd, sreq);
1003                 } else
1004                         cmd = req->special;
1005         } else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1006
1007                 if(unlikely(specials_only)) {
1008                         if(specials_only == SDEV_QUIESCE)
1009                                 return BLKPREP_DEFER;
1010                         
1011                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
1012                                sdev->host->host_no, sdev->id, sdev->lun);
1013                         return BLKPREP_KILL;
1014                 }
1015                         
1016                         
1017                 /*
1018                  * Now try and find a command block that we can use.
1019                  */
1020                 if (!req->special) {
1021                         cmd = scsi_get_command(sdev, GFP_ATOMIC);
1022                         if (unlikely(!cmd))
1023                                 goto defer;
1024                 } else
1025                         cmd = req->special;
1026                 
1027                 /* pull a tag out of the request if we have one */
1028                 cmd->tag = req->tag;
1029         } else {
1030                 blk_dump_rq_flags(req, "SCSI bad req");
1031                 return BLKPREP_KILL;
1032         }
1033         
1034         /* note the overloading of req->special.  When the tag
1035          * is active it always means cmd.  If the tag goes
1036          * back for re-queueing, it may be reset */
1037         req->special = cmd;
1038         cmd->request = req;
1039         
1040         /*
1041          * FIXME: drop the lock here because the functions below
1042          * expect to be called without the queue lock held.  Also,
1043          * previously, we dequeued the request before dropping the
1044          * lock.  We hope REQ_STARTED prevents anything untoward from
1045          * happening now.
1046          */
1047         if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
1048                 struct scsi_driver *drv;
1049                 int ret;
1050
1051                 /*
1052                  * This will do a couple of things:
1053                  *  1) Fill in the actual SCSI command.
1054                  *  2) Fill in any other upper-level specific fields
1055                  * (timeout).
1056                  *
1057                  * If this returns 0, it means that the request failed
1058                  * (reading past end of disk, reading offline device,
1059                  * etc).   This won't actually talk to the device, but
1060                  * some kinds of consistency checking may cause the     
1061                  * request to be rejected immediately.
1062                  */
1063
1064                 /* 
1065                  * This sets up the scatter-gather table (allocating if
1066                  * required).
1067                  */
1068                 ret = scsi_init_io(cmd);
1069                 if (ret)        /* BLKPREP_KILL return also releases the command */
1070                         return ret;
1071                 
1072                 /*
1073                  * Initialize the actual SCSI command for this request.
1074                  */
1075                 drv = *(struct scsi_driver **)req->rq_disk->private_data;
1076                 if (unlikely(!drv->init_command(cmd))) {
1077                         scsi_release_buffers(cmd);
1078                         scsi_put_command(cmd);
1079                         return BLKPREP_KILL;
1080                 }
1081         }
1082
1083         /*
1084          * The request is now prepped, no need to come back here
1085          */
1086         req->flags |= REQ_DONTPREP;
1087         return BLKPREP_OK;
1088
1089  defer:
1090         /* If we defer, elv_next_request() will return NULL, but the
1091          * queue must be restarted, so we plug here unless a returning
1092          * command will automatically do that for us. */
1093         if (sdev->device_busy == 0)
1094                 blk_plug_device(q);
1095         return BLKPREP_DEFER;
1096 }
1097
1098 /*
1099  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1100  * return 0.
1101  *
1102  * Called with the queue_lock held.
1103  */
1104 static inline int scsi_dev_queue_ready(struct request_queue *q,
1105                                   struct scsi_device *sdev)
1106 {
1107         if (sdev->device_busy >= sdev->queue_depth)
1108                 return 0;
1109         if (sdev->device_busy == 0 && sdev->device_blocked) {
1110                 /*
1111                  * unblock after device_blocked iterates to zero
1112                  */
1113                 if (--sdev->device_blocked == 0) {
1114                         SCSI_LOG_MLQUEUE(3,
1115                                 printk("scsi%d (%d:%d) unblocking device at"
1116                                        " zero depth\n", sdev->host->host_no,
1117                                        sdev->id, sdev->lun));
1118                 } else {
1119                         blk_plug_device(q);
1120                         return 0;
1121                 }
1122         }
1123         if (sdev->device_blocked)
1124                 return 0;
1125
1126         return 1;
1127 }
1128
1129 /*
1130  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1131  * return 0. We must end up running the queue again whenever 0 is
1132  * returned, else IO can hang.
1133  *
1134  * Called with host_lock held.
1135  */
1136 static inline int scsi_host_queue_ready(struct request_queue *q,
1137                                    struct Scsi_Host *shost,
1138                                    struct scsi_device *sdev)
1139 {
1140         if (test_bit(SHOST_RECOVERY, &shost->shost_state))
1141                 return 0;
1142         if (shost->host_busy == 0 && shost->host_blocked) {
1143                 /*
1144                  * unblock after host_blocked iterates to zero
1145                  */
1146                 if (--shost->host_blocked == 0) {
1147                         SCSI_LOG_MLQUEUE(3,
1148                                 printk("scsi%d unblocking host at zero depth\n",
1149                                         shost->host_no));
1150                 } else {
1151                         blk_plug_device(q);
1152                         return 0;
1153                 }
1154         }
1155         if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
1156             shost->host_blocked || shost->host_self_blocked) {
1157                 if (list_empty(&sdev->starved_entry))
1158                         list_add_tail(&sdev->starved_entry, &shost->starved_list);
1159                 return 0;
1160         }
1161
1162         /* We're OK to process the command, so we can't be starved */
1163         if (!list_empty(&sdev->starved_entry))
1164                 list_del_init(&sdev->starved_entry);
1165
1166         return 1;
1167 }
1168
1169 /*
1170  * Function:    scsi_request_fn()
1171  *
1172  * Purpose:     Main strategy routine for SCSI.
1173  *
1174  * Arguments:   q       - Pointer to actual queue.
1175  *
1176  * Returns:     Nothing
1177  *
1178  * Lock status: IO request lock assumed to be held when called.
1179  */
1180 static void scsi_request_fn(struct request_queue *q)
1181 {
1182         struct scsi_device *sdev = q->queuedata;
1183         struct Scsi_Host *shost = sdev->host;
1184         struct scsi_cmnd *cmd;
1185         struct request *req;
1186
1187         if(!get_device(&sdev->sdev_gendev))
1188                 /* We must be tearing the block queue down already */
1189                 return;
1190
1191         /*
1192          * To start with, we keep looping until the queue is empty, or until
1193          * the host is no longer able to accept any more requests.
1194          */
1195         while (!blk_queue_plugged(q)) {
1196                 int rtn;
1197                 /*
1198                  * get next queueable request.  We do this early to make sure
1199                  * that the request is fully prepared even if we cannot 
1200                  * accept it.
1201                  */
1202                 req = elv_next_request(q);
1203                 if (!req || !scsi_dev_queue_ready(q, sdev))
1204                         break;
1205
1206                 if (unlikely(!scsi_device_online(sdev))) {
1207                         printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
1208                                sdev->host->host_no, sdev->id, sdev->lun);
1209                         blkdev_dequeue_request(req);
1210                         req->flags |= REQ_QUIET;
1211                         while (end_that_request_first(req, 0, req->nr_sectors))
1212                                 ;
1213                         end_that_request_last(req);
1214                         continue;
1215                 }
1216
1217
1218                 /*
1219                  * Remove the request from the request list.
1220                  */
1221                 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1222                         blkdev_dequeue_request(req);
1223                 sdev->device_busy++;
1224
1225                 spin_unlock(q->queue_lock);
1226                 spin_lock(shost->host_lock);
1227
1228                 if (!scsi_host_queue_ready(q, shost, sdev))
1229                         goto not_ready;
1230                 if (sdev->single_lun) {
1231                         if (sdev->sdev_target->starget_sdev_user &&
1232                             sdev->sdev_target->starget_sdev_user != sdev)
1233                                 goto not_ready;
1234                         sdev->sdev_target->starget_sdev_user = sdev;
1235                 }
1236                 shost->host_busy++;
1237
1238                 /*
1239                  * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1240                  *              take the lock again.
1241                  */
1242                 spin_unlock_irq(shost->host_lock);
1243
1244                 cmd = req->special;
1245                 if (unlikely(cmd == NULL)) {
1246                         printk(KERN_CRIT "impossible request in %s.\n"
1247                                          "please mail a stack trace to "
1248                                          "linux-scsi@vger.kernel.org",
1249                                          __FUNCTION__);
1250                         BUG();
1251                 }
1252
1253                 /*
1254                  * Finally, initialize any error handling parameters, and set up
1255                  * the timers for timeouts.
1256                  */
1257                 scsi_init_cmd_errh(cmd);
1258
1259                 /*
1260                  * Dispatch the command to the low-level driver.
1261                  */
1262                 rtn = scsi_dispatch_cmd(cmd);
1263                 spin_lock_irq(q->queue_lock);
1264                 if(rtn) {
1265                         /* we're refusing the command; because of
1266                          * the way locks get dropped, we need to 
1267                          * check here if plugging is required */
1268                         if(sdev->device_busy == 0)
1269                                 blk_plug_device(q);
1270
1271                         break;
1272                 }
1273         }
1274
1275         goto out;
1276
1277  not_ready:
1278         spin_unlock_irq(shost->host_lock);
1279
1280         /*
1281          * lock q, handle tag, requeue req, and decrement device_busy. We
1282          * must return with queue_lock held.
1283          *
1284          * Decrementing device_busy without checking it is OK, as all such
1285          * cases (host limits or settings) should run the queue at some
1286          * later time.
1287          */
1288         spin_lock_irq(q->queue_lock);
1289         blk_requeue_request(q, req);
1290         sdev->device_busy--;
1291         if(sdev->device_busy == 0)
1292                 blk_plug_device(q);
1293  out:
1294         /* must be careful here...if we trigger the ->remove() function
1295          * we cannot be holding the q lock */
1296         spin_unlock_irq(q->queue_lock);
1297         put_device(&sdev->sdev_gendev);
1298         spin_lock_irq(q->queue_lock);
1299 }
1300
1301 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1302 {
1303         struct device *host_dev;
1304
1305         if (shost->unchecked_isa_dma)
1306                 return BLK_BOUNCE_ISA;
1307
1308         host_dev = scsi_get_device(shost);
1309         if (PCI_DMA_BUS_IS_PHYS && host_dev && host_dev->dma_mask)
1310                 return *host_dev->dma_mask;
1311
1312         /*
1313          * Platforms with virtual-DMA translation
1314          * hardware have no practical limit.
1315          */
1316         return BLK_BOUNCE_ANY;
1317 }
1318
1319 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1320 {
1321         struct Scsi_Host *shost = sdev->host;
1322         struct request_queue *q;
1323
1324         q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
1325         if (!q)
1326                 return NULL;
1327
1328         blk_queue_prep_rq(q, scsi_prep_fn);
1329
1330         blk_queue_max_hw_segments(q, shost->sg_tablesize);
1331         blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
1332         blk_queue_max_sectors(q, shost->max_sectors);
1333         blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1334         blk_queue_segment_boundary(q, shost->dma_boundary);
1335  
1336         if (!shost->use_clustering)
1337                 clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
1338         return q;
1339 }
1340
1341 void scsi_free_queue(struct request_queue *q)
1342 {
1343         blk_cleanup_queue(q);
1344 }
1345
1346 /*
1347  * Function:    scsi_block_requests()
1348  *
1349  * Purpose:     Utility function used by low-level drivers to prevent further
1350  *              commands from being queued to the device.
1351  *
1352  * Arguments:   shost       - Host in question
1353  *
1354  * Returns:     Nothing
1355  *
1356  * Lock status: No locks are assumed held.
1357  *
1358  * Notes:       There is no timer nor any other means by which the requests
1359  *              get unblocked other than the low-level driver calling
1360  *              scsi_unblock_requests().
1361  */
1362 void scsi_block_requests(struct Scsi_Host *shost)
1363 {
1364         shost->host_self_blocked = 1;
1365 }
1366
1367 /*
1368  * Function:    scsi_unblock_requests()
1369  *
1370  * Purpose:     Utility function used by low-level drivers to allow further
1371  *              commands to be queued to the device.
1372  *
1373  * Arguments:   shost       - Host in question
1374  *
1375  * Returns:     Nothing
1376  *
1377  * Lock status: No locks are assumed held.
1378  *
1379  * Notes:       There is no timer nor any other means by which the requests
1380  *              get unblocked other than the low-level driver calling
1381  *              scsi_unblock_requests().
1382  *
1383  *              This is done as an API function so that changes to the
1384  *              internals of the scsi mid-layer won't require wholesale
1385  *              changes to drivers that use this feature.
1386  */
1387 void scsi_unblock_requests(struct Scsi_Host *shost)
1388 {
1389         shost->host_self_blocked = 0;
1390         scsi_run_host_queues(shost);
1391 }
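/*
 * Illustrative sketch: a low-level driver typically brackets an internal
 * host reset or firmware reload with this pair of calls (shost being its
 * Scsi_Host; the reset step itself is driver specific):
 *
 *	scsi_block_requests(shost);
 *	... reset the controller / reload firmware ...
 *	scsi_unblock_requests(shost);
 *
 * scsi_unblock_requests() also reruns the per-device request queues, so
 * commands held back while host_self_blocked was set are issued again.
 */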
1392
1393 int __init scsi_init_queue(void)
1394 {
1395         int i;
1396
1397         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1398                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1399                 int size = sgp->size * sizeof(struct scatterlist);
1400
1401                 sgp->slab = kmem_cache_create(sgp->name, size, 0,
1402                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
1403                 if (!sgp->slab) {
1404                         printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1405                                         sgp->name);
1406                 }
1407
1408                 sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
1409                                 mempool_alloc_slab, mempool_free_slab,
1410                                 sgp->slab);
1411                 if (!sgp->pool) {
1412                         printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1413                                         sgp->name);
1414                 }
1415         }
1416
1417         return 0;
1418 }
1419
1420 void scsi_exit_queue(void)
1421 {
1422         int i;
1423
1424         for (i = 0; i < SG_MEMPOOL_NR; i++) {
1425                 struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1426                 mempool_destroy(sgp->pool);
1427                 kmem_cache_destroy(sgp->slab);
1428         }
1429 }
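
/*
 * Example (illustrative sketch, not part of the original file):
 * scsi_init_queue()/scsi_exit_queue() use the usual slab-plus-mempool
 * pairing so that a minimum number of scatterlist arrays stays available
 * even under memory pressure.  Reduced to a single hypothetical pool, the
 * same pattern looks like:
 *
 *	static kmem_cache_t *example_slab;
 *	static mempool_t *example_pool;
 *
 *	static int example_pool_init(void)
 *	{
 *		example_slab = kmem_cache_create("example-pool", 256, 0,
 *				SLAB_HWCACHE_ALIGN, NULL, NULL);
 *		if (!example_slab)
 *			return -ENOMEM;
 *
 *		// mempool_alloc_slab/mempool_free_slab simply forward to the
 *		// backing slab cache passed as the last argument
 *		example_pool = mempool_create(8, mempool_alloc_slab,
 *				mempool_free_slab, example_slab);
 *		if (!example_pool) {
 *			kmem_cache_destroy(example_slab);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 */
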
1430 /**
1431  *      __scsi_mode_sense - issue a mode sense, falling back from ten to
1432  *              six bytes if necessary.
1433  *      @sreq:  SCSI request to fill in with the MODE_SENSE
1434  *      @dbd:   set if mode sense will disable block descriptors in the return
1435  *      @modepage: mode page being requested
1436  *      @buffer: request buffer (may not be smaller than eight bytes)
1437  *      @len:   length of request buffer.
1438  *      @timeout: command timeout
1439  *      @retries: number of retries before failing
1440  *      @data: returns a structure abstracting the mode header data
1441  *
1442  *      Returns the command's result field (zero on success).  On success
1443  *      @data is filled in, including the header length (4 or 8 bytes
1444  *      depending on whether a six or ten byte command was issued).
1445  **/
1446 int
1447 __scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
1448                   unsigned char *buffer, int len, int timeout, int retries,
1449                   struct scsi_mode_data *data) {
1450         unsigned char cmd[12];
1451         int use_10_for_ms;
1452         int header_length;
1453
1454         memset(data, 0, sizeof(*data));
1455         memset(cmd, 0, sizeof(cmd));
1456         cmd[1] = dbd & 0x18;    /* allows DBD and LLBAA bits */
1457         cmd[2] = modepage;
1458
1459  retry:
1460         use_10_for_ms = sreq->sr_device->use_10_for_ms;
1461
1462         if (use_10_for_ms) {
1463                 if (len < 8)
1464                         len = 8;
1465
1466                 cmd[0] = MODE_SENSE_10;
1467                 cmd[8] = len;
1468                 header_length = 8;
1469         } else {
1470                 if (len < 4)
1471                         len = 4;
1472
1473                 cmd[0] = MODE_SENSE;
1474                 cmd[4] = len;
1475                 header_length = 4;
1476         }
1477
1478         sreq->sr_cmd_len = 0;
1479         sreq->sr_sense_buffer[0] = 0;
1480         sreq->sr_sense_buffer[2] = 0;
1481         sreq->sr_data_direction = DMA_FROM_DEVICE;
1482
1483         memset(buffer, 0, len);
1484
1485         scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);
1486
1487         /* This code looks awful: what it's doing is making sure an
1488          * ILLEGAL REQUEST sense return identifies the actual command
1489          * byte as the problem.  MODE_SENSE commands can return
1490          * ILLEGAL REQUEST if the mode page isn't supported. */
1491         if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
1492             (driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
1493             sreq->sr_sense_buffer[2] == ILLEGAL_REQUEST &&
1494             (sreq->sr_sense_buffer[4] & 0x40) == 0x40 &&
1495             sreq->sr_sense_buffer[5] == 0 &&
1496             sreq->sr_sense_buffer[6] == 0) {
1497                 sreq->sr_device->use_10_for_ms = 0;
1498                 goto retry;
1499         }
1500
1501         if (scsi_status_is_good(sreq->sr_result)) {
1502                 data->header_length = header_length;
1503                 if (use_10_for_ms) {
1504                         data->length = buffer[0]*256 + buffer[1] + 2;
1505                         data->medium_type = buffer[2];
1506                         data->device_specific = buffer[3];
1507                         data->longlba = buffer[4] & 0x01;
1508                         data->block_descriptor_length = buffer[6]*256
1509                                 + buffer[7];
1510                 } else {
1511                         data->length = buffer[0] + 1;
1512                         data->medium_type = buffer[1];
1513                         data->device_specific = buffer[2];
1514                         data->block_descriptor_length = buffer[3];
1515                 }
1516         }
1517
1518         return sreq->sr_result;
1519 }
1520
1521 /**
1522  *      scsi_mode_sense - issue a mode sense, falling back from ten to
1523  *              six bytes if necessary.
1524  *      @sdev:  scsi device to send command to.
1525  *      @dbd:   set if mode sense will disable block descriptors in the return
1526  *      @modepage: mode page being requested
1527  *      @buffer: request buffer (may not be smaller than eight bytes)
1528  *      @len:   length of request buffer.
1529  *      @timeout: command timeout
1530  *      @retries: number of retries before failing
1531  *
1532  *      Returns the command's result field (zero on success), or a
1533  *      negative value if no scsi_request could be allocated.  On success
1534  *      the mode header data is returned through the final argument.
1535  **/
1536 int
1537 scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1538                 unsigned char *buffer, int len, int timeout, int retries,
1539                 struct scsi_mode_data *data)
1540 {
1541         struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
1542         int ret;
1543
1544         if (!sreq)
1545                 return -1;
1546
1547         ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
1548                                 timeout, retries, data);
1549
1550         scsi_release_request(sreq);
1551
1552         return ret;
1553 }
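
/*
 * Example (illustrative sketch, not part of the original file): an
 * upper-level driver asking for the caching mode page (0x08) might call
 * scsi_mode_sense() as below; the buffer size, timeout and retry count are
 * arbitrary illustrative values:
 *
 *	static int example_read_cache_page(struct scsi_device *sdev)
 *	{
 *		unsigned char buffer[64];
 *		struct scsi_mode_data data;
 *		int res;
 *
 *		res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
 *				      5 * HZ, 3, &data);
 *		if (!scsi_status_is_good(res))
 *			return -EIO;
 *
 *		// the requested mode page starts after the header and any
 *		// block descriptors:
 *		//   buffer + data.header_length + data.block_descriptor_length
 *		return 0;
 *	}
 */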
1554
1555 /**
1556  *      scsi_device_set_state - Take the given device through the device
1557  *              state model.
1558  *      @sdev:  scsi device to change the state of.
1559  *      @state: state to change to.
1560  *
1561  *      Returns zero if successful, or -EINVAL if the requested
1562  *      transition is illegal.
1563  **/
1564 int
1565 scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
1566 {
1567         enum scsi_device_state oldstate = sdev->sdev_state;
1568
1569         if (state == oldstate)
1570                 return 0;
1571
1572         switch (state) {
1573         case SDEV_CREATED:
1574                 /* There are no legal states that come back to
1575                  * created.  This is the manually initialised start
1576                  * state */
1577                 goto illegal;
1578                         
1579         case SDEV_RUNNING:
1580                 switch (oldstate) {
1581                 case SDEV_CREATED:
1582                 case SDEV_OFFLINE:
1583                 case SDEV_QUIESCE:
1584                         break;
1585                 default:
1586                         goto illegal;
1587                 }
1588                 break;
1589
1590         case SDEV_QUIESCE:
1591                 switch (oldstate) {
1592                 case SDEV_RUNNING:
1593                 case SDEV_OFFLINE:
1594                         break;
1595                 default:
1596                         goto illegal;
1597                 }
1598                 break;
1599
1600         case SDEV_OFFLINE:
1601                 switch (oldstate) {
1602                 case SDEV_CREATED:
1603                 case SDEV_RUNNING:
1604                 case SDEV_QUIESCE:
1605                         break;
1606                 default:
1607                         goto illegal;
1608                 }
1609                 break;
1610
1611         case SDEV_CANCEL:
1612                 switch (oldstate) {
1613                 case SDEV_CREATED:
1614                 case SDEV_RUNNING:
1615                 case SDEV_OFFLINE:
1616                         break;
1617                 default:
1618                         goto illegal;
1619                 }
1620                 break;
1621
1622         case SDEV_DEL:
1623                 switch (oldstate) {
1624                 case SDEV_CANCEL:
1625                         break;
1626                 default:
1627                         goto illegal;
1628                 }
1629                 break;
1630
1631         }
1632         sdev->sdev_state = state;
1633         return 0;
1634
1635  illegal:
1636         dev_printk(KERN_ERR, &sdev->sdev_gendev,
1637                    "Illegal state transition %s->%s\n",
1638                    scsi_device_state_name(oldstate),
1639                    scsi_device_state_name(state));
1640         WARN_ON(1);
1641         return -EINVAL;
1642 }
1643 EXPORT_SYMBOL(scsi_device_set_state);
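
/*
 * Example (illustrative sketch, not part of the original file): legal paths
 * through the state model follow the device's lifetime.  Removal, for
 * instance, must pass through SDEV_CANCEL before SDEV_DEL -- roughly:
 *
 *	scsi_device_set_state(sdev, SDEV_CANCEL);	// stop accepting new requests
 *	// ... wait for or terminate outstanding commands ...
 *	scsi_device_set_state(sdev, SDEV_DEL);		// device is gone
 *
 * Jumping straight from SDEV_RUNNING to SDEV_DEL is rejected with -EINVAL.
 */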
1644
1645 /**
1646  *      scsi_device_quiesce - Block user issued commands.
1647  *      @sdev:  scsi device to quiesce.
1648  *
1649  *      This works by trying to transition to the SDEV_QUIESCE state
1650  *      (which must be a legal transition).  When the device is in this
1651  *      state, only special requests will be accepted, all others will
1652  *      be deferred.  Since special requests may also be requeued requests,
1653  *      a successful return doesn't guarantee the device will be 
1654  *      totally quiescent.
1655  *
1656  *      Must be called with user context, may sleep.
1657  *
1658  *      Returns zero if successful, or an error if not.
1659  **/
1660 int
1661 scsi_device_quiesce(struct scsi_device *sdev)
1662 {
1663         int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
1664         if (err)
1665                 return err;
1666
1667         scsi_run_queue(sdev->request_queue);
1668         while (sdev->device_busy) {
                     /* must set the task state, or schedule_timeout() returns at once */
                     set_current_state(TASK_UNINTERRUPTIBLE);
1669                 schedule_timeout(HZ/5);
1670                 scsi_run_queue(sdev->request_queue);
1671         }
1672         return 0;
1673 }
1674 EXPORT_SYMBOL(scsi_device_quiesce);
1675
1676 /**
1677  *      scsi_device_resume - Restart user issued commands to a quiesced device.
1678  *      @sdev:  scsi device to resume.
1679  *
1680  *      Moves the device from quiesced back to running and restarts the
1681  *      queues.
1682  *
1683  *      Must be called with user context, may sleep.
1684  **/
1685 void
1686 scsi_device_resume(struct scsi_device *sdev)
1687 {
1688         if (scsi_device_set_state(sdev, SDEV_RUNNING))
1689                 return;
1690         scsi_run_queue(sdev->request_queue);
1691 }
1692 EXPORT_SYMBOL(scsi_device_resume);
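
/*
 * Example (illustrative sketch, not part of the original file): quiesce and
 * resume are intended for callers that need a device to themselves for a
 * while.  A hypothetical path issuing internal commands might look like:
 *
 *	static int example_exclusive_op(struct scsi_device *sdev)
 *	{
 *		int err;
 *
 *		err = scsi_device_quiesce(sdev);	// defer user I/O
 *		if (err)
 *			return err;
 *
 *		err = example_do_internal_commands(sdev);	// driver-private
 *
 *		scsi_device_resume(sdev);	// let user I/O flow again
 *		return err;
 *	}
 *
 * While quiesced, only special requests (such as those issued through
 * scsi_wait_req()) are passed on to the device; everything else is deferred.
 */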
1693