/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *      Based upon conversations with large numbers
 *      of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define SG_MEMPOOL_NR	(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE	32

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

#define SP(x) { x, "sgpool-" #x }
struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP

/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq, 0);
	return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1,
		printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * We are inserting the command into the ml queue.  First, we
	 * cancel the timer, so it doesn't time out.
	 */
	scsi_delete_timer(cmd);

	/*
	 * Next, set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Register the fact that we own the thing for now.
	 */
	cmd->state = SCSI_STATE_MLQUEUE;
	cmd->owner = SCSI_OWNER_MIDLEVEL;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Insert this command at the head of the queue for its device.
	 * It will go before all other commands that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although this *doesn't* plug the queue, it does call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
	return 0;
}
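
/*
 * Illustrative sketch (not part of the original file): the usual way a
 * command ends up in scsi_queue_insert() is a low-level driver returning
 * a busy indication from its queuecommand() hook; the midlayer then
 * requeues the command as above.  The my_adapter_full() helper below is
 * hypothetical.
 *
 *	static int my_queuecommand(struct scsi_cmnd *cmd,
 *				   void (*done)(struct scsi_cmnd *))
 *	{
 *		if (my_adapter_full())
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *		return 0;
 *	}
 */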

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Notes:       This function is only used for queueing requests for things
 *              like ioctls and character device requests - this is because
 *              we essentially just inject a request into the queue for the
 *              device.
 *
 *              In order to support the scsi_device_quiesce function, we
 *              now inject requests on the *head* of the device queue
 *              rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called either by us
	 * or by the low-level driver's queuecommand() path; it must then
	 * also call the completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);

static void scsi_wait_done(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	struct request_queue *q = cmd->device->request_queue;
	unsigned long flags;

	req->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (req->waiting)
		complete(req->waiting);
}

void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);

	sreq->sr_request->waiting = &wait;
	sreq->sr_request->rq_status = RQ_SCSI_BUSY;
	scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
		    timeout, retries);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	__scsi_release_request(sreq);
}
EXPORT_SYMBOL(scsi_wait_req);
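
/*
 * Usage sketch (illustrative, not part of the original file): issuing a
 * synchronous INQUIRY through scsi_wait_req().  The timeout and retry
 * values are arbitrary and error handling is omitted.
 *
 *	unsigned char inq[36];
 *	unsigned char cmd[] = { INQUIRY, 0, 0, 0, sizeof(inq), 0 };
 *	struct scsi_request *sreq;
 *
 *	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
 *	if (sreq) {
 *		sreq->sr_data_direction = DMA_FROM_DEVICE;
 *		scsi_wait_req(sreq, cmd, inq, sizeof(inq), 30 * HZ, 3);
 *		if (sreq->sr_result == 0)
 *			... inq[] now holds the INQUIRY data ...
 *		scsi_release_request(sreq);
 *	}
 */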

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->owner = SCSI_OWNER_MIDLEVEL;
	cmd->serial_number = 0;
	cmd->serial_number_at_timeout = 0;
	cmd->abort_reason = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;
	cmd->internal_timeout = NORMAL_TIMEOUT;
	cmd->abort_reason = 0;

	return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(&sdev->sdev_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(&sdev->sdev_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_target(current_sdev)->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_target(current_sdev)->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &current_sdev->same_target_siblings,
			same_target_siblings) {
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	cmd->request->flags &= ~REQ_DONTPREP;
	blk_insert_request(q, cmd->request, 1, cmd, 1);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue done or required, NULL otherwise
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue)
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);

			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	if (sgl)
		memset(sgl, 0, sgp->size);
	return sgl;
}

static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
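
/*
 * Illustrative note (not part of the original file): a command with
 * use_sg == 40, say, falls into the 33..64 bucket above, so sglist_len
 * becomes 3 and the table is carved from the "sgpool-64" mempool.  The
 * same index must be handed back on the free side:
 *
 *	sgl = scsi_alloc_sgtable(cmd, GFP_ATOMIC);	   from sgpool-64
 *	...
 *	scsi_free_sgtable(sgl, cmd->sglist_len);	   back to sgpool-64
 */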

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */
		cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);

		/*
		 * If the command completed without error, then either finish off the
		 * rest of the command, or start a new one.
		 */
		if (result == 0 || cmd == NULL) {
			return;
		}
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				cmd = scsi_end_request(cmd, 0,
						       this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if (cmd->device->use_10_for_rw &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				cmd = scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			printk(KERN_INFO "Device %s not ready.\n",
			       req->rq_disk ? req->rq_disk->disk_name : "");
			cmd = scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			printk(KERN_INFO "Volume overflow <%d %d %d %d> CDB: ",
			       cmd->device->host->host_no,
			       (int)cmd->device->channel,
			       (int)cmd->device->id, (int)cmd->device->lun);
			__scsi_print_command(cmd->data_cmnd);
			scsi_print_sense("", cmd);
			cmd = scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	}			/* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		printk(KERN_INFO "SCSI error : <%d %d %d %d> return code "
		       "= 0x%x\n", cmd->device->host->host_no,
		       cmd->device->channel,
		       cmd->device->id,
		       cmd->device->lun, result);

		if (driver_byte(result) & DRIVER_SENSE)
			scsi_print_sense("", cmd);
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		cmd = scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt)) {
		req->flags |= REQ_SPECIAL;
		return BLKPREP_DEFER;
	}

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}

static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
		       sdev->host->host_no, sdev->id, sdev->lun);
		return BLKPREP_KILL;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state; don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones) */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if (unlikely(specials_only)) {
			if (specials_only == SDEV_QUIESCE ||
			    specials_only == SDEV_BLOCK)
				return BLKPREP_DEFER;

			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			return BLKPREP_KILL;
		}

		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		return BLKPREP_KILL;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 *     (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).   This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		if (ret)	/* BLKPREP_KILL return also releases the command */
			return ret;

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		drv = *(struct scsi_driver **)req->rq_disk->private_data;
		if (unlikely(!drv->init_command(cmd))) {
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			return BLKPREP_KILL;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);

	return BLKPREP_DEFER;
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				       struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d (%d:%d) unblocking device at"
				       " zero depth\n", sdev->host->host_no,
				       sdev->id, sdev->lun));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
					struct Scsi_Host *shost,
					struct scsi_device *sdev)
{
	if (test_bit(SHOST_RECOVERY, &shost->shost_state))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
				       shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			blkdev_dequeue_request(req);
			req->flags |= REQ_QUIET;
			while (end_that_request_first(req, 0, req->nr_sectors))
				;
			end_that_request_last(req);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *	     take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if (sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if (sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
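
/*
 * Usage sketch (illustrative, not part of the original file): a
 * low-level driver might bracket an adapter reset like this.  The
 * my_reset_hardware() helper is hypothetical.
 *
 *	scsi_block_requests(shost);
 *	my_reset_hardware(shost);
 *	scsi_unblock_requests(shost);
 */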

int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}

void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;

		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	__scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sreq:	SCSI request to fill in with the MODE_SENSE
 *	@dbd:	set if mode sense will disable block descriptors in the return
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *
 *	Returns zero if successful, or the non-zero SCSI result code on
 *	failure.  On success, @data->header_length holds the header offset
 *	(either 4 or 8 depending on whether a six or ten byte command was
 *	issued).
 **/
int
__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data) {
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

 retry:
	use_10_for_ms = sreq->sr_device->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	sreq->sr_cmd_len = 0;
	memset(sreq->sr_sense_buffer, 0, sizeof(sreq->sr_sense_buffer));
	sreq->sr_data_direction = DMA_FROM_DEVICE;

	memset(buffer, 0, len);

	scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(sreq->sr_result) &&
	    (driver_byte(sreq->sr_result) & DRIVER_SENSE)) {
		struct scsi_sense_hdr sshdr;

		if (scsi_request_normalize_sense(sreq, &sshdr)) {
			if ((sshdr.sense_key == ILLEGAL_REQUEST) &&
			    (sshdr.asc == 0x20) && (sshdr.ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sreq->sr_device->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(sreq->sr_result)) {
		data->header_length = header_length;
		if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
	}

	return sreq->sr_result;
}
EXPORT_SYMBOL(__scsi_mode_sense);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sdev:	scsi device to send command to.
 *	@dbd:	set if mode sense will disable block descriptors in the return
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *
 *	Returns zero if successful, or the non-zero SCSI result code on
 *	failure (-1 if the request could not be allocated).  On success,
 *	@data->header_length holds the header offset (either 4 or 8
 *	depending on whether a six or ten byte command was issued).
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	int ret;

	if (!sreq)
		return -1;

	ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
				timeout, retries, data);

	scsi_release_request(sreq);

	return ret;
}
EXPORT_SYMBOL(scsi_mode_sense);
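
/*
 * Usage sketch (illustrative, not part of the original file): reading
 * the caching mode page (page 0x08).  The timeout and retry values are
 * arbitrary here.
 *
 *	unsigned char buf[64];
 *	struct scsi_mode_data data;
 *
 *	if (scsi_mode_sense(sdev, 0, 0x08, buf, sizeof(buf),
 *			    30 * HZ, 3, &data) == 0) {
 *		unsigned char *page = buf + data.header_length +
 *				      data.block_descriptor_length;
 *		... page[2] holds the WCE/RCD bits ...
 *	}
 */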

int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	struct scsi_request *sreq;
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	int result;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	sreq->sr_data_direction = DMA_NONE;
	scsi_wait_req(sreq, cmd, NULL, 0, timeout, retries);

	if ((driver_byte(sreq->sr_result) & DRIVER_SENSE) && sdev->removable) {
		struct scsi_sense_hdr sshdr;

		if ((scsi_request_normalize_sense(sreq, &sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			sreq->sr_result = 0;
		}
	}
	result = sreq->sr_result;
	scsi_release_request(sreq);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	/* ... validity checks for the remaining target states ... */

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				dev_printk(KERN_ERR, &sdev->sdev_gendev,
					   "Illegal state transition %s->%s\n",
					   scsi_device_state_name(oldstate),
					   scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
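
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * typically bracket a management operation with quiesce/resume so that
 * only special requests reach the device in between.
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		... issue maintenance commands via scsi_wait_req() ...
 *		scsi_device_resume(sdev);
 *	}
 */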

/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
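
/*
 * Usage sketch (illustrative, not part of the original file): a low-level
 * driver pausing a device around an internal recovery step.  Both helpers
 * assume the caller holds shost->host_lock, as noted above.
 *
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_block(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 *	... recover ...
 *	spin_lock_irqsave(shost->host_lock, flags);
 *	scsi_internal_device_unblock(sdev);
 *	spin_unlock_irqrestore(shost->host_lock, flags);
 */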