#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
+#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
-#include "scsi.h"
+#include <scsi/scsi_request.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
#endif
#define SP(x) { x, "sgpool-" #x }
-struct scsi_host_sg_pool scsi_sg_pools[] = {
+static struct scsi_host_sg_pool scsi_sg_pools[] = {
SP(8),
SP(16),
SP(32),
};
#undef SP
+static void scsi_run_queue(struct request_queue *q);
/*
- * Function: scsi_insert_special_req()
+ * Function: scsi_unprep_request()
*
- * Purpose: Insert pre-formed request into request queue.
+ * Purpose: Remove all preparation done for a request, including its
+ * associated scsi_cmnd, so that it can be requeued.
*
- * Arguments: sreq - request that is ready to be queued.
- * at_head - boolean. True if we should insert at head
- * of queue, false if we should insert at tail.
+ * Arguments: req - request to unprepare
*
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns: Nothing
+ * Lock status: Assumed that no locks are held upon entry.
*
- * Notes: This function is called from character device and from
- * ioctl types of functions where the caller knows exactly
- * what SCSI command needs to be issued. The idea is that
- * we merely inject the command into the queue (at the head
- * for now), and then call the queue request function to actually
- * process it.
+ * Returns: Nothing.
*/
-int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
+static void scsi_unprep_request(struct request *req)
{
- /*
- * Because users of this function are apt to reuse requests with no
- * modification, we have to sanitise the request flags here
- */
- sreq->sr_request->flags &= ~REQ_DONTPREP;
- blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
- at_head, sreq, 0);
- return 0;
+ struct scsi_cmnd *cmd = req->special;
+
+ req->flags &= ~REQ_DONTPREP;
+ req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;
+
+ scsi_put_command(cmd);
}
/*
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
+ struct request_queue *q = device->request_queue;
+ unsigned long flags;
SCSI_LOG_MLQUEUE(1,
printk("Inserting command %p into mlqueue\n", cmd));
/*
- * We are inserting the command into the ml queue. First, we
- * cancel the timer, so it doesn't time out.
- */
- scsi_delete_timer(cmd);
-
- /*
- * Next, set the appropriate busy bit for the device/host.
+ * Set the appropriate busy bit for the device/host.
*
* If the host/device isn't busy, assume that something actually
* completed, and that we should be able to queue a command now.
else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
device->device_blocked = device->max_device_blocked;
- /*
- * Register the fact that we own the thing for now.
- */
- cmd->state = SCSI_STATE_MLQUEUE;
- cmd->owner = SCSI_OWNER_MIDLEVEL;
-
/*
* Decrement the counters, since these commands are no longer
* active on the host/device.
scsi_device_unbusy(device);
/*
- * Insert this command at the head of the queue for it's device.
- * It will go before all other commands that are already in the queue.
+ * Requeue this command. It will go before all other commands
+ * that are already in the queue.
*
* NOTE: there is magic here about the way the queue is plugged if
* we have no outstanding commands.
*
- * Although this *doesn't* plug the queue, it does call the request
+ * Although we *don't* plug the queue, we call the request
* function. The SCSI request function detects the blocked condition
* and plugs the queue appropriately.
- */
- blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, cmd->request);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ scsi_run_queue(q);
+
return 0;
}
/*
* head injection *required* here otherwise quiesce won't work
+ *
+ * Because users of this function are apt to reuse requests with no
+ * modification, we have to sanitise the request flags here
*/
- scsi_insert_special_req(sreq, 1);
+ sreq->sr_request->flags &= ~REQ_DONTPREP;
+ blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
+ 1, sreq);
}
-
-static void scsi_wait_done(struct scsi_cmnd *cmd)
+EXPORT_SYMBOL(scsi_do_req);
+
+/**
+ * scsi_execute - insert request and wait for the result
+ * @sdev: scsi device
+ * @cmd: scsi command
+ * @data_direction: data direction
+ * @buffer: data buffer
+ * @bufflen: len of buffer
+ * @sense: optional sense buffer
+ * @timeout: request timeout in seconds
+ * @retries: number of times to retry request
+ * @flags: or into request flags
+ *
+ * returns the req->errors value, which is the scsi_cmnd result
+ * field.
+ **/
+int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ unsigned char *sense, int timeout, int retries, int flags)
{
- struct request *req = cmd->request;
- struct request_queue *q = cmd->device->request_queue;
- unsigned long flags;
+ struct request *req;
+ int write = (data_direction == DMA_TO_DEVICE);
+ int ret = DRIVER_ERROR << 24;
- req->rq_status = RQ_SCSI_DONE; /* Busy, but indicate request done */
+ req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_rq_tagged(req))
- blk_queue_end_tag(q, req);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
+ buffer, bufflen, __GFP_WAIT))
+ goto out;
+
+ req->cmd_len = COMMAND_SIZE(cmd[0]);
+ memcpy(req->cmd, cmd, req->cmd_len);
+ req->sense = sense;
+ req->sense_len = 0;
+ req->retries = retries;
+ req->timeout = timeout;
+ req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;
- if (req->waiting)
- complete(req->waiting);
+ /*
+ * head injection *required* here otherwise quiesce won't work
+ */
+ blk_execute_rq(req->q, NULL, req, 1);
+
+ ret = req->errors;
+ out:
+ blk_put_request(req);
+
+ return ret;
}
+EXPORT_SYMBOL(scsi_execute);
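+
+/*
+ * For example (an illustrative sketch, not a prescription): spinning a
+ * disk up with scsi_execute(), where MY_TIMEOUT and MY_RETRIES stand in
+ * for the caller's own policy:
+ *
+ *	unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };
+ *	int result;
+ *
+ *	result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL,
+ *			      MY_TIMEOUT, MY_RETRIES, 0);
+ *	if (result)
+ *		...inspect status_byte(result) and driver_byte(result)...
+ */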
+
-void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
- unsigned bufflen, int timeout, int retries)
+int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
- DECLARE_COMPLETION(wait);
+ char *sense = NULL;
+ int result;
- sreq->sr_request->waiting = &wait;
- sreq->sr_request->rq_status = RQ_SCSI_BUSY;
- scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
- timeout, retries);
- generic_unplug_device(sreq->sr_device->request_queue);
- wait_for_completion(&wait);
- sreq->sr_request->waiting = NULL;
- if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
- sreq->sr_result |= (DRIVER_ERROR << 24);
+ if (sshdr) {
+ sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
+ if (!sense)
+ return DRIVER_ERROR << 24;
+ memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
+ }
+ result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
+ sense, timeout, retries, 0);
+ if (sshdr)
+ scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
- __scsi_release_request(sreq);
+ kfree(sense);
+ return result;
+}
+EXPORT_SYMBOL(scsi_execute_req);
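+
+/*
+ * For example (sketch only; MY_TIMEOUT and MY_RETRIES stand in for the
+ * caller's policy): a standard INQUIRY with decoded sense data:
+ *
+ *	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 36, 0 };
+ *	unsigned char buffer[36];
+ *	struct scsi_sense_hdr sshdr;
+ *	int result;
+ *
+ *	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
+ *				  sizeof(buffer), &sshdr, MY_TIMEOUT,
+ *				  MY_RETRIES);
+ *	if ((driver_byte(result) & DRIVER_SENSE) && scsi_sense_valid(&sshdr))
+ *		...examine sshdr.sense_key, sshdr.asc, sshdr.ascq...
+ */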
+
+struct scsi_io_context {
+ void *data;
+ void (*done)(void *data, char *sense, int result, int resid);
+ char sense[SCSI_SENSE_BUFFERSIZE];
+};
+
+static kmem_cache_t *scsi_io_context_cache;
+
+static void scsi_end_async(struct request *req, int uptodate)
+{
+ struct scsi_io_context *sioc = req->end_io_data;
+
+ if (sioc->done)
+ sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
+
+ kmem_cache_free(scsi_io_context_cache, sioc);
+ __blk_put_request(req->q, req);
+}
+
+static int scsi_merge_bio(struct request *rq, struct bio *bio)
+{
+ struct request_queue *q = rq->q;
+
+ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+ if (rq_data_dir(rq) == WRITE)
+ bio->bi_rw |= (1 << BIO_RW);
+ blk_queue_bounce(q, &bio);
+
+ if (!rq->bio)
+ blk_rq_bio_prep(q, rq, bio);
+ else if (!q->back_merge_fn(q, rq, bio))
+ return -EINVAL;
+ else {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+ rq->hard_nr_sectors += bio_sectors(bio);
+ rq->nr_sectors = rq->hard_nr_sectors;
+ }
+
+ return 0;
+}
+
+static int scsi_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
+{
+ if (bio->bi_size)
+ return 1;
+
+ bio_put(bio);
+ return 0;
}
+/**
+ * scsi_req_map_sg - map a scatterlist into a request
+ * @rq: request to fill
+ * @sgl: scatterlist
+ * @nsegs: number of elements
+ * @bufflen: len of buffer
+ * @gfp: memory allocation flags
+ *
+ * scsi_req_map_sg maps a scatterlist into a request so that the
+ * request can be sent to the block layer. We do not trust the
+ * scatterlist sent to us, as some ULDs use that struct only to
+ * organize the pages.
+ */
+static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
+ int nsegs, unsigned bufflen, gfp_t gfp)
+{
+ struct request_queue *q = rq->q;
+ int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned int data_len = 0, len, bytes, off;
+ struct page *page;
+ struct bio *bio = NULL;
+ int i, err, nr_vecs = 0;
+
+ for (i = 0; i < nsegs; i++) {
+ page = sgl[i].page;
+ off = sgl[i].offset;
+ len = sgl[i].length;
+ data_len += len;
+
+ while (len > 0) {
+ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
+
+ if (!bio) {
+ nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
+ nr_pages -= nr_vecs;
+
+ bio = bio_alloc(gfp, nr_vecs);
+ if (!bio) {
+ err = -ENOMEM;
+ goto free_bios;
+ }
+ bio->bi_end_io = scsi_bi_endio;
+ }
+
+ if (bio_add_pc_page(q, bio, page, bytes, off) !=
+ bytes) {
+ bio_put(bio);
+ err = -EINVAL;
+ goto free_bios;
+ }
+
+ if (bio->bi_vcnt >= nr_vecs) {
+ err = scsi_merge_bio(rq, bio);
+ if (err) {
+ bio_endio(bio, bio->bi_size, 0);
+ goto free_bios;
+ }
+ bio = NULL;
+ }
+
+ page++;
+ len -= bytes;
+ off = 0;
+ }
+ }
+
+ rq->buffer = rq->data = NULL;
+ rq->data_len = data_len;
+ return 0;
+
+free_bios:
+ while ((bio = rq->bio) != NULL) {
+ rq->bio = bio->bi_next;
+ /*
+ * call endio instead of bio_put in case it was bounced
+ */
+ bio_endio(bio, bio->bi_size, 0);
+ }
+
+ return err;
+}
+
+/**
+ * scsi_execute_async - insert request
+ * @sdev: scsi device
+ * @cmd: scsi command
+ * @cmd_len: length of scsi cdb
+ * @data_direction: data direction
+ * @buffer: data buffer (this can be a kernel buffer or scatterlist)
+ * @bufflen: len of buffer
+ * @use_sg: if buffer is a scatterlist this is the number of elements
+ * @timeout: request timeout in seconds
+ * @retries: number of times to retry request
+ * @privdata: data passed to done()
+ * @done: callback function when done
+ * @gfp: memory allocation flags
+ **/
+int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
+ int cmd_len, int data_direction, void *buffer, unsigned bufflen,
+ int use_sg, int timeout, int retries, void *privdata,
+ void (*done)(void *, char *, int, int), gfp_t gfp)
+{
+ struct request *req;
+ struct scsi_io_context *sioc;
+ int err = 0;
+ int write = (data_direction == DMA_TO_DEVICE);
+
+ sioc = kmem_cache_alloc(scsi_io_context_cache, gfp);
+ if (!sioc)
+ return DRIVER_ERROR << 24;
+ memset(sioc, 0, sizeof(*sioc));
+
+ req = blk_get_request(sdev->request_queue, write, gfp);
+ if (!req)
+ goto free_sense;
+ req->flags |= REQ_BLOCK_PC | REQ_QUIET;
+
+ if (use_sg)
+ err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
+ else if (bufflen)
+ err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
+
+ if (err)
+ goto free_req;
+
+ req->cmd_len = cmd_len;
+ memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
+ memcpy(req->cmd, cmd, req->cmd_len);
+ req->sense = sioc->sense;
+ req->sense_len = 0;
+ req->timeout = timeout;
+ req->retries = retries;
+ req->end_io_data = sioc;
+
+ sioc->data = privdata;
+ sioc->done = done;
+
+ blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
+ return 0;
+
+free_req:
+ blk_put_request(req);
+free_sense:
+ kmem_cache_free(scsi_io_context_cache, sioc);
+ return DRIVER_ERROR << 24;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_async);
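+
+/*
+ * For example (sketch only; my_done and my_ctx are illustrative names,
+ * and no callback is made if submission fails): an asynchronous
+ * TEST UNIT READY:
+ *
+ *	static void my_done(void *data, char *sense, int result, int resid)
+ *	{
+ *		...inspect result and sense, then complete(data)...
+ *	}
+ *
+ *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+ *
+ *	if (scsi_execute_async(sdev, cmd, COMMAND_SIZE(cmd[0]), DMA_NONE,
+ *			       NULL, 0, 0, MY_TIMEOUT, MY_RETRIES,
+ *			       &my_ctx, my_done, GFP_KERNEL))
+ *		...submission failed...
+ */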
+
/*
* Function: scsi_init_cmd_errh()
*
*/
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
- cmd->owner = SCSI_OWNER_MIDLEVEL;
cmd->serial_number = 0;
- cmd->serial_number_at_timeout = 0;
- cmd->abort_reason = 0;
memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
cmd->buffer = cmd->request_buffer;
cmd->bufflen = cmd->request_bufflen;
- cmd->internal_timeout = NORMAL_TIMEOUT;
- cmd->abort_reason = 0;
return 1;
}
spin_lock_irqsave(shost->host_lock, flags);
shost->host_busy--;
- if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
+ if (unlikely(scsi_host_in_recovery(shost) &&
shost->host_failed))
scsi_eh_wakeup(shost);
spin_unlock(shost->host_lock);
- spin_lock(&sdev->sdev_lock);
+ spin_lock(sdev->request_queue->queue_lock);
sdev->device_busy--;
- spin_unlock_irqrestore(&sdev->sdev_lock, flags);
+ spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
/*
{
struct Scsi_Host *shost = current_sdev->host;
struct scsi_device *sdev, *tmp;
+ struct scsi_target *starget = scsi_target(current_sdev);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
- current_sdev->sdev_target->starget_sdev_user = NULL;
+ starget->starget_sdev_user = NULL;
spin_unlock_irqrestore(shost->host_lock, flags);
/*
blk_run_queue(current_sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
- if (current_sdev->sdev_target->starget_sdev_user)
+ if (starget->starget_sdev_user)
goto out;
- list_for_each_entry_safe(sdev, tmp, ¤t_sdev->same_target_siblings,
+ list_for_each_entry_safe(sdev, tmp, &starget->devices,
same_target_siblings) {
+ if (sdev == current_sdev)
+ continue;
if (scsi_device_get(sdev))
continue;
* I/O errors in the middle of the request, in which case
* we need to request the blocks that come after the bad
* sector.
+ * Notes: Upon return, cmd is a stale pointer.
*/
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
- cmd->request->flags &= ~REQ_DONTPREP;
- blk_insert_request(q, cmd->request, 1, cmd, 1);
+ struct request *req = cmd->request;
+ unsigned long flags;
+
+ scsi_unprep_request(req);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_requeue_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
}
void scsi_next_command(struct scsi_cmnd *cmd)
{
- struct request_queue *q = cmd->device->request_queue;
+ struct scsi_device *sdev = cmd->device;
+ struct request_queue *q = sdev->request_queue;
+
+ /* need to hold a reference on the device before we let go of the cmd */
+ get_device(&sdev->sdev_gendev);
scsi_put_command(cmd);
scsi_run_queue(q);
+
+ /* ok to remove device now */
+ put_device(&sdev->sdev_gendev);
}
void scsi_run_host_queues(struct Scsi_Host *shost)
/*
* Function: scsi_end_request()
*
- * Purpose: Post-processing of completed commands called from interrupt
- * handler or a bottom-half handler.
+ * Purpose: Post-processing of completed commands (usually invoked at end
+ * of upper level post-processing and scsi_io_completion).
*
* Arguments: cmd - command that is complete.
- * uptodate - 1 if I/O indicates success, 0 for I/O error.
- * sectors - number of sectors we want to mark.
+ * uptodate - 1 if I/O indicates success, <= 0 for I/O error.
+ * bytes - number of bytes of completed I/O
* requeue - indicates whether we should requeue leftovers.
- * frequeue - indicates that if we release the command block
- * that the queue request function should be called.
*
* Lock status: Assumed that lock is not held upon entry.
*
- * Returns: Nothing
+ * Returns: cmd if requeue required, NULL otherwise.
*
* Notes: This is called for block device requests in order to
* mark some number of sectors as complete.
*
* We are guaranteeing that the request queue will be goosed
* at some point during this call.
+ * Notes: If cmd was requeued, upon return it will be a stale pointer.
*/
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
int bytes, int requeue)
if (!uptodate && blk_noretry_request(req))
end_that_request_chunk(req, 0, leftover);
else {
- if (requeue)
+ if (requeue) {
/*
* Bleah. Leftovers again. Stick the
* leftovers in the front of the
* queue, and goose the queue again.
*/
scsi_requeue_command(q, cmd);
-
+ cmd = NULL;
+ }
return cmd;
}
}
spin_lock_irqsave(q->queue_lock, flags);
if (blk_rq_tagged(req))
blk_queue_end_tag(q, req);
- end_that_request_last(req);
+ end_that_request_last(req, uptodate);
spin_unlock_irqrestore(q->queue_lock, flags);
/*
return NULL;
}
-static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask)
+static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
struct scsi_host_sg_pool *sgp;
struct scatterlist *sgl;
sgp = scsi_sg_pools + cmd->sglist_len;
sgl = mempool_alloc(sgp->pool, gfp_mask);
- if (sgl)
- memset(sgl, 0, sgp->size);
return sgl;
}
{
struct scsi_host_sg_pool *sgp;
- BUG_ON(index > SG_MEMPOOL_NR);
+ BUG_ON(index >= SG_MEMPOOL_NR);
sgp = scsi_sg_pools + index;
mempool_free(sgl, sgp->pool);
request_queue_t *q = cmd->device->request_queue;
struct request *req = cmd->request;
int clear_errors = 1;
+ struct scsi_sense_hdr sshdr;
+ int sense_valid = 0;
+ int sense_deferred = 0;
/*
* Free up any indirection buffers we allocated for DMA purposes.
kfree(cmd->buffer);
}
+ if (result) {
+ sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
+ if (sense_valid)
+ sense_deferred = scsi_sense_is_deferred(&sshdr);
+ }
if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
- req->errors = (driver_byte(result) & DRIVER_SENSE) ?
- (CHECK_CONDITION << 1) : (result & 0xff);
+ req->errors = result;
if (result) {
clear_errors = 0;
- if (cmd->sense_buffer[0] & 0x70) {
+ if (sense_valid && req->sense) {
+ /*
+ * SG_IO wants current and deferred errors
+ */
int len = 8 + cmd->sense_buffer[7];
if (len > SCSI_SENSE_BUFFERSIZE)
len = SCSI_SENSE_BUFFERSIZE;
memcpy(req->sense, cmd->sense_buffer, len);
req->sense_len = len;
}
} else
- req->data_len -= cmd->bufflen;
+ req->data_len = cmd->resid;
}
/*
* requeueing right here - we will requeue down below
* when we handle the bad sectors.
*/
- cmd = scsi_end_request(cmd, 1, good_bytes, result == 0);
/*
- * If the command completed without error, then either finish off the
- * rest of the command, or start a new one.
+ * If the command completed without error, then either
+ * finish off the rest of the command, or start a new one.
*/
- if (result == 0 || cmd == NULL ) {
+ if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
return;
- }
}
/*
* Now, if we were good little boys and girls, Santa left us a request
* sense buffer. We can extract information from this, so we
* can choose a block to remap, etc.
*/
- if (driver_byte(result) != 0) {
- if ((cmd->sense_buffer[0] & 0x7f) == 0x70) {
- /*
- * If the device is in the process of becoming ready,
- * retry.
- */
- if (cmd->sense_buffer[12] == 0x04 &&
- cmd->sense_buffer[13] == 0x01) {
+ if (sense_valid && !sense_deferred) {
+ switch (sshdr.sense_key) {
+ case UNIT_ATTENTION:
+ if (cmd->device->removable) {
+ /* detected disc change. set a bit
+ * and quietly refuse further access.
+ */
+ cmd->device->changed = 1;
+ scsi_end_request(cmd, 0,
+ this_count, 1);
+ return;
+ } else {
+ /*
+ * Must have been a power glitch, or a
+ * bus reset. Could not have been a
+ * media change, so we just retry the
+ * request and see what happens.
+ */
scsi_requeue_command(q, cmd);
return;
}
- if ((cmd->sense_buffer[2] & 0xf) == UNIT_ATTENTION) {
- if (cmd->device->removable) {
- /* detected disc change. set a bit
- * and quietly refuse further access.
- */
- cmd->device->changed = 1;
- cmd = scsi_end_request(cmd, 0,
- this_count, 1);
- return;
- } else {
- /*
- * Must have been a power glitch, or a
- * bus reset. Could not have been a
- * media change, so we just retry the
- * request and see what happens.
- */
- scsi_requeue_command(q, cmd);
- return;
- }
- }
- }
- /*
- * If we had an ILLEGAL REQUEST returned, then we may have
- * performed an unsupported command. The only thing this
- * should be would be a ten byte read where only a six byte
- * read was supported. Also, on a system where READ CAPACITY
- * failed, we may have read past the end of the disk.
- */
-
- switch (cmd->sense_buffer[2]) {
+ break;
case ILLEGAL_REQUEST:
- if (cmd->device->use_10_for_rw &&
+ /*
+ * If we had an ILLEGAL REQUEST returned, then we may
+ * have performed an unsupported command. The only
+ * thing this should be would be a ten byte read where
+ * only a six byte read was supported. Also, on a
+ * system where READ CAPACITY failed, we may have read
+ * past the end of the disk.
+ */
+ if ((cmd->device->use_10_for_rw &&
+ sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
(cmd->cmnd[0] == READ_10 ||
cmd->cmnd[0] == WRITE_10)) {
cmd->device->use_10_for_rw = 0;
scsi_requeue_command(q, cmd);
result = 0;
} else {
- cmd = scsi_end_request(cmd, 0, this_count, 1);
+ scsi_end_request(cmd, 0, this_count, 1);
return;
}
break;
case NOT_READY:
- printk(KERN_INFO "Device %s not ready.\n",
- req->rq_disk ? req->rq_disk->disk_name : "");
- cmd = scsi_end_request(cmd, 0, this_count, 1);
+ /*
+ * If the device is in the process of becoming ready,
+ * retry.
+ */
+ if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
+ scsi_requeue_command(q, cmd);
+ return;
+ }
+ if (!(req->flags & REQ_QUIET))
+ scmd_printk(KERN_INFO, cmd,
+ "Device not ready.\n");
+ scsi_end_request(cmd, 0, this_count, 1);
return;
- break;
- case MEDIUM_ERROR:
case VOLUME_OVERFLOW:
- printk("scsi%d: ERROR on channel %d, id %d, lun %d, CDB: ",
- cmd->device->host->host_no, (int) cmd->device->channel,
- (int) cmd->device->id, (int) cmd->device->lun);
- print_command(cmd->data_cmnd);
- print_sense("", cmd);
- cmd = scsi_end_request(cmd, 0, block_bytes, 1);
+ if (!(req->flags & REQ_QUIET)) {
+ scmd_printk(KERN_INFO, cmd,
+ "Volume overflow, CDB: ");
+ __scsi_print_command(cmd->data_cmnd);
+ scsi_print_sense("", cmd);
+ }
+ scsi_end_request(cmd, 0, block_bytes, 1);
return;
default:
break;
return;
}
if (result) {
- printk("SCSI error : <%d %d %d %d> return code = 0x%x\n",
- cmd->device->host->host_no,
- cmd->device->channel,
- cmd->device->id,
- cmd->device->lun, result);
-
- if (driver_byte(result) & DRIVER_SENSE)
- print_sense("", cmd);
+ if (!(req->flags & REQ_QUIET)) {
+ scmd_printk(KERN_INFO, cmd,
+ "SCSI error: return code = 0x%x\n", result);
+
+ if (driver_byte(result) & DRIVER_SENSE)
+ scsi_print_sense("", cmd);
+ }
/*
* Mark a single buffer as not uptodate. Queue the remainder.
* We sometimes get this cruft in the event that a medium error
block_bytes = req->hard_cur_sectors << 9;
if (!block_bytes)
block_bytes = req->data_len;
- cmd = scsi_end_request(cmd, 0, block_bytes, 1);
+ scsi_end_request(cmd, 0, block_bytes, 1);
}
}
+EXPORT_SYMBOL(scsi_io_completion);
/*
* Function: scsi_init_io()
*/
sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
if (unlikely(!sgpnt)) {
- req->flags |= REQ_SPECIAL;
+ scsi_unprep_request(req);
return BLKPREP_DEFER;
}
return BLKPREP_KILL;
}
+static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+ sector_t *error_sector)
+{
+ struct scsi_device *sdev = q->queuedata;
+ struct scsi_driver *drv;
+
+ if (sdev->sdev_state != SDEV_RUNNING)
+ return -ENXIO;
+
+ drv = *(struct scsi_driver **) disk->private_data;
+ if (drv->issue_flush)
+ return drv->issue_flush(&sdev->sdev_gendev, error_sector);
+
+ return -EOPNOTSUPP;
+}
+
+static void scsi_blk_pc_done(struct scsi_cmnd *cmd)
+{
+ BUG_ON(!blk_pc_request(cmd->request));
+ /*
+ * This will complete the whole command with uptodate=1 so
+ * as far as the block layer is concerned the command completed
+ * successfully. Since this is a REQ_BLOCK_PC command the
+ * caller should check the request's errors value
+ */
+ scsi_io_completion(cmd, cmd->bufflen, 0);
+}
+
+static void scsi_setup_blk_pc_cmnd(struct scsi_cmnd *cmd)
+{
+ struct request *req = cmd->request;
+
+ BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
+ memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
+ cmd->cmd_len = req->cmd_len;
+ if (!req->data_len)
+ cmd->sc_data_direction = DMA_NONE;
+ else if (rq_data_dir(req) == WRITE)
+ cmd->sc_data_direction = DMA_TO_DEVICE;
+ else
+ cmd->sc_data_direction = DMA_FROM_DEVICE;
+
+ cmd->transfersize = req->data_len;
+ cmd->allowed = req->retries;
+ cmd->timeout_per_command = req->timeout;
+ cmd->done = scsi_blk_pc_done;
+}
+
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
struct scsi_device *sdev = q->queuedata;
* online before trying any recovery commands
*/
if (unlikely(!scsi_device_online(sdev))) {
- printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
- sdev->host->host_no, sdev->id, sdev->lun);
- return BLKPREP_KILL;
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to offline device\n");
+ goto kill;
}
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
/* OK, we're not in a running state don't prep
if (sdev->sdev_state == SDEV_DEL) {
/* Device is fully deleted, no commands
* at all allowed down */
- printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
- sdev->host->host_no, sdev->id, sdev->lun);
- return BLKPREP_KILL;
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to dead device\n");
+ goto kill;
}
/* OK, we only allow special commands (i.e. not
* user initiated ones */
* these two cases differently. We differentiate by looking
* at request->cmd, as this tells us the real story.
*/
- if (req->flags & REQ_SPECIAL) {
+ if (req->flags & REQ_SPECIAL && req->special) {
struct scsi_request *sreq = req->special;
if (sreq->sr_magic == SCSI_REQ_MAGIC) {
cmd = req->special;
} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
- if(unlikely(specials_only)) {
- if(specials_only == SDEV_QUIESCE)
- return BLKPREP_DEFER;
+ if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
+ if(specials_only == SDEV_QUIESCE ||
+ specials_only == SDEV_BLOCK)
+ goto defer;
- printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
- sdev->host->host_no, sdev->id, sdev->lun);
- return BLKPREP_KILL;
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to device being removed\n");
+ goto kill;
}
cmd->tag = req->tag;
} else {
blk_dump_rq_flags(req, "SCSI bad req");
- return BLKPREP_KILL;
+ goto kill;
}
/* note the overloading of req->special. When the tag
* happening now.
*/
if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
- struct scsi_driver *drv;
int ret;
/*
* required).
*/
ret = scsi_init_io(cmd);
- if (ret) /* BLKPREP_KILL return also releases the command */
- return ret;
+ switch(ret) {
+ /* For BLKPREP_KILL/DEFER the cmd was released */
+ case BLKPREP_KILL:
+ goto kill;
+ case BLKPREP_DEFER:
+ goto defer;
+ }
/*
* Initialize the actual SCSI command for this request.
*/
- drv = *(struct scsi_driver **)req->rq_disk->private_data;
- if (unlikely(!drv->init_command(cmd))) {
- scsi_release_buffers(cmd);
- scsi_put_command(cmd);
- return BLKPREP_KILL;
+ if (req->flags & REQ_BLOCK_PC) {
+ scsi_setup_blk_pc_cmnd(cmd);
+ } else if (req->rq_disk) {
+ struct scsi_driver *drv;
+
+ drv = *(struct scsi_driver **)req->rq_disk->private_data;
+ if (unlikely(!drv->init_command(cmd))) {
+ scsi_release_buffers(cmd);
+ scsi_put_command(cmd);
+ goto kill;
+ }
}
}
if (sdev->device_busy == 0)
blk_plug_device(q);
return BLKPREP_DEFER;
+ kill:
+ req->errors = DID_NO_CONNECT << 16;
+ return BLKPREP_KILL;
}
/*
*/
if (--sdev->device_blocked == 0) {
SCSI_LOG_MLQUEUE(3,
- printk("scsi%d (%d:%d) unblocking device at"
- " zero depth\n", sdev->host->host_no,
- sdev->id, sdev->lun));
+ sdev_printk(KERN_INFO, sdev,
+ "unblocking device at zero depth\n"));
} else {
blk_plug_device(q);
return 0;
struct Scsi_Host *shost,
struct scsi_device *sdev)
{
- if (test_bit(SHOST_RECOVERY, &shost->shost_state))
+ if (scsi_host_in_recovery(shost))
return 0;
if (shost->host_busy == 0 && shost->host_blocked) {
/*
return 1;
}
+/*
+ * Kill a request for a dead device
+ */
+static void scsi_kill_request(struct request *req, request_queue_t *q)
+{
+ struct scsi_cmnd *cmd = req->special;
+
+ blkdev_dequeue_request(req);
+
+ if (unlikely(cmd == NULL)) {
+ printk(KERN_CRIT "impossible request in %s.\n",
+ __FUNCTION__);
+ BUG();
+ }
+
+ scsi_init_cmd_errh(cmd);
+ cmd->result = DID_NO_CONNECT << 16;
+ atomic_inc(&cmd->device->iorequest_cnt);
+ __scsi_done(cmd);
+}
+
+static void scsi_softirq_done(struct request *rq)
+{
+ struct scsi_cmnd *cmd = rq->completion_data;
+ unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
+ int disposition;
+
+ INIT_LIST_HEAD(&cmd->eh_entry);
+
+ disposition = scsi_decide_disposition(cmd);
+ if (disposition != SUCCESS &&
+ time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ sdev_printk(KERN_ERR, cmd->device,
+ "timing out command, waited %lus\n",
+ wait_for/HZ);
+ disposition = SUCCESS;
+ }
+
+ scsi_log_completion(cmd, disposition);
+
+ switch (disposition) {
+ case SUCCESS:
+ scsi_finish_command(cmd);
+ break;
+ case NEEDS_RETRY:
+ scsi_retry_command(cmd);
+ break;
+ case ADD_TO_MLQUEUE:
+ scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+ break;
+ default:
+ if (!scsi_eh_scmd_add(cmd, 0))
+ scsi_finish_command(cmd);
+ }
+}
+
/*
* Function: scsi_request_fn()
*
static void scsi_request_fn(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
- struct Scsi_Host *shost = sdev->host;
+ struct Scsi_Host *shost;
struct scsi_cmnd *cmd;
struct request *req;
+ if (!sdev) {
+ printk("scsi: killing requests for dead queue\n");
+ while ((req = elv_next_request(q)) != NULL)
+ scsi_kill_request(req, q);
+ return;
+ }
+
if(!get_device(&sdev->sdev_gendev))
/* We must be tearing the block queue down already */
return;
* To start with, we keep looping until the queue is empty, or until
* the host is no longer able to accept any more requests.
*/
+ shost = sdev->host;
while (!blk_queue_plugged(q)) {
int rtn;
/*
break;
if (unlikely(!scsi_device_online(sdev))) {
- printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
- sdev->host->host_no, sdev->id, sdev->lun);
- blkdev_dequeue_request(req);
- req->flags |= REQ_QUIET;
- while (end_that_request_first(req, 0, req->nr_sectors))
- ;
- end_that_request_last(req);
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to offline device\n");
+ scsi_kill_request(req, q);
continue;
}
sdev->device_busy++;
spin_unlock(q->queue_lock);
+ cmd = req->special;
+ if (unlikely(cmd == NULL)) {
+ printk(KERN_CRIT "impossible request in %s.\n"
+ "please mail a stack trace to "
+ "linux-scsi@vger.kernel.org",
+ __FUNCTION__);
+ BUG();
+ }
spin_lock(shost->host_lock);
if (!scsi_host_queue_ready(q, shost, sdev))
goto not_ready;
if (sdev->single_lun) {
- if (sdev->sdev_target->starget_sdev_user &&
- sdev->sdev_target->starget_sdev_user != sdev)
+ if (scsi_target(sdev)->starget_sdev_user &&
+ scsi_target(sdev)->starget_sdev_user != sdev)
goto not_ready;
- sdev->sdev_target->starget_sdev_user = sdev;
+ scsi_target(sdev)->starget_sdev_user = sdev;
}
shost->host_busy++;
*/
spin_unlock_irq(shost->host_lock);
- cmd = req->special;
- if (unlikely(cmd == NULL)) {
- printk(KERN_CRIT "impossible request in %s.\n"
- "please mail a stack trace to "
- "linux-scsi@vger.kernel.org",
- __FUNCTION__);
- BUG();
- }
-
/*
* Finally, initialize any error handling parameters, and set up
* the timers for timeouts.
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
struct device *host_dev;
+ u64 bounce_limit = 0xffffffff;
if (shost->unchecked_isa_dma)
return BLK_BOUNCE_ISA;
-
- host_dev = scsi_get_device(shost);
- if (PCI_DMA_BUS_IS_PHYS && host_dev && host_dev->dma_mask)
- return *host_dev->dma_mask;
-
/*
* Platforms with virtual-DMA translation
* hardware have no practical limit.
*/
- return BLK_BOUNCE_ANY;
+ if (!PCI_DMA_BUS_IS_PHYS)
+ return BLK_BOUNCE_ANY;
+
+ host_dev = scsi_get_device(shost);
+ if (host_dev && host_dev->dma_mask)
+ bounce_limit = *host_dev->dma_mask;
+
+ return bounce_limit;
}
+EXPORT_SYMBOL(scsi_calculate_bounce_limit);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct request_queue *q;
- q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
+ q = blk_init_queue(scsi_request_fn, NULL);
if (!q)
return NULL;
blk_queue_max_sectors(q, shost->max_sectors);
blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
blk_queue_segment_boundary(q, shost->dma_boundary);
-
+ blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+ blk_queue_softirq_done(q, scsi_softirq_done);
+
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
return q;
{
shost->host_self_blocked = 1;
}
+EXPORT_SYMBOL(scsi_block_requests);
/*
* Function: scsi_unblock_requests()
shost->host_self_blocked = 0;
scsi_run_host_queues(shost);
}
+EXPORT_SYMBOL(scsi_unblock_requests);
int __init scsi_init_queue(void)
{
int i;
+ scsi_io_context_cache = kmem_cache_create("scsi_io_context",
+ sizeof(struct scsi_io_context),
+ 0, 0, NULL, NULL);
+ if (!scsi_io_context_cache) {
+ printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
+ return -ENOMEM;
+ }
+
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
int size = sgp->size * sizeof(struct scatterlist);
{
int i;
+ kmem_cache_destroy(scsi_io_context_cache);
+
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
mempool_destroy(sgp->pool);
}
}
/**
- * __scsi_mode_sense - issue a mode sense, falling back from 10 to
+ * scsi_mode_sense - issue a mode sense, falling back from 10 to
* six bytes if necessary.
- * @sreq: SCSI request to fill in with the MODE_SENSE
+ * @sdev: SCSI device to be queried
* @dbd: set if mode sense will allow block descriptors to be returned
* @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes)
* @len: length of request buffer.
* @timeout: command timeout
* @retries: number of retries before failing
* @data: returns a structure abstracting the mode header data
+ * @sshdr: place to put decoded sense data (or NULL if no sense is to be
+ * collected).
*
* Returns the scsi result (check it with scsi_status_is_good()); on
* success, data->header_length holds the header offset (either 4
* or 8 depending on whether a six or ten byte command was issued).
**/
int
-__scsi_mode_sense(struct scsi_request *sreq, int dbd, int modepage,
+scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
unsigned char *buffer, int len, int timeout, int retries,
- struct scsi_mode_data *data) {
+ struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
unsigned char cmd[12];
int use_10_for_ms;
int header_length;
+ int result;
+ struct scsi_sense_hdr my_sshdr;
memset(data, 0, sizeof(*data));
memset(&cmd[0], 0, 12);
cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
cmd[2] = modepage;
+ /* caller might not be interested in sense, but we need it */
+ if (!sshdr)
+ sshdr = &my_sshdr;
+
retry:
- use_10_for_ms = sreq->sr_device->use_10_for_ms;
+ use_10_for_ms = sdev->use_10_for_ms;
if (use_10_for_ms) {
if (len < 8)
header_length = 4;
}
- sreq->sr_cmd_len = 0;
- sreq->sr_sense_buffer[0] = 0;
- sreq->sr_sense_buffer[2] = 0;
- sreq->sr_data_direction = DMA_FROM_DEVICE;
-
memset(buffer, 0, len);
- scsi_wait_req(sreq, cmd, buffer, len, timeout, retries);
+ result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
+ sshdr, timeout, retries);
/* This code looks awful: what it's doing is making sure an
* ILLEGAL REQUEST sense return identifies the actual command
* byte as the problem. MODE_SENSE commands can return
* ILLEGAL REQUEST if the code page isn't supported */
- if (use_10_for_ms && ! scsi_status_is_good(sreq->sr_result) &&
- (driver_byte(sreq->sr_result) & DRIVER_SENSE) &&
- sreq->sr_sense_buffer[2] == ILLEGAL_REQUEST &&
- (sreq->sr_sense_buffer[4] & 0x40) == 0x40 &&
- sreq->sr_sense_buffer[5] == 0 &&
- sreq->sr_sense_buffer[6] == 0 ) {
- sreq->sr_device->use_10_for_ms = 0;
- goto retry;
+
+ if (use_10_for_ms && !scsi_status_is_good(result) &&
+ (driver_byte(result) & DRIVER_SENSE)) {
+ if (scsi_sense_valid(sshdr)) {
+ if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
+ (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
+ /*
+ * Invalid command operation code
+ */
+ sdev->use_10_for_ms = 0;
+ goto retry;
+ }
+ }
}
- if(scsi_status_is_good(sreq->sr_result)) {
+ if(scsi_status_is_good(result)) {
data->header_length = header_length;
if(use_10_for_ms) {
data->length = buffer[0]*256 + buffer[1] + 2;
}
}
- return sreq->sr_result;
+ return result;
}
+EXPORT_SYMBOL(scsi_mode_sense);
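+
+/*
+ * For example (sketch only; MY_TIMEOUT and MY_RETRIES stand in for the
+ * caller's policy): fetching the caching mode page (0x08). The return
+ * value is the scsi result, so test it with scsi_status_is_good():
+ *
+ *	unsigned char buffer[512];
+ *	struct scsi_mode_data data;
+ *	struct scsi_sense_hdr sshdr;
+ *	int res;
+ *
+ *	res = scsi_mode_sense(sdev, 0, 0x08, buffer, sizeof(buffer),
+ *			      MY_TIMEOUT, MY_RETRIES, &data, &sshdr);
+ *	if (scsi_status_is_good(res))
+ *		...the page lives at buffer + data.header_length +
+ *		   data.block_descriptor_length...
+ */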
-/**
- * scsi_mode_sense - issue a mode sense, falling back from 10 to
- * six bytes if necessary.
- * @sdev: scsi device to send command to.
- * @dbd: set if mode sense will disable block descriptors in the return
- * @modepage: mode page being requested
- * @buffer: request buffer (may not be smaller than eight bytes)
- * @len: length of request buffer.
- * @timeout: command timeout
- * @retries: number of retries before failing
- *
- * Returns zero if unsuccessful, or the header offset (either 4
- * or 8 depending on whether a six or ten byte command was
- * issued) if successful.
- **/
int
-scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
- unsigned char *buffer, int len, int timeout, int retries,
- struct scsi_mode_data *data)
+scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
- struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
- int ret;
-
- if (!sreq)
- return -1;
-
- ret = __scsi_mode_sense(sreq, dbd, modepage, buffer, len,
- timeout, retries, data);
+ char cmd[] = {
+ TEST_UNIT_READY, 0, 0, 0, 0, 0,
+ };
+ struct scsi_sense_hdr sshdr;
+ int result;
+
+ result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
+ timeout, retries);
- scsi_release_request(sreq);
+ if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
- return ret;
+ if ((scsi_sense_valid(&sshdr)) &&
+ ((sshdr.sense_key == UNIT_ATTENTION) ||
+ (sshdr.sense_key == NOT_READY))) {
+ sdev->changed = 1;
+ result = 0;
+ }
+ }
+ return result;
}
+EXPORT_SYMBOL(scsi_test_unit_ready);
/**
* scsi_device_set_state - Take the given device through the device
case SDEV_CREATED:
case SDEV_OFFLINE:
case SDEV_QUIESCE:
+ case SDEV_BLOCK:
break;
default:
goto illegal;
case SDEV_CREATED:
case SDEV_RUNNING:
case SDEV_QUIESCE:
+ case SDEV_BLOCK:
+ break;
+ default:
+ goto illegal;
+ }
+ break;
+
+ case SDEV_BLOCK:
+ switch (oldstate) {
+ case SDEV_CREATED:
+ case SDEV_RUNNING:
break;
default:
goto illegal;
case SDEV_CREATED:
case SDEV_RUNNING:
case SDEV_OFFLINE:
+ case SDEV_BLOCK:
break;
default:
goto illegal;
return 0;
illegal:
- dev_printk(KERN_ERR, &sdev->sdev_gendev,
- "Illegal state transition %s->%s\n",
- scsi_device_state_name(oldstate),
- scsi_device_state_name(state));
- WARN_ON(1);
+ SCSI_LOG_ERROR_RECOVERY(1,
+ sdev_printk(KERN_ERR, sdev,
+ "Illegal state transition %s->%s\n",
+ scsi_device_state_name(oldstate),
+ scsi_device_state_name(state))
+ );
return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
scsi_run_queue(sdev->request_queue);
while (sdev->device_busy) {
- schedule_timeout(HZ/5);
+ msleep_interruptible(200);
scsi_run_queue(sdev->request_queue);
}
return 0;
}
EXPORT_SYMBOL(scsi_device_resume);
+static void
+device_quiesce_fn(struct scsi_device *sdev, void *data)
+{
+ scsi_device_quiesce(sdev);
+}
+
+void
+scsi_target_quiesce(struct scsi_target *starget)
+{
+ starget_for_each_device(starget, NULL, device_quiesce_fn);
+}
+EXPORT_SYMBOL(scsi_target_quiesce);
+
+static void
+device_resume_fn(struct scsi_device *sdev, void *data)
+{
+ scsi_device_resume(sdev);
+}
+
+void
+scsi_target_resume(struct scsi_target *starget)
+{
+ starget_for_each_device(starget, NULL, device_resume_fn);
+}
+EXPORT_SYMBOL(scsi_target_resume);
+
+/**
+ * scsi_internal_device_block - internal function to put a device
+ * temporarily into the SDEV_BLOCK state
+ * @sdev: device to block
+ *
+ * Block request made by scsi lld's to temporarily stop all
+ * scsi commands on the specified device. Called from interrupt
+ * or normal process context.
+ *
+ * Returns zero if successful or error if not
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_BLOCK state
+ * (which must be a legal transition). When the device is in this
+ * state, all commands are deferred until the scsi lld reenables
+ * the device with scsi_device_unblock or device_block_tmo fires.
+ * This routine assumes the host_lock is held on entry.
+ **/
+int
+scsi_internal_device_block(struct scsi_device *sdev)
+{
+ request_queue_t *q = sdev->request_queue;
+ unsigned long flags;
+ int err = 0;
+
+ err = scsi_device_set_state(sdev, SDEV_BLOCK);
+ if (err)
+ return err;
+
+ /*
+ * The device has transitioned to SDEV_BLOCK. Stop the
+ * block layer from calling the midlayer with this device's
+ * request queue.
+ */
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_internal_device_block);
+
+/**
+ * scsi_internal_device_unblock - resume a device after a block request
+ * @sdev: device to resume
+ *
+ * Called by scsi lld's or the midlayer to restart the device queue
+ * for the previously suspended scsi device. Called from interrupt or
+ * normal process context.
+ *
+ * Returns zero if successful or error if not.
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_RUNNING state
+ * (which must be a legal transition) allowing the midlayer to
+ * goose the queue for this device. This routine assumes the
+ * host_lock is held upon entry.
+ **/
+int
+scsi_internal_device_unblock(struct scsi_device *sdev)
+{
+ request_queue_t *q = sdev->request_queue;
+ int err;
+ unsigned long flags;
+
+ /*
+ * Try to transition the scsi device to SDEV_RUNNING
+ * and goose the device queue if successful.
+ */
+ err = scsi_device_set_state(sdev, SDEV_RUNNING);
+ if (err)
+ return err;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
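+
+/*
+ * For example (sketch only; shost, sdev, and flags belong to the
+ * caller): an LLD that loses its transport can defer I/O with
+ * scsi_internal_device_block() and resume it once the transport
+ * recovers. Both calls expect the host_lock to be held:
+ *
+ *	spin_lock_irqsave(shost->host_lock, flags);
+ *	scsi_internal_device_block(sdev);
+ *	spin_unlock_irqrestore(shost->host_lock, flags);
+ *
+ *	...transport recovery completes...
+ *
+ *	spin_lock_irqsave(shost->host_lock, flags);
+ *	scsi_internal_device_unblock(sdev);
+ *	spin_unlock_irqrestore(shost->host_lock, flags);
+ */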
+
+static void
+device_block(struct scsi_device *sdev, void *data)
+{
+ scsi_internal_device_block(sdev);
+}
+
+static int
+target_block(struct device *dev, void *data)
+{
+ if (scsi_is_target_device(dev))
+ starget_for_each_device(to_scsi_target(dev), NULL,
+ device_block);
+ return 0;
+}
+
+void
+scsi_target_block(struct device *dev)
+{
+ if (scsi_is_target_device(dev))
+ starget_for_each_device(to_scsi_target(dev), NULL,
+ device_block);
+ else
+ device_for_each_child(dev, NULL, target_block);
+}
+EXPORT_SYMBOL_GPL(scsi_target_block);
+
+static void
+device_unblock(struct scsi_device *sdev, void *data)
+{
+ scsi_internal_device_unblock(sdev);
+}
+
+static int
+target_unblock(struct device *dev, void *data)
+{
+ if (scsi_is_target_device(dev))
+ starget_for_each_device(to_scsi_target(dev), NULL,
+ device_unblock);
+ return 0;
+}
+
+void
+scsi_target_unblock(struct device *dev)
+{
+ if (scsi_is_target_device(dev))
+ starget_for_each_device(to_scsi_target(dev), NULL,
+ device_unblock);
+ else
+ device_for_each_child(dev, NULL, target_unblock);
+}
+EXPORT_SYMBOL_GPL(scsi_target_unblock);
+
+
+struct work_queue_work {
+ struct work_struct work;
+ void (*fn)(void *);
+ void *data;
+};
+
+static void execute_in_process_context_work(void *data)
+{
+ void (*fn)(void *data);
+ struct work_queue_work *wqw = data;
+
+ fn = wqw->fn;
+ data = wqw->data;
+
+ kfree(wqw);
+
+ fn(data);
+}
+
+/**
+ * scsi_execute_in_process_context - reliably execute the routine with user context
+ * @fn: the function to execute
+ * @data: data to pass to the function
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns: 0 - function was executed
+ * 1 - function was scheduled for execution
+ * <0 - error
+ */
+int scsi_execute_in_process_context(void (*fn)(void *data), void *data)
+{
+ struct work_queue_work *wqw;
+
+ if (!in_interrupt()) {
+ fn(data);
+ return 0;
+ }
+
+ wqw = kmalloc(sizeof(struct work_queue_work), GFP_ATOMIC);
+
+ if (unlikely(!wqw)) {
+ printk(KERN_ERR "Failed to allocate memory\n");
+ WARN_ON(1);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&wqw->work, execute_in_process_context_work, wqw);
+ wqw->fn = fn;
+ wqw->data = data;
+ schedule_work(&wqw->work);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(scsi_execute_in_process_context);
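+
+/*
+ * For example (sketch only; my_release and struct my_object are
+ * illustrative names): a release routine that may be entered from
+ * interrupt context can be funnelled through this helper:
+ *
+ *	static void my_release(void *data)
+ *	{
+ *		struct my_object *obj = data;
+ *
+ *		kfree(obj);
+ *	}
+ *
+ *	scsi_execute_in_process_context(my_release, obj);
+ *
+ * The routine runs immediately when process context is available and
+ * is otherwise deferred to keventd via schedule_work().
+ */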