X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fscsi.c;h=24cffd98ee63546ce716e0277812dda9bafd08d2;hb=refs%2Fheads%2Fvserver;hp=12f81878f1cbf777a8b43be08bbf10aca14402c5;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 12f81878f..24cffd98e 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -48,20 +48,26 @@ #include #include #include -#include #include #include #include #include #include #include +#include +#include +#include +#include +#include +#include #include -#include "scsi.h" +#include #include "scsi_priv.h" #include "scsi_logging.h" +static void scsi_done(struct scsi_cmnd *cmd); /* * Definitions and constants. @@ -81,19 +87,20 @@ #define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \ COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len) -/* - * Data declarations. - */ -unsigned long scsi_pid; -static unsigned long serial_number; - /* * Note - the initial logging level can be set here to log events at boot time. * After the system is up, you may enable logging via the /proc interface. */ unsigned int scsi_logging_level; +#if defined(CONFIG_SCSI_LOGGING) +EXPORT_SYMBOL(scsi_logging_level); +#endif -const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = { +/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. + * You may not alter any existing entry (although adding new ones is + * encouraged once assigned by ANSI/INCITS T10 + */ +static const char *const scsi_device_types[] = { "Direct-Access ", "Sequential-Access", "Printer ", @@ -104,90 +111,36 @@ const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = { "Optical Device ", "Medium Changer ", "Communications ", - "Unknown ", - "Unknown ", + "ASC IT8 ", + "ASC IT8 ", "RAID ", "Enclosure ", + "Direct-Access-RBC", + "Optical card ", + "Bridge controller", + "Object storage ", + "Automation/Drive ", }; -/* - * Function: scsi_allocate_request - * - * Purpose: Allocate a request descriptor. - * - * Arguments: device - device for which we want a request - * gfp_mask - allocation flags passed to kmalloc - * - * Lock status: No locks assumed to be held. This function is SMP-safe. - * - * Returns: Pointer to request block. - */ -struct scsi_request *scsi_allocate_request(struct scsi_device *sdev, - int gfp_mask) +const char * scsi_device_type(unsigned type) { - const int offset = ALIGN(sizeof(struct scsi_request), 4); - const int size = offset + sizeof(struct request); - struct scsi_request *sreq; - - sreq = kmalloc(size, gfp_mask); - if (likely(sreq != NULL)) { - memset(sreq, 0, size); - sreq->sr_request = (struct request *)(((char *)sreq) + offset); - sreq->sr_device = sdev; - sreq->sr_host = sdev->host; - sreq->sr_magic = SCSI_REQ_MAGIC; - sreq->sr_data_direction = DMA_BIDIRECTIONAL; - } - - return sreq; + if (type == 0x1e) + return "Well-known LUN "; + if (type == 0x1f) + return "No Device "; + if (type >= ARRAY_SIZE(scsi_device_types)) + return "Unknown "; + return scsi_device_types[type]; } -void __scsi_release_request(struct scsi_request *sreq) -{ - struct request *req = sreq->sr_request; - - /* unlikely because the tag was usually ended earlier by the - * mid-layer. However, for layering reasons ULD's don't end - * the tag of commands they generate. 
*/ - if (unlikely(blk_rq_tagged(req))) { - unsigned long flags; - struct request_queue *q = req->q; - - spin_lock_irqsave(q->queue_lock, flags); - blk_queue_end_tag(q, req); - spin_unlock_irqrestore(q->queue_lock, flags); - } - - - if (likely(sreq->sr_command != NULL)) { - struct scsi_cmnd *cmd = sreq->sr_command; - - sreq->sr_command = NULL; - scsi_next_command(cmd); - } -} - -/* - * Function: scsi_release_request - * - * Purpose: Release a request descriptor. - * - * Arguments: sreq - request to release - * - * Lock status: No locks assumed to be held. This function is SMP-safe. - */ -void scsi_release_request(struct scsi_request *sreq) -{ - __scsi_release_request(sreq); - kfree(sreq); -} +EXPORT_SYMBOL(scsi_device_type); struct scsi_host_cmd_pool { - kmem_cache_t *slab; + struct kmem_cache *slab; unsigned int users; char *name; unsigned int slab_flags; - unsigned int gfp_mask; + gfp_t gfp_mask; }; static struct scsi_host_cmd_pool scsi_cmd_pool = { @@ -201,10 +154,9 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = { .gfp_mask = __GFP_DMA, }; -static DECLARE_MUTEX(host_cmd_pool_mutex); +static DEFINE_MUTEX(host_cmd_pool_mutex); -static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, - int gfp_mask) +struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) { struct scsi_cmnd *cmd; @@ -225,6 +177,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, return cmd; } +EXPORT_SYMBOL_GPL(__scsi_get_command); /* * Function: scsi_get_command() @@ -236,26 +189,53 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, * * Returns: The allocated scsi command structure. */ -struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask) +struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) { - struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask); + struct scsi_cmnd *cmd; + + /* Bail if we can't get a reference to the device */ + if (!get_device(&dev->sdev_gendev)) + return NULL; + + cmd = __scsi_get_command(dev->host, gfp_mask); if (likely(cmd != NULL)) { unsigned long flags; memset(cmd, 0, sizeof(*cmd)); cmd->device = dev; - cmd->state = SCSI_STATE_UNUSED; - cmd->owner = SCSI_OWNER_NOBODY; init_timer(&cmd->eh_timeout); INIT_LIST_HEAD(&cmd->list); spin_lock_irqsave(&dev->list_lock, flags); list_add_tail(&cmd->list, &dev->cmd_list); spin_unlock_irqrestore(&dev->list_lock, flags); - } + cmd->jiffies_at_alloc = jiffies; + } else + put_device(&dev->sdev_gendev); return cmd; -} +} +EXPORT_SYMBOL(scsi_get_command); + +void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd, + struct device *dev) +{ + unsigned long flags; + + /* changing locks here, don't need to restore the irq state */ + spin_lock_irqsave(&shost->free_list_lock, flags); + if (unlikely(list_empty(&shost->free_list))) { + list_add(&cmd->list, &shost->free_list); + cmd = NULL; + } + spin_unlock_irqrestore(&shost->free_list_lock, flags); + + if (likely(cmd != NULL)) + kmem_cache_free(shost->cmd_pool->slab, cmd); + + put_device(dev); +} +EXPORT_SYMBOL(__scsi_put_command); /* * Function: scsi_put_command() @@ -270,25 +250,18 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask) */ void scsi_put_command(struct scsi_cmnd *cmd) { - struct Scsi_Host *shost = cmd->device->host; + struct scsi_device *sdev = cmd->device; unsigned long flags; - + /* serious error if the command hasn't come from a device list */ spin_lock_irqsave(&cmd->device->list_lock, flags); BUG_ON(list_empty(&cmd->list)); 
list_del_init(&cmd->list); - spin_unlock(&cmd->device->list_lock); - /* changing locks here, don't need to restore the irq state */ - spin_lock(&shost->free_list_lock); - if (unlikely(list_empty(&shost->free_list))) { - list_add(&cmd->list, &shost->free_list); - cmd = NULL; - } - spin_unlock_irqrestore(&shost->free_list_lock, flags); + spin_unlock_irqrestore(&cmd->device->list_lock, flags); - if (likely(cmd != NULL)) - kmem_cache_free(shost->cmd_pool->slab, cmd); + __scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev); } +EXPORT_SYMBOL(scsi_put_command); /* * Function: scsi_setup_command_freelist() @@ -311,7 +284,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost) * Select a command slab for this host and create it if not * yet existant. */ - down(&host_cmd_pool_mutex); + mutex_lock(&host_cmd_pool_mutex); pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool); if (!pool->users) { pool->slab = kmem_cache_create(pool->name, @@ -323,7 +296,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost) pool->users++; shost->cmd_pool = pool; - up(&host_cmd_pool_mutex); + mutex_unlock(&host_cmd_pool_mutex); /* * Get one backup command for this host. @@ -340,7 +313,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost) kmem_cache_destroy(pool->slab); return -ENOMEM; fail: - up(&host_cmd_pool_mutex); + mutex_unlock(&host_cmd_pool_mutex); return -ENOMEM; } @@ -362,10 +335,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost) kmem_cache_free(shost->cmd_pool->slab, cmd); } - down(&host_cmd_pool_mutex); + mutex_lock(&host_cmd_pool_mutex); if (!--shost->cmd_pool->users) kmem_cache_destroy(shost->cmd_pool->slab); - up(&host_cmd_pool_mutex); + mutex_unlock(&host_cmd_pool_mutex); } #ifdef CONFIG_SCSI_LOGGING @@ -390,9 +363,7 @@ void scsi_log_send(struct scsi_cmnd *cmd) SCSI_LOG_MLQUEUE_BITS); if (level > 1) { sdev = cmd->device; - printk(KERN_INFO "scsi <%d:%d:%d:%d> send ", - sdev->host->host_no, sdev->channel, sdev->id, - sdev->lun); + sdev_printk(KERN_INFO, sdev, "send "); if (level > 2) printk("0x%p ", cmd); /* @@ -400,11 +371,11 @@ void scsi_log_send(struct scsi_cmnd *cmd) * output in scsi_log_completion. */ printk(" "); - print_command(cmd->cmnd); + scsi_print_command(cmd); if (level > 3) { printk(KERN_INFO "buffer = 0x%p, bufflen = %d," " done = 0x%p, queuecommand 0x%p\n", - cmd->buffer, cmd->bufflen, + cmd->request_buffer, cmd->request_bufflen, cmd->done, sdev->host->hostt->queuecommand); @@ -436,9 +407,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) if (((level > 0) && (cmd->result || disposition != SUCCESS)) || (level > 1)) { sdev = cmd->device; - printk(KERN_INFO "scsi <%d:%d:%d:%d> done ", - sdev->host->host_no, sdev->channel, sdev->id, - sdev->lun); + sdev_printk(KERN_INFO, sdev, "done "); if (level > 2) printk("0x%p ", cmd); /* @@ -468,13 +437,13 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) printk("UNKNOWN"); } printk(" %8x ", cmd->result); - print_command(cmd->cmnd); + scsi_print_command(cmd); if (status_byte(cmd->result) & CHECK_CONDITION) { /* - * XXX The print_sense formatting/prefix + * XXX The scsi_print_sense formatting/prefix * doesn't match this function. */ - print_sense("", cmd); + scsi_print_sense("", cmd); } if (level > 3) { printk(KERN_INFO "scsi host busy %d failed %d\n", @@ -486,6 +455,21 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) } #endif +/* + * Assign a serial number and pid to the request for error recovery + * and debugging purposes. 
Protected by the Host_Lock of host. + */ +static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + cmd->serial_number = host->cmd_serial_number++; + if (cmd->serial_number == 0) + cmd->serial_number = host->cmd_serial_number++; + + cmd->pid = host->cmd_pid++; + if (cmd->pid == 0) + cmd->pid = host->cmd_pid++; +} + /* * Function: scsi_dispatch_command * @@ -508,21 +492,36 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) * returns an immediate error upwards, and signals * that the device is no longer present */ cmd->result = DID_NO_CONNECT << 16; - scsi_done(cmd); + atomic_inc(&cmd->device->iorequest_cnt); + __scsi_done(cmd); /* return 0 (because the command has been processed) */ goto out; } - /* Assign a unique nonzero serial_number. */ - /* XXX(hch): this is racy */ - if (++serial_number == 0) - serial_number = 1; - cmd->serial_number = serial_number; - cmd->pid = scsi_pid++; + + /* Check to see if the scsi lld put this device into state SDEV_BLOCK. */ + if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) { + /* + * in SDEV_BLOCK, the command is just put back on the device + * queue. The suspend state has already blocked the queue so + * future requests should not occur until the device + * transitions out of the suspend state. + */ + scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); + + SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n")); + + /* + * NOTE: rtn is still zero here because we don't need the + * queue to be plugged on return (it's already stopped) + */ + goto out; + } /* * If SCSI-2 or lower, store the LUN value in cmnd. */ - if (cmd->device->scsi_level <= SCSI_2) { + if (cmd->device->scsi_level <= SCSI_2 && + cmd->device->scsi_level != SCSI_UNKNOWN) { cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | (cmd->device->lun << 5 & 0xe0); } @@ -549,6 +548,10 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) host->resetting = 0; } + /* + * AK: unlikely race here: for some reason the timer could + * expire before the serial number is set up below. + */ scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out); scsi_log_send(cmd); @@ -557,9 +560,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) * We will use a queued command if possible, otherwise we will * emulate the queuing and calling of completion function ourselves. */ - - cmd->state = SCSI_STATE_QUEUED; - cmd->owner = SCSI_OWNER_LOWLEVEL; + atomic_inc(&cmd->device->iorequest_cnt); /* * Before we queue this command, check if the command @@ -570,14 +571,14 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) printk("queuecommand : command too long.\n")); cmd->result = (DID_ABORT << 16); - spin_lock_irqsave(host->host_lock, flags); scsi_done(cmd); - spin_unlock_irqrestore(host->host_lock, flags); goto out; } spin_lock_irqsave(host->host_lock, flags); - if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) { + scsi_cmd_get_serial(host, cmd); + + if (unlikely(host->shost_state == SHOST_DEL)) { cmd->result = (DID_NO_CONNECT << 16); scsi_done(cmd); } else { @@ -585,9 +586,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) } spin_unlock_irqrestore(host->host_lock, flags); if (rtn) { - scsi_queue_insert(cmd, - (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? - rtn : SCSI_MLQUEUE_HOST_BUSY); + if (scsi_delete_timer(cmd)) { + atomic_inc(&cmd->device->iodone_cnt); + scsi_queue_insert(cmd, + (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? 
+ rtn : SCSI_MLQUEUE_HOST_BUSY); + } SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n")); } @@ -597,82 +601,23 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) return rtn; } -/* - * Function: scsi_init_cmd_from_req - * - * Purpose: Queue a SCSI command - * Purpose: Initialize a struct scsi_cmnd from a struct scsi_request - * - * Arguments: cmd - command descriptor. - * sreq - Request from the queue. - * - * Lock status: None needed. - * - * Returns: Nothing. +/** + * scsi_req_abort_cmd -- Request command recovery for the specified command + * cmd: pointer to the SCSI command of interest * - * Notes: Mainly transfer data from the request structure to the - * command structure. The request structure is allocated - * using the normal memory allocator, and requests can pile - * up to more or less any depth. The command structure represents - * a consumable resource, as these are allocated into a pool - * when the SCSI subsystem initializes. The preallocation is - * required so that in low-memory situations a disk I/O request - * won't cause the memory manager to try and write out a page. - * The request structure is generally used by ioctls and character - * devices. + * This function requests that SCSI Core start recovery for the + * command by deleting the timer and adding the command to the eh + * queue. It can be called by either LLDDs or SCSI Core. LLDDs who + * implement their own error recovery MAY ignore the timeout event if + * they generated scsi_req_abort_cmd. */ -void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq) +void scsi_req_abort_cmd(struct scsi_cmnd *cmd) { - sreq->sr_command = cmd; - - cmd->owner = SCSI_OWNER_MIDLEVEL; - cmd->cmd_len = sreq->sr_cmd_len; - cmd->use_sg = sreq->sr_use_sg; - - cmd->request = sreq->sr_request; - memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd)); - cmd->serial_number = 0; - cmd->serial_number_at_timeout = 0; - cmd->bufflen = sreq->sr_bufflen; - cmd->buffer = sreq->sr_buffer; - cmd->retries = 0; - cmd->allowed = sreq->sr_allowed; - cmd->done = sreq->sr_done; - cmd->timeout_per_command = sreq->sr_timeout_per_command; - cmd->sc_data_direction = sreq->sr_data_direction; - cmd->sglist_len = sreq->sr_sglist_len; - cmd->underflow = sreq->sr_underflow; - cmd->sc_request = sreq; - memcpy(cmd->cmnd, sreq->sr_cmnd, sizeof(sreq->sr_cmnd)); - - /* - * Zero the sense buffer. Some host adapters automatically request - * sense on error. 0 is not a valid sense code. - */ - memset(cmd->sense_buffer, 0, sizeof(sreq->sr_sense_buffer)); - cmd->request_buffer = sreq->sr_buffer; - cmd->request_bufflen = sreq->sr_bufflen; - cmd->old_use_sg = cmd->use_sg; - if (cmd->cmd_len == 0) - cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); - cmd->old_cmd_len = cmd->cmd_len; - cmd->sc_old_data_direction = cmd->sc_data_direction; - cmd->old_underflow = cmd->underflow; - - /* - * Start the timer ticking. - */ - cmd->internal_timeout = NORMAL_TIMEOUT; - cmd->abort_reason = 0; - cmd->result = 0; - - SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n")); + if (!scsi_delete_timer(cmd)) + return; + scsi_times_out(cmd); } - -/* - * Per-CPU I/O completion queue. - */ -static DEFINE_PER_CPU(struct list_head, scsi_done_q); +EXPORT_SYMBOL(scsi_req_abort_cmd); /** * scsi_done - Enqueue the finished SCSI command into the done queue. @@ -687,10 +632,8 @@ static DEFINE_PER_CPU(struct list_head, scsi_done_q); * * This function is interrupt context safe. 
*/ -void scsi_done(struct scsi_cmnd *cmd) +static void scsi_done(struct scsi_cmnd *cmd) { - unsigned long flags; - /* * We don't have to worry about this one timing out any more. * If we are unable to remove the timer, then the command @@ -701,66 +644,32 @@ void scsi_done(struct scsi_cmnd *cmd) */ if (!scsi_delete_timer(cmd)) return; + __scsi_done(cmd); +} + +/* Private entry to scsi_done() to complete a command when the timer + * isn't running --- used by scsi_times_out */ +void __scsi_done(struct scsi_cmnd *cmd) +{ + struct request *rq = cmd->request; /* * Set the serial numbers back to zero */ cmd->serial_number = 0; - cmd->serial_number_at_timeout = 0; - cmd->state = SCSI_STATE_BHQUEUE; - cmd->owner = SCSI_OWNER_BH_HANDLER; + + atomic_inc(&cmd->device->iodone_cnt); + if (cmd->result) + atomic_inc(&cmd->device->ioerr_cnt); + + BUG_ON(!rq); /* - * Next, enqueue the command into the done queue. - * It is a per-CPU queue, so we just disable local interrupts - * and need no spinlock. + * The uptodate/nbytes values don't matter, as we allow partial + * completes and thus will check this in the softirq callback */ - local_irq_save(flags); - list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q)); - raise_softirq_irqoff(SCSI_SOFTIRQ); - local_irq_restore(flags); -} - -/** - * scsi_softirq - Perform post-interrupt processing of finished SCSI commands. - * - * This is the consumer of the done queue. - * - * This is called with all interrupts enabled. This should reduce - * interrupt latency, stack depth, and reentrancy of the low-level - * drivers. - */ -static void scsi_softirq(struct softirq_action *h) -{ - int disposition; - LIST_HEAD(local_q); - - local_irq_disable(); - list_splice_init(&__get_cpu_var(scsi_done_q), &local_q); - local_irq_enable(); - - while (!list_empty(&local_q)) { - struct scsi_cmnd *cmd = list_entry(local_q.next, - struct scsi_cmnd, eh_entry); - list_del_init(&cmd->eh_entry); - - disposition = scsi_decide_disposition(cmd); - scsi_log_completion(cmd, disposition); - switch (disposition) { - case SUCCESS: - scsi_finish_command(cmd); - break; - case NEEDS_RETRY: - scsi_retry_command(cmd); - break; - case ADD_TO_MLQUEUE: - scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); - break; - default: - if (!scsi_eh_scmd_add(cmd, 0)) - scsi_finish_command(cmd); - } - } + rq->completion_data = cmd; + blk_complete_request(rq); } /* @@ -775,11 +684,6 @@ static void scsi_softirq(struct softirq_action *h) */ int scsi_retry_command(struct scsi_cmnd *cmd) { - /* - * Restore the SCSI command state. - */ - scsi_setup_cmd_retry(cmd); - /* * Zero the sense information from the last time we tried * this command. @@ -800,7 +704,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct Scsi_Host *shost = sdev->host; - struct scsi_request *sreq; scsi_device_unbusy(sdev); @@ -822,30 +725,9 @@ void scsi_finish_command(struct scsi_cmnd *cmd) if (SCSI_SENSE_VALID(cmd)) cmd->result |= (DRIVER_SENSE << 24); - SCSI_LOG_MLCOMPLETE(4, printk("Notifying upper driver of completion " - "for device %d %x\n", sdev->id, cmd->result)); - - cmd->owner = SCSI_OWNER_HIGHLEVEL; - cmd->state = SCSI_STATE_FINISHED; - - /* - * We can get here with use_sg=0, causing a panic in the upper level - */ - cmd->use_sg = cmd->old_use_sg; - - /* - * If there is an associated request structure, copy the data over - * before we call the completion function. 
- */ - sreq = cmd->sc_request; - if (sreq) { - sreq->sr_result = sreq->sr_command->result; - if (sreq->sr_result) { - memcpy(sreq->sr_sense_buffer, - sreq->sr_command->sense_buffer, - sizeof(sreq->sr_sense_buffer)); - } - } + SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev, + "Notifying upper driver of completion " + "(result %x)\n", cmd->result)); cmd->done(cmd); } @@ -872,12 +754,9 @@ EXPORT_SYMBOL(scsi_finish_command); * the right thing depending on whether or not the device is * currently active and whether or not it even has the * command blocks built yet. - * - * XXX(hch): What exactly is device_request_lock trying to protect? */ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) { - static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED; unsigned long flags; /* @@ -885,15 +764,15 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) */ if (tags <= 0) return; - /* - * Limit max queue depth on a single lun to 256 for now. Remember, - * we allocate a struct scsi_command for each of these and keep it - * around forever. Too deep of a depth just wastes memory. - */ - if (tags > 256) - return; - spin_lock_irqsave(&device_request_lock, flags); + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); + + /* Check to see if the queue is managed by the block layer + * if it is, and we fail to adjust the depth, exit */ + if (blk_queue_tagged(sdev->request_queue) && + blk_queue_resize_tags(sdev->request_queue, tags) != 0) + goto out; + sdev->queue_depth = tags; switch (tagged) { case MSG_ORDERED_TAG: @@ -905,17 +784,18 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) sdev->simple_tags = 1; break; default: - printk(KERN_WARNING "(scsi%d:%d:%d:%d) " - "scsi_adjust_queue_depth, bad queue type, " - "disabled\n", sdev->host->host_no, - sdev->channel, sdev->id, sdev->lun); + sdev_printk(KERN_WARNING, sdev, + "scsi_adjust_queue_depth, bad queue type, " + "disabled\n"); case 0: sdev->ordered_tags = sdev->simple_tags = 0; sdev->queue_depth = tags; break; } - spin_unlock_irqrestore(&device_request_lock, flags); + out: + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); } +EXPORT_SYMBOL(scsi_adjust_queue_depth); /* * Function: scsi_track_queue_full() @@ -966,6 +846,7 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth) scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); return depth; } +EXPORT_SYMBOL(scsi_track_queue_full); /** * scsi_device_get - get an addition reference to a scsi_device @@ -977,14 +858,14 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth) */ int scsi_device_get(struct scsi_device *sdev) { - if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) + if (sdev->sdev_state == SDEV_DEL) return -ENXIO; if (!get_device(&sdev->sdev_gendev)) return -ENXIO; - if (!try_module_get(sdev->host->hostt->module)) { - put_device(&sdev->sdev_gendev); - return -ENXIO; - } + /* We can fail this if we're doing SCSI operations + * from module exit (like cache flush) */ + try_module_get(sdev->host->hostt->module); + return 0; } EXPORT_SYMBOL(scsi_device_get); @@ -999,7 +880,14 @@ EXPORT_SYMBOL(scsi_device_get); */ void scsi_device_put(struct scsi_device *sdev) { - module_put(sdev->host->hostt->module); +#ifdef CONFIG_MODULE_UNLOAD + struct module *module = sdev->host->hostt->module; + + /* The module refcount will be zero if scsi_device_get() + * was called from a module removal routine */ + if (module && module_refcount(module) != 0) + module_put(module); 
+#endif put_device(&sdev->sdev_gendev); } EXPORT_SYMBOL(scsi_device_put); @@ -1018,6 +906,7 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, /* skip devices that we can't get a reference to */ if (!scsi_device_get(next)) break; + next = NULL; list = list->next; } spin_unlock_irqrestore(shost->host_lock, flags); @@ -1028,6 +917,82 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost, } EXPORT_SYMBOL(__scsi_iterate_devices); +/** + * starget_for_each_device - helper to walk all devices of a target + * @starget: target whose devices we want to iterate over. + * + * This traverses over each devices of @shost. The devices have + * a reference that must be released by scsi_host_put when breaking + * out of the loop. + */ +void starget_for_each_device(struct scsi_target *starget, void * data, + void (*fn)(struct scsi_device *, void *)) +{ + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + struct scsi_device *sdev; + + shost_for_each_device(sdev, shost) { + if ((sdev->channel == starget->channel) && + (sdev->id == starget->id)) + fn(sdev, data); + } +} +EXPORT_SYMBOL(starget_for_each_device); + +/** + * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) + * @starget: SCSI target pointer + * @lun: SCSI Logical Unit Number + * + * Looks up the scsi_device with the specified @lun for a give + * @starget. The returned scsi_device does not have an additional + * reference. You must hold the host's host_lock over this call and + * any access to the returned scsi_device. + * + * Note: The only reason why drivers would want to use this is because + * they're need to access the device list in irq context. Otherwise you + * really want to use scsi_device_lookup_by_target instead. + **/ +struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, + uint lun) +{ + struct scsi_device *sdev; + + list_for_each_entry(sdev, &starget->devices, same_target_siblings) { + if (sdev->lun ==lun) + return sdev; + } + + return NULL; +} +EXPORT_SYMBOL(__scsi_device_lookup_by_target); + +/** + * scsi_device_lookup_by_target - find a device given the target + * @starget: SCSI target pointer + * @lun: SCSI Logical Unit Number + * + * Looks up the scsi_device with the specified @channel, @id, @lun for a + * give host. The returned scsi_device has an additional reference that + * needs to be release with scsi_host_put once you're done with it. + **/ +struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, + uint lun) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + sdev = __scsi_device_lookup_by_target(starget, lun); + if (sdev && scsi_device_get(sdev)) + sdev = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); + + return sdev; +} +EXPORT_SYMBOL(scsi_device_lookup_by_target); + /** * scsi_device_lookup - find a device given the host (UNLOCKED) * @shost: SCSI host pointer @@ -1088,8 +1053,8 @@ EXPORT_SYMBOL(scsi_device_lookup); /** * scsi_device_cancel - cancel outstanding IO to this device - * @sdev: pointer to struct scsi_device - * @data: pointer to cancel value. + * @sdev: Pointer to struct scsi_device + * @recovery: Boolean instructing function to recover device or not. 
* **/ int scsi_device_cancel(struct scsi_device *sdev, int recovery) @@ -1103,7 +1068,7 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery) spin_lock_irqsave(&sdev->list_lock, flags); list_for_each_entry(scmd, &sdev->cmd_list, list) { - if (scmd->request && scmd->request->rq_status != RQ_INACTIVE) { + if (scmd->request) { /* * If we are unable to remove the timer, it means * that the command has already timed out or @@ -1120,9 +1085,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery) list_for_each_safe(lh, lh_sf, &active_list) { scmd = list_entry(lh, struct scsi_cmnd, eh_entry); list_del_init(lh); - if (recovery) { - scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD); - } else { + if (recovery && + !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) { scmd->result = (DID_ABORT << 16); scsi_finish_command(scmd); } @@ -1131,38 +1095,7 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery) return 0; } - -#ifdef CONFIG_HOTPLUG_CPU -static int scsi_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - int cpu = (unsigned long)hcpu; - - switch(action) { - case CPU_DEAD: - /* Drain scsi_done_q. */ - local_irq_disable(); - list_splice_init(&per_cpu(scsi_done_q, cpu), - &__get_cpu_var(scsi_done_q)); - raise_softirq_irqoff(SCSI_SOFTIRQ); - local_irq_enable(); - break; - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block __devinitdata scsi_cpu_nb = { - .notifier_call = scsi_cpu_notify, -}; - -#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb) -#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb) -#else -#define register_scsi_cpu() -#define unregister_scsi_cpu() -#endif /* CONFIG_HOTPLUG_CPU */ +EXPORT_SYMBOL(scsi_device_cancel); MODULE_DESCRIPTION("SCSI core"); MODULE_LICENSE("GPL"); @@ -1172,7 +1105,7 @@ MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); static int __init init_scsi(void) { - int error, i; + int error; error = scsi_init_queue(); if (error) @@ -1193,12 +1126,8 @@ static int __init init_scsi(void) if (error) goto cleanup_sysctl; - for (i = 0; i < NR_CPUS; i++) - INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); + scsi_netlink_init(); - devfs_mk_dir("scsi"); - open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL); - register_scsi_cpu(); printk(KERN_NOTICE "SCSI subsystem initialized\n"); return 0; @@ -1219,14 +1148,13 @@ cleanup_queue: static void __exit exit_scsi(void) { + scsi_netlink_exit(); scsi_sysfs_unregister(); scsi_exit_sysctl(); scsi_exit_hosts(); scsi_exit_devinfo(); - devfs_remove("scsi"); scsi_exit_procfs(); scsi_exit_queue(); - unregister_scsi_cpu(); } subsys_initcall(init_scsi);
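
Notes on selected hunks follow; the code in them is standalone userspace sketching for illustration, not part of the patch.

The new scsi_device_type() replaces direct MAX_SCSI_DEVICE_CODE indexing of the exported table with a bounded lookup that special-cases the two T10 peripheral device types that have no table entry: 0x1e (well-known LUN) and 0x1f (no device attached). A minimal standalone rendering of the same pattern, with the table truncated here for brevity:

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

static const char *const scsi_device_types[] = {
	"Direct-Access    ",
	"Sequential-Access",
	/* ... the remaining T10-assigned strings elided ... */
};

const char *scsi_device_type(unsigned type)
{
	if (type == 0x1e)
		return "Well-known LUN   ";
	if (type == 0x1f)
		return "No Device        ";
	if (type >= ARRAY_SIZE(scsi_device_types))
		return "Unknown          ";
	return scsi_device_types[type];
}

int main(void)
{
	printf("%s\n", scsi_device_type(0));	/* Direct-Access */
	printf("%s\n", scsi_device_type(0x1f));	/* No Device */
	printf("%s\n", scsi_device_type(200));	/* Unknown */
	return 0;
}

The bound check is what lets new T10 assignments be appended to the table without a MAX_SCSI_DEVICE_CODE constant to keep in sync.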
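
scsi_get_command()/__scsi_put_command() implement a one-deep emergency reserve: scsi_setup_command_freelist() parks one preallocated command on shost->free_list so a command can still be produced when the slab allocation fails under memory pressure, and the free path refills that reserve before handing anything back to the slab. The shape of the pattern, with the free_list_lock locking omitted and illustrative names (reserve_pool is not a kernel API):

#include <stdlib.h>

struct reserve_pool {
	void	*reserve;	/* the one emergency object, or NULL */
	size_t	objsize;
};

/* Allocation falls back to the reserve instead of failing, as
 * __scsi_get_command() falls back to shost->free_list. */
static void *pool_get(struct reserve_pool *p)
{
	void *obj = malloc(p->objsize);

	if (!obj && p->reserve) {
		obj = p->reserve;
		p->reserve = NULL;
	}
	return obj;
}

/* Freeing tops the reserve back up first, mirroring the
 * list_empty(&shost->free_list) check in __scsi_put_command(). */
static void pool_put(struct reserve_pool *p, void *obj)
{
	if (!p->reserve) {
		p->reserve = obj;
		return;
	}
	free(obj);
}

int main(void)
{
	struct reserve_pool p = { malloc(64), 64 };
	void *a = pool_get(&p);
	void *b = pool_get(&p);

	pool_put(&p, a);
	pool_put(&p, b);
	free(p.reserve);
	return 0;
}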
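
scsi_cmd_get_serial() replaces the old file-scope scsi_pid and serial_number globals (removed along with the "XXX(hch): this is racy" comment) with per-host counters advanced under the host lock, taken just before the queuecommand call. Zero is reserved to mean "unassigned", since __scsi_done() sets serial_number back to 0, so a counter that wraps to zero is simply bumped once more. The skip-zero increment in isolation:

#include <stdio.h>

/* Skip-zero counter as used for both cmd_serial_number and cmd_pid;
 * the caller is assumed to hold the host lock, per the patch. */
static unsigned long next_serial(unsigned long *counter)
{
	unsigned long v = (*counter)++;

	if (v == 0)
		v = (*counter)++;
	return v;
}

int main(void)
{
	unsigned long ctr = (unsigned long)-1;	/* about to wrap */

	printf("%lu\n", next_serial(&ctr));	/* ULONG_MAX */
	printf("%lu\n", next_serial(&ctr));	/* 1: zero is skipped */
	return 0;
}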
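
The dispatch path still folds the LUN into CDB byte 1 for scsi_level <= SCSI_2, now guarded so SCSI_UNKNOWN devices are left untouched. The masking keeps the command-specific low five bits and writes the LUN into bits 5-7:

#include <assert.h>

/* SCSI-2 and earlier carry the LUN in the top three bits of CDB
 * byte 1; only LUNs 0-7 fit, hence the 0xe0 mask. */
static unsigned char cdb1_set_lun(unsigned char cdb1, unsigned lun)
{
	return (cdb1 & 0x1f) | ((lun << 5) & 0xe0);
}

int main(void)
{
	assert(cdb1_set_lun(0x07, 3) == 0x67);
	assert((cdb1_set_lun(0xff, 0) & 0xe0) == 0);
	return 0;
}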
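
Several paths above (scsi_done(), the new scsi_req_abort_cmd(), the rejected-queuecommand branch) hinge on scsi_delete_timer(): whoever successfully deletes the pending timer owns completion of the command, and a failed deletion means the timeout handler has already fired and will finish the job. That single-winner handoff can be modeled loosely with a C11 atomic flag (an analogy only, not the kernel timer internals):

#include <stdatomic.h>
#include <stdbool.h>

struct pending_cmd {
	atomic_bool timer_armed;
};

/* Exactly one caller sees true, as exactly one caller of
 * scsi_delete_timer() wins; everyone else must back off. */
static bool claim_completion(struct pending_cmd *p)
{
	bool armed = true;

	return atomic_compare_exchange_strong(&p->timer_armed,
					      &armed, false);
}

int main(void)
{
	struct pending_cmd c = { true };

	return claim_completion(&c) && !claim_completion(&c) ? 0 : 1;
}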