X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fscsi.c;h=73994e2ac2cb73e3636042945511735bbb17a13f;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=ab4c237306e44b802d586c5cf874e678527cb8e6;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index ab4c23730..73994e2ac 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -48,13 +48,13 @@ #include #include #include -#include #include #include #include #include #include #include +#include #include #include @@ -68,6 +68,7 @@ #include "scsi_priv.h" #include "scsi_logging.h" +static void scsi_done(struct scsi_cmnd *cmd); /* * Definitions and constants. @@ -87,12 +88,6 @@ #define CDB_SIZE(cmd) (((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \ COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len) -/* - * Data declarations. - */ -unsigned long scsi_pid; -static unsigned long serial_number; - /* * Note - the initial logging level can be set here to log events at boot time. * After the system is up, you may enable logging via the /proc interface. @@ -117,6 +112,7 @@ const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = { "Unknown ", "RAID ", "Enclosure ", + "Direct-Access-RBC", }; EXPORT_SYMBOL(scsi_device_types); @@ -133,15 +129,14 @@ EXPORT_SYMBOL(scsi_device_types); * Returns: Pointer to request block. */ struct scsi_request *scsi_allocate_request(struct scsi_device *sdev, - int gfp_mask) + gfp_t gfp_mask) { const int offset = ALIGN(sizeof(struct scsi_request), 4); const int size = offset + sizeof(struct request); struct scsi_request *sreq; - sreq = kmalloc(size, gfp_mask); + sreq = kzalloc(size, gfp_mask); if (likely(sreq != NULL)) { - memset(sreq, 0, size); sreq->sr_request = (struct request *)(((char *)sreq) + offset); sreq->sr_device = sdev; sreq->sr_host = sdev->host; @@ -199,7 +194,7 @@ struct scsi_host_cmd_pool { unsigned int users; char *name; unsigned int slab_flags; - unsigned int gfp_mask; + gfp_t gfp_mask; }; static struct scsi_host_cmd_pool scsi_cmd_pool = { @@ -213,10 +208,10 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = { .gfp_mask = __GFP_DMA, }; -static DECLARE_MUTEX(host_cmd_pool_mutex); +static DEFINE_MUTEX(host_cmd_pool_mutex); static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, - int gfp_mask) + gfp_t gfp_mask) { struct scsi_cmnd *cmd; @@ -248,7 +243,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, * * Returns: The allocated scsi command structure. */ -struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask) +struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) { struct scsi_cmnd *cmd; @@ -263,13 +258,12 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask) memset(cmd, 0, sizeof(*cmd)); cmd->device = dev; - cmd->state = SCSI_STATE_UNUSED; - cmd->owner = SCSI_OWNER_NOBODY; init_timer(&cmd->eh_timeout); INIT_LIST_HEAD(&cmd->list); spin_lock_irqsave(&dev->list_lock, flags); list_add_tail(&cmd->list, &dev->cmd_list); spin_unlock_irqrestore(&dev->list_lock, flags); + cmd->jiffies_at_alloc = jiffies; } else put_device(&dev->sdev_gendev); @@ -335,7 +329,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost) * Select a command slab for this host and create it if not * yet existant. */ - down(&host_cmd_pool_mutex); + mutex_lock(&host_cmd_pool_mutex); pool = (shost->unchecked_isa_dma ? 
&scsi_cmd_dma_pool : &scsi_cmd_pool); if (!pool->users) { pool->slab = kmem_cache_create(pool->name, @@ -347,7 +341,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost) pool->users++; shost->cmd_pool = pool; - up(&host_cmd_pool_mutex); + mutex_unlock(&host_cmd_pool_mutex); /* * Get one backup command for this host. @@ -364,7 +358,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost) kmem_cache_destroy(pool->slab); return -ENOMEM; fail: - up(&host_cmd_pool_mutex); + mutex_unlock(&host_cmd_pool_mutex); return -ENOMEM; } @@ -386,10 +380,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost) kmem_cache_free(shost->cmd_pool->slab, cmd); } - down(&host_cmd_pool_mutex); + mutex_lock(&host_cmd_pool_mutex); if (!--shost->cmd_pool->users) kmem_cache_destroy(shost->cmd_pool->slab); - up(&host_cmd_pool_mutex); + mutex_unlock(&host_cmd_pool_mutex); } #ifdef CONFIG_SCSI_LOGGING @@ -414,9 +408,7 @@ void scsi_log_send(struct scsi_cmnd *cmd) SCSI_LOG_MLQUEUE_BITS); if (level > 1) { sdev = cmd->device; - printk(KERN_INFO "scsi <%d:%d:%d:%d> send ", - sdev->host->host_no, sdev->channel, sdev->id, - sdev->lun); + sdev_printk(KERN_INFO, sdev, "send "); if (level > 2) printk("0x%p ", cmd); /* @@ -460,9 +452,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) if (((level > 0) && (cmd->result || disposition != SUCCESS)) || (level > 1)) { sdev = cmd->device; - printk(KERN_INFO "scsi <%d:%d:%d:%d> done ", - sdev->host->host_no, sdev->channel, sdev->id, - sdev->lun); + sdev_printk(KERN_INFO, sdev, "done "); if (level > 2) printk("0x%p ", cmd); /* @@ -495,7 +485,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) scsi_print_command(cmd); if (status_byte(cmd->result) & CHECK_CONDITION) { /* - * XXX The print_sense formatting/prefix + * XXX The scsi_print_sense formatting/prefix * doesn't match this function. */ scsi_print_sense("", cmd); @@ -510,6 +500,21 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) } #endif +/* + * Assign a serial number and pid to the request for error recovery + * and debugging purposes. Protected by the Host_Lock of host. + */ +static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) +{ + cmd->serial_number = host->cmd_serial_number++; + if (cmd->serial_number == 0) + cmd->serial_number = host->cmd_serial_number++; + + cmd->pid = host->cmd_pid++; + if (cmd->pid == 0) + cmd->pid = host->cmd_pid++; +} + /* * Function: scsi_dispatch_command * @@ -532,7 +537,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) * returns an immediate error upwards, and signals * that the device is no longer present */ cmd->result = DID_NO_CONNECT << 16; - scsi_done(cmd); + atomic_inc(&cmd->device->iorequest_cnt); + __scsi_done(cmd); /* return 0 (because the command has been processed) */ goto out; } @@ -556,17 +562,11 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) goto out; } - /* Assign a unique nonzero serial_number. */ - /* XXX(hch): this is racy */ - if (++serial_number == 0) - serial_number = 1; - cmd->serial_number = serial_number; - cmd->pid = scsi_pid++; - /* * If SCSI-2 or lower, store the LUN value in cmnd. 
*/ - if (cmd->device->scsi_level <= SCSI_2) { + if (cmd->device->scsi_level <= SCSI_2 && + cmd->device->scsi_level != SCSI_UNKNOWN) { cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | (cmd->device->lun << 5 & 0xe0); } @@ -593,6 +593,10 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) host->resetting = 0; } + /* + * AK: unlikely race here: for some reason the timer could + * expire before the serial number is set up below. + */ scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out); scsi_log_send(cmd); @@ -601,9 +605,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) * We will use a queued command if possible, otherwise we will * emulate the queuing and calling of completion function ourselves. */ - - cmd->state = SCSI_STATE_QUEUED; - cmd->owner = SCSI_OWNER_LOWLEVEL; + atomic_inc(&cmd->device->iorequest_cnt); /* * Before we queue this command, check if the command @@ -619,7 +621,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) } spin_lock_irqsave(host->host_lock, flags); - if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) { + scsi_cmd_get_serial(host, cmd); + + if (unlikely(host->shost_state == SHOST_DEL)) { cmd->result = (DID_NO_CONNECT << 16); scsi_done(cmd); } else { @@ -627,9 +631,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) } spin_unlock_irqrestore(host->host_lock, flags); if (rtn) { - scsi_queue_insert(cmd, - (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? - rtn : SCSI_MLQUEUE_HOST_BUSY); + if (scsi_delete_timer(cmd)) { + atomic_inc(&cmd->device->iodone_cnt); + scsi_queue_insert(cmd, + (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ? + rtn : SCSI_MLQUEUE_HOST_BUSY); + } SCSI_LOG_MLQUEUE(3, printk("queuecommand : request rejected\n")); } @@ -667,14 +674,12 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq) { sreq->sr_command = cmd; - cmd->owner = SCSI_OWNER_MIDLEVEL; cmd->cmd_len = sreq->sr_cmd_len; cmd->use_sg = sreq->sr_use_sg; cmd->request = sreq->sr_request; memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd)); cmd->serial_number = 0; - cmd->serial_number_at_timeout = 0; cmd->bufflen = sreq->sr_bufflen; cmd->buffer = sreq->sr_buffer; cmd->retries = 0; @@ -704,8 +709,6 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq) /* * Start the timer ticking. */ - cmd->internal_timeout = NORMAL_TIMEOUT; - cmd->abort_reason = 0; cmd->result = 0; SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n")); @@ -729,7 +732,7 @@ static DEFINE_PER_CPU(struct list_head, scsi_done_q); * * This function is interrupt context safe. */ -void scsi_done(struct scsi_cmnd *cmd) +static void scsi_done(struct scsi_cmnd *cmd) { /* * We don't have to worry about this one timing out any more. @@ -748,67 +751,25 @@ void scsi_done(struct scsi_cmnd *cmd) * isn't running --- used by scsi_times_out */ void __scsi_done(struct scsi_cmnd *cmd) { - unsigned long flags; + struct request *rq = cmd->request; /* * Set the serial numbers back to zero */ cmd->serial_number = 0; - cmd->serial_number_at_timeout = 0; - cmd->state = SCSI_STATE_BHQUEUE; - cmd->owner = SCSI_OWNER_BH_HANDLER; + + atomic_inc(&cmd->device->iodone_cnt); + if (cmd->result) + atomic_inc(&cmd->device->ioerr_cnt); + + BUG_ON(!rq); /* - * Next, enqueue the command into the done queue. - * It is a per-CPU queue, so we just disable local interrupts - * and need no spinlock. 
+ * The uptodate/nbytes values don't matter, as we allow partial + * completes and thus will check this in the softirq callback */ - local_irq_save(flags); - list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q)); - raise_softirq_irqoff(SCSI_SOFTIRQ); - local_irq_restore(flags); -} - -/** - * scsi_softirq - Perform post-interrupt processing of finished SCSI commands. - * - * This is the consumer of the done queue. - * - * This is called with all interrupts enabled. This should reduce - * interrupt latency, stack depth, and reentrancy of the low-level - * drivers. - */ -static void scsi_softirq(struct softirq_action *h) -{ - int disposition; - LIST_HEAD(local_q); - - local_irq_disable(); - list_splice_init(&__get_cpu_var(scsi_done_q), &local_q); - local_irq_enable(); - - while (!list_empty(&local_q)) { - struct scsi_cmnd *cmd = list_entry(local_q.next, - struct scsi_cmnd, eh_entry); - list_del_init(&cmd->eh_entry); - - disposition = scsi_decide_disposition(cmd); - scsi_log_completion(cmd, disposition); - switch (disposition) { - case SUCCESS: - scsi_finish_command(cmd); - break; - case NEEDS_RETRY: - scsi_retry_command(cmd); - break; - case ADD_TO_MLQUEUE: - scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); - break; - default: - if (!scsi_eh_scmd_add(cmd, 0)) - scsi_finish_command(cmd); - } - } + rq->completion_data = cmd; + blk_complete_request(rq); } /* @@ -870,11 +831,9 @@ void scsi_finish_command(struct scsi_cmnd *cmd) if (SCSI_SENSE_VALID(cmd)) cmd->result |= (DRIVER_SENSE << 24); - SCSI_LOG_MLCOMPLETE(4, printk("Notifying upper driver of completion " - "for device %d %x\n", sdev->id, cmd->result)); - - cmd->owner = SCSI_OWNER_HIGHLEVEL; - cmd->state = SCSI_STATE_FINISHED; + SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev, + "Notifying upper driver of completion " + "(result %x)\n", cmd->result)); /* * We can get here with use_sg=0, causing a panic in the upper level @@ -920,12 +879,9 @@ EXPORT_SYMBOL(scsi_finish_command); * the right thing depending on whether or not the device is * currently active and whether or not it even has the * command blocks built yet. - * - * XXX(hch): What exactly is device_request_lock trying to protect? 
*/ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) { - static DEFINE_SPINLOCK(device_request_lock); unsigned long flags; /* @@ -934,8 +890,7 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) if (tags <= 0) return; - spin_lock_irqsave(&device_request_lock, flags); - spin_lock(sdev->request_queue->queue_lock); + spin_lock_irqsave(sdev->request_queue->queue_lock, flags); /* Check to see if the queue is managed by the block layer * if it is, and we fail to adjust the depth, exit */ @@ -954,18 +909,16 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags) sdev->simple_tags = 1; break; default: - printk(KERN_WARNING "(scsi%d:%d:%d:%d) " - "scsi_adjust_queue_depth, bad queue type, " - "disabled\n", sdev->host->host_no, - sdev->channel, sdev->id, sdev->lun); + sdev_printk(KERN_WARNING, sdev, + "scsi_adjust_queue_depth, bad queue type, " + "disabled\n"); case 0: sdev->ordered_tags = sdev->simple_tags = 0; sdev->queue_depth = tags; break; } out: - spin_unlock(sdev->request_queue->queue_lock); - spin_unlock_irqrestore(&device_request_lock, flags); + spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); } EXPORT_SYMBOL(scsi_adjust_queue_depth); @@ -1104,6 +1057,60 @@ void starget_for_each_device(struct scsi_target *starget, void * data, } EXPORT_SYMBOL(starget_for_each_device); +/** + * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED) + * @starget: SCSI target pointer + * @lun: SCSI Logical Unit Number + * + * Looks up the scsi_device with the specified @lun for a give + * @starget. The returned scsi_device does not have an additional + * reference. You must hold the host's host_lock over this call and + * any access to the returned scsi_device. + * + * Note: The only reason why drivers would want to use this is because + * they're need to access the device list in irq context. Otherwise you + * really want to use scsi_device_lookup_by_target instead. + **/ +struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, + uint lun) +{ + struct scsi_device *sdev; + + list_for_each_entry(sdev, &starget->devices, same_target_siblings) { + if (sdev->lun ==lun) + return sdev; + } + + return NULL; +} +EXPORT_SYMBOL(__scsi_device_lookup_by_target); + +/** + * scsi_device_lookup_by_target - find a device given the target + * @starget: SCSI target pointer + * @lun: SCSI Logical Unit Number + * + * Looks up the scsi_device with the specified @channel, @id, @lun for a + * give host. The returned scsi_device has an additional reference that + * needs to be release with scsi_host_put once you're done with it. 
+ **/ +struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, + uint lun) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + sdev = __scsi_device_lookup_by_target(starget, lun); + if (sdev && scsi_device_get(sdev)) + sdev = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); + + return sdev; +} +EXPORT_SYMBOL(scsi_device_lookup_by_target); + /** * scsi_device_lookup - find a device given the host (UNLOCKED) * @shost: SCSI host pointer @@ -1196,9 +1203,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery) list_for_each_safe(lh, lh_sf, &active_list) { scmd = list_entry(lh, struct scsi_cmnd, eh_entry); list_del_init(lh); - if (recovery) { - scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD); - } else { + if (recovery && + !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) { scmd->result = (DID_ABORT << 16); scsi_finish_command(scmd); } @@ -1209,38 +1215,6 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery) } EXPORT_SYMBOL(scsi_device_cancel); -#ifdef CONFIG_HOTPLUG_CPU -static int scsi_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - int cpu = (unsigned long)hcpu; - - switch(action) { - case CPU_DEAD: - /* Drain scsi_done_q. */ - local_irq_disable(); - list_splice_init(&per_cpu(scsi_done_q, cpu), - &__get_cpu_var(scsi_done_q)); - raise_softirq_irqoff(SCSI_SOFTIRQ); - local_irq_enable(); - break; - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block __devinitdata scsi_cpu_nb = { - .notifier_call = scsi_cpu_notify, -}; - -#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb) -#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb) -#else -#define register_scsi_cpu() -#define unregister_scsi_cpu() -#endif /* CONFIG_HOTPLUG_CPU */ - MODULE_DESCRIPTION("SCSI core"); MODULE_LICENSE("GPL"); @@ -1270,12 +1244,9 @@ static int __init init_scsi(void) if (error) goto cleanup_sysctl; - for (i = 0; i < NR_CPUS; i++) + for_each_possible_cpu(i) INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); - devfs_mk_dir("scsi"); - open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL); - register_scsi_cpu(); printk(KERN_NOTICE "SCSI subsystem initialized\n"); return 0; @@ -1300,10 +1271,8 @@ static void __exit exit_scsi(void) scsi_exit_sysctl(); scsi_exit_hosts(); scsi_exit_devinfo(); - devfs_remove("scsi"); scsi_exit_procfs(); scsi_exit_queue(); - unregister_scsi_cpu(); } subsys_initcall(init_scsi);
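Editor's note on the lookup helpers this patch introduces: scsi_device_lookup_by_target() takes the host lock, walks the target's device list for the requested LUN, and pins the result via scsi_device_get() before returning it (the kernel-doc above says the reference is released with scsi_host_put, but the body shown takes a device reference, so the matching release is scsi_device_put). The sketch below is a minimal, hypothetical caller, not part of the patch; the function name example_find_lun() and its use of a void return are illustrative assumptions, while scsi_device_lookup_by_target() and scsi_device_put() are the midlayer calls visible in or implied by the diff.

    #include <scsi/scsi_device.h>

    /* Hypothetical caller of the lookup helper added by this patch. */
    static void example_find_lun(struct scsi_target *starget, uint lun)
    {
            struct scsi_device *sdev;

            /*
             * Returns a referenced scsi_device, or NULL if the LUN does
             * not exist under this target (or the device is going away
             * and scsi_device_get() failed).
             */
            sdev = scsi_device_lookup_by_target(starget, lun);
            if (!sdev)
                    return;

            /* ... inspect or use sdev here ... */

            /* Drop the reference taken inside the lookup. */
            scsi_device_put(sdev);
    }

The __scsi_device_lookup_by_target() variant added alongside it performs the same list walk without locking or reference counting, which is why its comment restricts it to callers that already hold the host lock (for example, lookups from interrupt context).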