X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fscsi.c;h=73994e2ac2cb73e3636042945511735bbb17a13f;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=4cff61bf99d87845e167016ce17b8e03e9f617af;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 4cff61bf9..73994e2ac 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -48,20 +48,27 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
 
 #include 
-#include "scsi.h"
+#include 
+#include 
 #include "scsi_priv.h"
 #include "scsi_logging.h"
 
+static void scsi_done(struct scsi_cmnd *cmd);
 
 /*
  * Definitions and constants.
  */
@@ -81,17 +88,14 @@
 #define CDB_SIZE(cmd)	(((((cmd)->cmnd[0] >> 5) & 7) < 6) ? \
 				COMMAND_SIZE((cmd)->cmnd[0]) : (cmd)->cmd_len)
 
-/*
- * Data declarations.
- */
-unsigned long scsi_pid;
-static unsigned long serial_number;
-
 /*
  * Note - the initial logging level can be set here to log events at boot time.
  * After the system is up, you may enable logging via the /proc interface.
  */
 unsigned int scsi_logging_level;
+#if defined(CONFIG_SCSI_LOGGING)
+EXPORT_SYMBOL(scsi_logging_level);
+#endif
 
 const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
 	"Direct-Access    ",
@@ -108,7 +112,9 @@ const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
 	"Unknown          ",
 	"RAID             ",
 	"Enclosure        ",
+	"Direct-Access-RBC",
 };
+EXPORT_SYMBOL(scsi_device_types);
 
 /*
  * Function: scsi_allocate_request
@@ -123,15 +129,14 @@ const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] = {
  * Returns:	Pointer to request block.
  */
 struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
-					   int gfp_mask)
+					   gfp_t gfp_mask)
 {
 	const int offset = ALIGN(sizeof(struct scsi_request), 4);
 	const int size = offset + sizeof(struct request);
 	struct scsi_request *sreq;
 
-	sreq = kmalloc(size, gfp_mask);
+	sreq = kzalloc(size, gfp_mask);
 	if (likely(sreq != NULL)) {
-		memset(sreq, 0, size);
 		sreq->sr_request = (struct request *)(((char *)sreq) + offset);
 		sreq->sr_device = sdev;
 		sreq->sr_host = sdev->host;
@@ -141,6 +146,7 @@ struct scsi_request *scsi_allocate_request(struct scsi_device *sdev,
 
 	return sreq;
 }
+EXPORT_SYMBOL(scsi_allocate_request);
 
 void __scsi_release_request(struct scsi_request *sreq)
 {
@@ -181,13 +187,14 @@ void scsi_release_request(struct scsi_request *sreq)
 	__scsi_release_request(sreq);
 	kfree(sreq);
 }
+EXPORT_SYMBOL(scsi_release_request);
 
 struct scsi_host_cmd_pool {
 	kmem_cache_t	*slab;
 	unsigned int	users;
 	char		*name;
 	unsigned int	slab_flags;
-	unsigned int	gfp_mask;
+	gfp_t		gfp_mask;
 };
 
 static struct scsi_host_cmd_pool scsi_cmd_pool = {
@@ -201,10 +208,10 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {
 	.gfp_mask	= __GFP_DMA,
 };
 
-static DECLARE_MUTEX(host_cmd_pool_mutex);
+static DEFINE_MUTEX(host_cmd_pool_mutex);
 
 static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
-					    int gfp_mask)
+					    gfp_t gfp_mask)
 {
 	struct scsi_cmnd *cmd;
 
@@ -236,26 +243,33 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost,
  *
  * Returns:	The allocated scsi command structure.
  */
-struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
+struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
 {
-	struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask);
+	struct scsi_cmnd *cmd;
+
+	/* Bail if we can't get a reference to the device */
+	if (!get_device(&dev->sdev_gendev))
+		return NULL;
+
+	cmd = __scsi_get_command(dev->host, gfp_mask);
 
 	if (likely(cmd != NULL)) {
 		unsigned long flags;
 
 		memset(cmd, 0, sizeof(*cmd));
 		cmd->device = dev;
-		cmd->state = SCSI_STATE_UNUSED;
-		cmd->owner = SCSI_OWNER_NOBODY;
 		init_timer(&cmd->eh_timeout);
 		INIT_LIST_HEAD(&cmd->list);
 		spin_lock_irqsave(&dev->list_lock, flags);
 		list_add_tail(&cmd->list, &dev->cmd_list);
 		spin_unlock_irqrestore(&dev->list_lock, flags);
-	}
+		cmd->jiffies_at_alloc = jiffies;
+	} else
+		put_device(&dev->sdev_gendev);
 
 	return cmd;
 }
+EXPORT_SYMBOL(scsi_get_command);
 
 /*
  * Function: scsi_put_command()
@@ -270,7 +284,8 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask)
  */
 void scsi_put_command(struct scsi_cmnd *cmd)
 {
-	struct Scsi_Host *shost = cmd->device->host;
+	struct scsi_device *sdev = cmd->device;
+	struct Scsi_Host *shost = sdev->host;
 	unsigned long flags;
 
 	/* serious error if the command hasn't come from a device list */
@@ -288,7 +303,10 @@ void scsi_put_command(struct scsi_cmnd *cmd)
 
 	if (likely(cmd != NULL))
 		kmem_cache_free(shost->cmd_pool->slab, cmd);
+
+	put_device(&sdev->sdev_gendev);
 }
+EXPORT_SYMBOL(scsi_put_command);
 
 /*
  * Function: scsi_setup_command_freelist()
@@ -311,7 +329,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
 	 * Select a command slab for this host and create it if not
 	 * yet existant.
 	 */
-	down(&host_cmd_pool_mutex);
+	mutex_lock(&host_cmd_pool_mutex);
 	pool = (shost->unchecked_isa_dma ? &scsi_cmd_dma_pool : &scsi_cmd_pool);
 	if (!pool->users) {
 		pool->slab = kmem_cache_create(pool->name,
@@ -323,7 +341,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
 
 	pool->users++;
 	shost->cmd_pool = pool;
-	up(&host_cmd_pool_mutex);
+	mutex_unlock(&host_cmd_pool_mutex);
 
 	/*
 	 * Get one backup command for this host.
@@ -340,7 +358,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
 	kmem_cache_destroy(pool->slab);
 	return -ENOMEM;
  fail:
-	up(&host_cmd_pool_mutex);
+	mutex_unlock(&host_cmd_pool_mutex);
 	return -ENOMEM;
 }
 
@@ -362,10 +380,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)
 		kmem_cache_free(shost->cmd_pool->slab, cmd);
 	}
 
-	down(&host_cmd_pool_mutex);
+	mutex_lock(&host_cmd_pool_mutex);
 	if (!--shost->cmd_pool->users)
 		kmem_cache_destroy(shost->cmd_pool->slab);
-	up(&host_cmd_pool_mutex);
+	mutex_unlock(&host_cmd_pool_mutex);
 }
 
 #ifdef CONFIG_SCSI_LOGGING
@@ -390,9 +408,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
 				SCSI_LOG_MLQUEUE_BITS);
 		if (level > 1) {
 			sdev = cmd->device;
-			printk(KERN_INFO "scsi <%d:%d:%d:%d> send ",
-			       sdev->host->host_no, sdev->channel, sdev->id,
-			       sdev->lun);
+			sdev_printk(KERN_INFO, sdev, "send ");
 			if (level > 2)
 				printk("0x%p ", cmd);
 			/*
@@ -400,7 +416,7 @@ void scsi_log_send(struct scsi_cmnd *cmd)
 			 * output in scsi_log_completion.
 			 */
 			printk(" ");
-			print_command(cmd->cmnd);
+			scsi_print_command(cmd);
 			if (level > 3) {
 				printk(KERN_INFO "buffer = 0x%p, bufflen = %d,"
 				       " done = 0x%p, queuecommand 0x%p\n",
@@ -436,9 +452,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
 	if (((level > 0) && (cmd->result || disposition != SUCCESS)) ||
 	    (level > 1)) {
 		sdev = cmd->device;
-		printk(KERN_INFO "scsi <%d:%d:%d:%d> done ",
-		       sdev->host->host_no, sdev->channel, sdev->id,
-		       sdev->lun);
+		sdev_printk(KERN_INFO, sdev, "done ");
 		if (level > 2)
 			printk("0x%p ", cmd);
 		/*
@@ -468,13 +482,13 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
 				printk("UNKNOWN");
 			}
 			printk(" %8x ", cmd->result);
-			print_command(cmd->cmnd);
+			scsi_print_command(cmd);
 			if (status_byte(cmd->result) & CHECK_CONDITION) {
 				/*
-				 * XXX The print_sense formatting/prefix
+				 * XXX The scsi_print_sense formatting/prefix
 				 * doesn't match this function.
 				 */
-				print_sense("", cmd);
+				scsi_print_sense("", cmd);
 			}
 			if (level > 3) {
 				printk(KERN_INFO "scsi host busy %d failed %d\n",
@@ -486,6 +500,21 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
 	}
 }
 #endif
 
+/*
+ * Assign a serial number and pid to the request for error recovery
+ * and debugging purposes.  Protected by the Host_Lock of host.
+ */
+static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+	cmd->serial_number = host->cmd_serial_number++;
+	if (cmd->serial_number == 0)
+		cmd->serial_number = host->cmd_serial_number++;
+
+	cmd->pid = host->cmd_pid++;
+	if (cmd->pid == 0)
+		cmd->pid = host->cmd_pid++;
+}
+
 /*
  * Function: scsi_dispatch_command
@@ -508,21 +537,36 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 		 * returns an immediate error upwards, and signals
 		 * that the device is no longer present */
 		cmd->result = DID_NO_CONNECT << 16;
-		scsi_done(cmd);
+		atomic_inc(&cmd->device->iorequest_cnt);
+		__scsi_done(cmd);
 		/* return 0 (because the command has been processed) */
 		goto out;
 	}
 
-	/* Assign a unique nonzero serial_number. */
-	/* XXX(hch): this is racy */
-	if (++serial_number == 0)
-		serial_number = 1;
-	cmd->serial_number = serial_number;
-	cmd->pid = scsi_pid++;
+	/* Check to see if the scsi lld put this device into state SDEV_BLOCK. */
+	if (unlikely(cmd->device->sdev_state == SDEV_BLOCK)) {
+		/*
+		 * in SDEV_BLOCK, the command is just put back on the device
+		 * queue.  The suspend state has already blocked the queue so
+		 * future requests should not occur until the device
+		 * transitions out of the suspend state.
+		 */
+		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+
+		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
+
+		/*
+		 * NOTE: rtn is still zero here because we don't need the
+		 * queue to be plugged on return (it's already stopped)
+		 */
+		goto out;
+	}
 
 	/*
 	 * If SCSI-2 or lower, store the LUN value in cmnd.
 	 */
-	if (cmd->device->scsi_level <= SCSI_2) {
+	if (cmd->device->scsi_level <= SCSI_2 &&
+	    cmd->device->scsi_level != SCSI_UNKNOWN) {
 		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
 			       (cmd->device->lun << 5 & 0xe0);
 	}
@@ -549,6 +593,10 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 		host->resetting = 0;
 	}
 
+	/*
+	 * AK: unlikely race here: for some reason the timer could
+	 * expire before the serial number is set up below.
+	 */
 	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
 
 	scsi_log_send(cmd);
@@ -557,9 +605,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	 * We will use a queued command if possible, otherwise we will
 	 * emulate the queuing and calling of completion function ourselves.
 	 */
-
-	cmd->state = SCSI_STATE_QUEUED;
-	cmd->owner = SCSI_OWNER_LOWLEVEL;
+	atomic_inc(&cmd->device->iorequest_cnt);
 
 	/*
 	 * Before we queue this command, check if the command
@@ -570,14 +616,14 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 			       printk("queuecommand : command too long.\n"));
 		cmd->result = (DID_ABORT << 16);
 
-		spin_lock_irqsave(host->host_lock, flags);
 		scsi_done(cmd);
-		spin_unlock_irqrestore(host->host_lock, flags);
 		goto out;
 	}
 
 	spin_lock_irqsave(host->host_lock, flags);
-	if (unlikely(test_bit(SHOST_CANCEL, &host->shost_state))) {
+	scsi_cmd_get_serial(host, cmd);
+
+	if (unlikely(host->shost_state == SHOST_DEL)) {
 		cmd->result = (DID_NO_CONNECT << 16);
 		scsi_done(cmd);
 	} else {
@@ -585,9 +631,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
 	}
 	spin_unlock_irqrestore(host->host_lock, flags);
 	if (rtn) {
-		scsi_queue_insert(cmd,
-				(rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
-				rtn : SCSI_MLQUEUE_HOST_BUSY);
+		if (scsi_delete_timer(cmd)) {
+			atomic_inc(&cmd->device->iodone_cnt);
+			scsi_queue_insert(cmd,
+					(rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
+					rtn : SCSI_MLQUEUE_HOST_BUSY);
+		}
 		SCSI_LOG_MLQUEUE(3,
 			   printk("queuecommand : request rejected\n"));
 	}
@@ -625,14 +674,12 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
 {
 	sreq->sr_command = cmd;
 
-	cmd->owner = SCSI_OWNER_MIDLEVEL;
 	cmd->cmd_len = sreq->sr_cmd_len;
 	cmd->use_sg = sreq->sr_use_sg;
 	cmd->request = sreq->sr_request;
 	memcpy(cmd->data_cmnd, sreq->sr_cmnd, sizeof(cmd->data_cmnd));
 	cmd->serial_number = 0;
-	cmd->serial_number_at_timeout = 0;
 	cmd->bufflen = sreq->sr_bufflen;
 	cmd->buffer = sreq->sr_buffer;
 	cmd->retries = 0;
@@ -662,8 +709,6 @@ void scsi_init_cmd_from_req(struct scsi_cmnd *cmd, struct scsi_request *sreq)
 	/*
 	 * Start the timer ticking.
 	 */
-	cmd->internal_timeout = NORMAL_TIMEOUT;
-	cmd->abort_reason = 0;
 	cmd->result = 0;
 
 	SCSI_LOG_MLQUEUE(3, printk("Leaving scsi_init_cmd_from_req()\n"));
@@ -687,10 +732,8 @@ static DEFINE_PER_CPU(struct list_head, scsi_done_q);
  *
  * This function is interrupt context safe.
  */
-void scsi_done(struct scsi_cmnd *cmd)
+static void scsi_done(struct scsi_cmnd *cmd)
 {
-	unsigned long flags;
-
 	/*
 	 * We don't have to worry about this one timing out any more.
 	 * If we are unable to remove the timer, then the command
@@ -701,66 +744,32 @@ void scsi_done(struct scsi_cmnd *cmd)
 	 */
 	if (!scsi_delete_timer(cmd))
 		return;
+	__scsi_done(cmd);
+}
+
+/* Private entry to scsi_done() to complete a command when the timer
+ * isn't running --- used by scsi_times_out */
+void __scsi_done(struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd->request;
 
 	/*
 	 * Set the serial numbers back to zero
 	 */
 	cmd->serial_number = 0;
-	cmd->serial_number_at_timeout = 0;
-	cmd->state = SCSI_STATE_BHQUEUE;
-	cmd->owner = SCSI_OWNER_BH_HANDLER;
+
+	atomic_inc(&cmd->device->iodone_cnt);
+	if (cmd->result)
+		atomic_inc(&cmd->device->ioerr_cnt);
+
+	BUG_ON(!rq);
 
 	/*
-	 * Next, enqueue the command into the done queue.
-	 * It is a per-CPU queue, so we just disable local interrupts
-	 * and need no spinlock.
+	 * The uptodate/nbytes values don't matter, as we allow partial
+	 * completes and thus will check this in the softirq callback
 	 */
-	local_irq_save(flags);
-	list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
-	raise_softirq_irqoff(SCSI_SOFTIRQ);
-	local_irq_restore(flags);
-}
-
-/**
- * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
- *
- * This is the consumer of the done queue.
- *
- * This is called with all interrupts enabled.  This should reduce
- * interrupt latency, stack depth, and reentrancy of the low-level
- * drivers.
- */
-static void scsi_softirq(struct softirq_action *h)
-{
-	int disposition;
-	LIST_HEAD(local_q);
-
-	local_irq_disable();
-	list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
-	local_irq_enable();
-
-	while (!list_empty(&local_q)) {
-		struct scsi_cmnd *cmd = list_entry(local_q.next,
-						   struct scsi_cmnd, eh_entry);
-		list_del_init(&cmd->eh_entry);
-
-		disposition = scsi_decide_disposition(cmd);
-		scsi_log_completion(cmd, disposition);
-		switch (disposition) {
-		case SUCCESS:
-			scsi_finish_command(cmd);
-			break;
-		case NEEDS_RETRY:
-			scsi_retry_command(cmd);
-			break;
-		case ADD_TO_MLQUEUE:
-			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
-			break;
-		default:
-			if (!scsi_eh_scmd_add(cmd, 0))
-				scsi_finish_command(cmd);
-		}
-	}
+	rq->completion_data = cmd;
+	blk_complete_request(rq);
 }
 
 /*
@@ -822,11 +831,9 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
 	if (SCSI_SENSE_VALID(cmd))
 		cmd->result |= (DRIVER_SENSE << 24);
 
-	SCSI_LOG_MLCOMPLETE(4, printk("Notifying upper driver of completion "
-				      "for device %d %x\n", sdev->id, cmd->result));
-
-	cmd->owner = SCSI_OWNER_HIGHLEVEL;
-	cmd->state = SCSI_STATE_FINISHED;
+	SCSI_LOG_MLCOMPLETE(4, sdev_printk(KERN_INFO, sdev,
+				"Notifying upper driver of completion "
+				"(result %x)\n", cmd->result));
 
 	/*
 	 * We can get here with use_sg=0, causing a panic in the upper level
@@ -872,12 +879,9 @@ EXPORT_SYMBOL(scsi_finish_command);
  *		the right thing depending on whether or not the device is
  *		currently active and whether or not it even has the
  *		command blocks built yet.
- *
- * XXX(hch):	What exactly is device_request_lock trying to protect?
  */
 void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
 {
-	static spinlock_t device_request_lock = SPIN_LOCK_UNLOCKED;
 	unsigned long flags;
 
 	/*
@@ -885,15 +889,15 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
 	 */
 	if (tags <= 0)
 		return;
-	/*
-	 * Limit max queue depth on a single lun to 256 for now.  Remember,
-	 * we allocate a struct scsi_command for each of these and keep it
-	 * around forever.  Too deep of a depth just wastes memory.
-	 */
-	if (tags > 256)
-		return;
 
-	spin_lock_irqsave(&device_request_lock, flags);
+	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
+
+	/* Check to see if the queue is managed by the block layer
+	 * if it is, and we fail to adjust the depth, exit */
+	if (blk_queue_tagged(sdev->request_queue) &&
+	    blk_queue_resize_tags(sdev->request_queue, tags) != 0)
+		goto out;
+
 	sdev->queue_depth = tags;
 	switch (tagged) {
 		case MSG_ORDERED_TAG:
@@ -905,17 +909,18 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
 			sdev->simple_tags = 1;
 			break;
 		default:
-			printk(KERN_WARNING "(scsi%d:%d:%d:%d) "
-				"scsi_adjust_queue_depth, bad queue type, "
-				"disabled\n", sdev->host->host_no,
-				sdev->channel, sdev->id, sdev->lun);
+			sdev_printk(KERN_WARNING, sdev,
+				    "scsi_adjust_queue_depth, bad queue type, "
+				    "disabled\n");
 		case 0:
 			sdev->ordered_tags = sdev->simple_tags = 0;
 			sdev->queue_depth = tags;
 			break;
 	}
-	spin_unlock_irqrestore(&device_request_lock, flags);
+ out:
+	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 }
+EXPORT_SYMBOL(scsi_adjust_queue_depth);
 
 /*
  * Function: scsi_track_queue_full()
@@ -966,6 +971,7 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
 	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
 	return depth;
 }
+EXPORT_SYMBOL(scsi_track_queue_full);
 
 /**
  * scsi_device_get - get an addition reference to a scsi_device
@@ -977,8 +983,6 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
  */
 int scsi_device_get(struct scsi_device *sdev)
 {
-	if(!sdev)
-		return -ENXIO;
 	if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL)
 		return -ENXIO;
 	if (!get_device(&sdev->sdev_gendev))
@@ -1020,6 +1024,7 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
 		/* skip devices that we can't get a reference to */
 		if (!scsi_device_get(next))
 			break;
+		next = NULL;
 		list = list->next;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
@@ -1030,6 +1035,82 @@ struct scsi_device *__scsi_iterate_devices(struct Scsi_Host *shost,
 }
 EXPORT_SYMBOL(__scsi_iterate_devices);
 
+/**
+ * starget_for_each_device  -  helper to walk all devices of a target
+ * @starget:	target whose devices we want to iterate over.
+ *
+ * This traverses over each device of @starget.  The devices have
+ * a reference that must be released by scsi_device_put when breaking
+ * out of the loop.
+ */
+void starget_for_each_device(struct scsi_target *starget, void * data,
+		     void (*fn)(struct scsi_device *, void *))
+{
+	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
+	struct scsi_device *sdev;
+
+	shost_for_each_device(sdev, shost) {
+		if ((sdev->channel == starget->channel) &&
+		    (sdev->id == starget->id))
+			fn(sdev, data);
+	}
+}
+EXPORT_SYMBOL(starget_for_each_device);
+
+/**
+ * __scsi_device_lookup_by_target - find a device given the target (UNLOCKED)
+ * @starget:	SCSI target pointer
+ * @lun:	SCSI Logical Unit Number
+ *
+ * Looks up the scsi_device with the specified @lun for a given
+ * @starget.  The returned scsi_device does not have an additional
+ * reference.  You must hold the host's host_lock over this call and
+ * any access to the returned scsi_device.
+ *
+ * Note:  The only reason why drivers would want to use this is because
+ * they need to access the device list in irq context.  Otherwise you
+ * really want to use scsi_device_lookup_by_target instead.
+ **/ +struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget, + uint lun) +{ + struct scsi_device *sdev; + + list_for_each_entry(sdev, &starget->devices, same_target_siblings) { + if (sdev->lun ==lun) + return sdev; + } + + return NULL; +} +EXPORT_SYMBOL(__scsi_device_lookup_by_target); + +/** + * scsi_device_lookup_by_target - find a device given the target + * @starget: SCSI target pointer + * @lun: SCSI Logical Unit Number + * + * Looks up the scsi_device with the specified @channel, @id, @lun for a + * give host. The returned scsi_device has an additional reference that + * needs to be release with scsi_host_put once you're done with it. + **/ +struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, + uint lun) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); + unsigned long flags; + + spin_lock_irqsave(shost->host_lock, flags); + sdev = __scsi_device_lookup_by_target(starget, lun); + if (sdev && scsi_device_get(sdev)) + sdev = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); + + return sdev; +} +EXPORT_SYMBOL(scsi_device_lookup_by_target); + /** * scsi_device_lookup - find a device given the host (UNLOCKED) * @shost: SCSI host pointer @@ -1090,8 +1171,8 @@ EXPORT_SYMBOL(scsi_device_lookup); /** * scsi_device_cancel - cancel outstanding IO to this device - * @sdev: pointer to struct scsi_device - * @data: pointer to cancel value. + * @sdev: Pointer to struct scsi_device + * @recovery: Boolean instructing function to recover device or not. * **/ int scsi_device_cancel(struct scsi_device *sdev, int recovery) @@ -1122,9 +1203,8 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery) list_for_each_safe(lh, lh_sf, &active_list) { scmd = list_entry(lh, struct scsi_cmnd, eh_entry); list_del_init(lh); - if (recovery) { - scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD); - } else { + if (recovery && + !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) { scmd->result = (DID_ABORT << 16); scsi_finish_command(scmd); } @@ -1133,38 +1213,7 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery) return 0; } - -#ifdef CONFIG_HOTPLUG_CPU -static int scsi_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - int cpu = (unsigned long)hcpu; - - switch(action) { - case CPU_DEAD: - /* Drain scsi_done_q. 
*/ - local_irq_disable(); - list_splice_init(&per_cpu(scsi_done_q, cpu), - &__get_cpu_var(scsi_done_q)); - raise_softirq_irqoff(SCSI_SOFTIRQ); - local_irq_enable(); - break; - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block __devinitdata scsi_cpu_nb = { - .notifier_call = scsi_cpu_notify, -}; - -#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb) -#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb) -#else -#define register_scsi_cpu() -#define unregister_scsi_cpu() -#endif /* CONFIG_HOTPLUG_CPU */ +EXPORT_SYMBOL(scsi_device_cancel); MODULE_DESCRIPTION("SCSI core"); MODULE_LICENSE("GPL"); @@ -1195,12 +1244,9 @@ static int __init init_scsi(void) if (error) goto cleanup_sysctl; - for (i = 0; i < NR_CPUS; i++) + for_each_possible_cpu(i) INIT_LIST_HEAD(&per_cpu(scsi_done_q, i)); - devfs_mk_dir("scsi"); - open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL); - register_scsi_cpu(); printk(KERN_NOTICE "SCSI subsystem initialized\n"); return 0; @@ -1225,10 +1271,8 @@ static void __exit exit_scsi(void) scsi_exit_sysctl(); scsi_exit_hosts(); scsi_exit_devinfo(); - devfs_remove("scsi"); scsi_exit_procfs(); scsi_exit_queue(); - unregister_scsi_cpu(); } subsys_initcall(init_scsi);
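
A note on two mechanical conversions that recur throughout the patch above: kmalloc() followed by memset() collapses into kzalloc(), and the old semaphore-as-lock pattern (DECLARE_MUTEX()/down()/up()) moves to the dedicated mutex API (DEFINE_MUTEX()/mutex_lock()/mutex_unlock()), with the allocator flags retyped from int to gfp_t along the way. The fragment below is not part of the patch; it is a minimal, hypothetical sketch (all demo_* names are invented) showing how both idioms look after such a conversion.

#include <linux/slab.h>
#include <linux/mutex.h>

/* was: static DECLARE_MUTEX(demo_pool_mutex); -- a semaphore used as a lock */
static DEFINE_MUTEX(demo_pool_mutex);

static unsigned int demo_pool_users;	/* protected by demo_pool_mutex */

struct demo_req {
	struct demo_req *next;
};

static struct demo_req *demo_alloc_request(gfp_t gfp_mask)
{
	struct demo_req *req;

	/* kzalloc() hands back zeroed memory, replacing kmalloc() + memset() */
	req = kzalloc(sizeof(*req), gfp_mask);
	if (!req)
		return NULL;

	mutex_lock(&demo_pool_mutex);	/* was: down(&demo_pool_mutex); */
	demo_pool_users++;
	mutex_unlock(&demo_pool_mutex);	/* was: up(&demo_pool_mutex); */

	return req;
}

Neither conversion changes behavior: gfp_t gives the allocator flags their own type that static checkers can verify, and struct mutex makes explicit (and lets the kernel's debugging infrastructure check) the sleeping-lock semantics that the old semaphore usage only implied.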