#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
+#include <linux/delay.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
static int init_timeout = 5;
static int max_requests = 50;
-#define IBMVSCSI_VERSION "1.5.1"
+#define IBMVSCSI_VERSION "1.5.5"
MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
{
evt_struct->cmnd = NULL;
evt_struct->cmnd_done = NULL;
+ evt_struct->sync_srp = NULL;
evt_struct->crq.format = format;
evt_struct->crq.timeout = timeout;
evt_struct->done = done;
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
struct ibmvscsi_host_data *hostdata)
{
- struct scsi_cmnd *cmnd = evt_struct->cmnd;
+ struct scsi_cmnd *cmnd;
u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
int rc;
if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
(atomic_dec_if_positive(&hostdata->request_limit) < 0)) {
/* See if the adapter is disabled */
- if (atomic_read(&hostdata->request_limit) < 0) {
- if (cmnd)
- cmnd->result = DID_ERROR << 16;
- if (evt_struct->cmnd_done)
- evt_struct->cmnd_done(cmnd);
- unmap_cmd_data(&evt_struct->iu.srp.cmd,
- hostdata->dev);
- free_event_struct(&hostdata->pool, evt_struct);
- return 0;
- } else {
- printk("ibmvscsi: Warning, request_limit exceeded\n");
- unmap_cmd_data(&evt_struct->iu.srp.cmd,
- hostdata->dev);
- free_event_struct(&hostdata->pool, evt_struct);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ if (atomic_read(&hostdata->request_limit) < 0)
+ goto send_error;
+
+ printk(KERN_WARNING
+ "ibmvscsi: Warning, request_limit exceeded\n");
+ unmap_cmd_data(&evt_struct->iu.srp.cmd,
+ hostdata->dev);
+ free_event_struct(&hostdata->pool, evt_struct);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
/* Copy the IU into the transfer area */
ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
list_del(&evt_struct->list);
- cmnd = evt_struct->cmnd;
printk(KERN_ERR "ibmvscsi: failed to send event struct rc %d\n",
rc);
- unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
- free_event_struct(&hostdata->pool, evt_struct);
- if (cmnd)
- cmnd->result = DID_ERROR << 16;
- if (evt_struct->cmnd_done)
- evt_struct->cmnd_done(cmnd);
+ goto send_error;
}
return 0;
+
+ send_error:
+ unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
+
+ if ((cmnd = evt_struct->cmnd) != NULL) {
+ cmnd->result = DID_ERROR << 16;
+ evt_struct->cmnd_done(cmnd);
+ } else if (evt_struct->done)
+ evt_struct->done(evt_struct);
+
+ free_event_struct(&hostdata->pool, evt_struct);
+ return 0;
}
/**
struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
struct scsi_cmnd *cmnd = evt_struct->cmnd;
+ if (unlikely(rsp->type != SRP_RSP_TYPE)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "ibmvscsi: bad SRP RSP type %d\n",
+ rsp->type);
+ }
+
if (cmnd) {
cmnd->result = rsp->status;
if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
evt_struct->xfer_iu->mad.adapter_info.common.status);
} else {
printk("ibmvscsi: host srp version: %s, "
- "host partition %s (%d), OS %d\n",
+ "host partition %s (%d), OS %d, max io %u\n",
hostdata->madapter_info.srp_version,
hostdata->madapter_info.partition_name,
hostdata->madapter_info.partition_number,
- hostdata->madapter_info.os_type);
+ hostdata->madapter_info.os_type,
+ hostdata->madapter_info.port_max_txu[0]);
+
+ if (hostdata->madapter_info.port_max_txu[0])
+ hostdata->host->max_sectors =
+ hostdata->madapter_info.port_max_txu[0] >> 9;
}
}
*/
static void sync_completion(struct srp_event_struct *evt_struct)
{
+ /* copy the response back */
+ /* sync_srp is set by synchronous waiters (the abort/reset handlers
+  * point it at a stack-local viosrp_iu before sending) so they can
+  * inspect the SRP response after wait_for_completion() returns.
+  * NOTE(review): assumes xfer_iu still holds a valid response IU at
+  * this point -- confirm against the CRQ receive path. */
+ if (evt_struct->sync_srp)
+ *evt_struct->sync_srp = *evt_struct->xfer_iu;
+
complete(&evt_struct->comp);
}
struct srp_tsk_mgmt *tsk_mgmt;
struct srp_event_struct *evt;
struct srp_event_struct *tmp_evt, *found_evt;
+ union viosrp_iu srp_rsp;
+ int rsp_rc;
u16 lun = lun_from_dev(cmd->device);
/* First, find this command in our sent list so we can figure
printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
tsk_mgmt->lun, tsk_mgmt->managed_task_tag);
+ evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
wait_for_completion(&evt->comp);
spin_lock_irq(hostdata->host->host_lock);
+ /* make sure we got a good response */
+ if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "ibmvscsi: abort bad SRP RSP type %d\n",
+ srp_rsp.srp.generic.type);
+ return FAILED;
+ }
+
+ if (srp_rsp.srp.rsp.rspvalid)
+ rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+ else
+ rsp_rc = srp_rsp.srp.rsp.status;
+
+ if (rsp_rc) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "ibmvscsi: abort code %d for task tag 0x%lx\n",
+ rsp_rc,
+ tsk_mgmt->managed_task_tag);
+ return FAILED;
+ }
+
/* Because we dropped the spinlock above, it's possible
* The event is no longer in our list. Make sure it didn't
* complete while we were aborting
}
}
+ if (found_evt == NULL) {
+ printk(KERN_INFO
+ "ibmvscsi: aborted task tag 0x%lx completed\n",
+ tsk_mgmt->managed_task_tag);
+ return SUCCESS;
+ }
+
printk(KERN_INFO
"ibmvscsi: successfully aborted task tag 0x%lx\n",
tsk_mgmt->managed_task_tag);
- if (found_evt == NULL)
- return SUCCESS;
-
cmd->result = (DID_ABORT << 16);
list_del(&found_evt->list);
unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt->hostdata->dev);
struct srp_tsk_mgmt *tsk_mgmt;
struct srp_event_struct *evt;
struct srp_event_struct *tmp_evt, *pos;
+ union viosrp_iu srp_rsp;
+ int rsp_rc;
u16 lun = lun_from_dev(cmd->device);
evt = get_event_struct(&hostdata->pool);
printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
tsk_mgmt->lun);
+ evt->sync_srp = &srp_rsp;
init_completion(&evt->comp);
if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
wait_for_completion(&evt->comp);
spin_lock_irq(hostdata->host->host_lock);
+ /* make sure we got a good response */
+ if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "ibmvscsi: reset bad SRP RSP type %d\n",
+ srp_rsp.srp.generic.type);
+ return FAILED;
+ }
+
+ if (srp_rsp.srp.rsp.rspvalid)
+ rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+ else
+ rsp_rc = srp_rsp.srp.rsp.status;
+
+ if (rsp_rc) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "ibmvscsi: reset code %d for task tag 0x%lx\n",
+ rsp_rc,
+ tsk_mgmt->managed_task_tag);
+ return FAILED;
+ }
+
/* We need to find all commands for this LUN that have not yet been
* responded to, and fail them with DID_RESET
*/
return;
}
+ if (atomic_read(&evt_struct->free)) {
+ printk(KERN_ERR
+ "ibmvscsi: received duplicate correlation_token 0x%p!\n",
+ (void *)crq->IU_data_ptr);
+ return;
+ }
+
if (crq->format == VIOSRP_SRP_FORMAT)
atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
&hostdata->request_limit);
hostdata->host = host;
hostdata->dev = dev;
atomic_set(&hostdata->request_limit, -1);
+ hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
max_requests) != 0) {
*/
for (wait_switch = jiffies + (init_timeout * HZ);
time_before(jiffies, wait_switch) &&
- atomic_read(&hostdata->request_limit) < 0;) {
+ atomic_read(&hostdata->request_limit) < 2;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ / 100);
+ msleep(10);
}
/* if we now have a valid request_limit, initiate a scan */