pool->size = size;
pool->next = 0;
- pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+ pool->events = kmalloc(pool->size * sizeof(*pool->events), GFP_KERNEL);
if (!pool->events)
return -ENOMEM;
+ memset(pool->events, 0x00, pool->size * sizeof(*pool->events));
pool->iu_storage =
dma_alloc_coherent(hostdata->dev,
struct ibmvscsi_host_data *hostdata)
{
u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
- int request_status;
int rc;
/* If we have exhausted our request limit, just fail this request.
 * Note that there are rare cases involving driver generated requests
 * (such as task management requests) that the mid layer may think we
 * can handle more requests (can_queue) when we actually can't
 */
- if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
- request_status =
- atomic_dec_if_positive(&hostdata->request_limit);
- /* If request limit was -1 when we started, it is now even
- * less than that
- */
- if (request_status < -1)
- goto send_error;
- /* Otherwise, if we have run out of requests */
- else if (request_status < 0)
- goto send_busy;
- }
+ if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
+ (atomic_dec_if_positive(&hostdata->request_limit) < 0))
+ goto send_error;
/* Copy the IU into the transfer area */
*evt_struct->xfer_iu = evt_struct->iu;
return 0;
- send_busy:
- unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
-
- free_event_struct(&hostdata->pool, evt_struct);
- return SCSI_MLQUEUE_HOST_BUSY;
-
send_error:
unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
- if (evt_struct->cmnd != NULL) {
- evt_struct->cmnd->result = DID_ERROR << 16;
- evt_struct->cmnd_done(evt_struct->cmnd);
- } else if (evt_struct->done)
- evt_struct->done(evt_struct);
-
free_event_struct(&hostdata->pool, evt_struct);
- return 0;
+ return SCSI_MLQUEUE_HOST_BUSY;
}
/**
return;
case 0xFF: /* Hypervisor telling us the connection is closed */
scsi_block_requests(hostdata->host);
- atomic_set(&hostdata->request_limit, 0);
if (crq->format == 0x06) {
/* We need to re-setup the interpartition connection */
printk(KERN_INFO
"ibmvscsi: Re-enabling adapter!\n");
+ atomic_set(&hostdata->request_limit, -1);
purge_requests(hostdata, DID_REQUEUE);
- if ((ibmvscsi_reenable_crq_queue(&hostdata->queue,
- hostdata) == 0) ||
- (ibmvscsi_send_crq(hostdata,
- 0xC001000000000000LL, 0))) {
- atomic_set(&hostdata->request_limit,
- -1);
+ if (ibmvscsi_reenable_crq_queue(&hostdata->queue,
+ hostdata) == 0)
+ if (ibmvscsi_send_crq(hostdata,
+ 0xC001000000000000LL, 0))
printk(KERN_ERR
- "ibmvscsi: error after"
+ "ibmvscsi: transmit error after"
" enable\n");
- }
} else {
printk(KERN_INFO
"ibmvscsi: Virtual adapter failed rc %d!\n",
crq->format);
+ atomic_set(&hostdata->request_limit, -1);
purge_requests(hostdata, DID_ERROR);
- if ((ibmvscsi_reset_crq_queue(&hostdata->queue,
- hostdata)) ||
- (ibmvscsi_send_crq(hostdata,
- 0xC001000000000000LL, 0))) {
- atomic_set(&hostdata->request_limit,
- -1);
- printk(KERN_ERR
- "ibmvscsi: error after reset\n");
- }
+ ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
}
scsi_unblock_requests(hostdata->host);
return;
struct Scsi_Host *host;
struct device *dev = &vdev->dev;
unsigned long wait_switch = 0;
- int rc;
vdev->dev.driver_data = NULL;
atomic_set(&hostdata->request_limit, -1);
hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
- rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
- if (rc != 0 && rc != H_RESOURCE) {
+ if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
+ max_requests) != 0) {
printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
goto init_crq_failed;
}
* to fail if the other end is not active. In that case we don't
* want to scan
*/
- if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
- || rc == H_RESOURCE) {
+ if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
/*
* Wait around max init_timeout secs for the adapter to finish
* initializing. When we are done initializing, we will have a