diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index eaefeddb2..fbc1d5c3b 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -121,10 +121,9 @@ static int initialize_event_pool(struct event_pool *pool,
 	pool->size = size;
 	pool->next = 0;
-	pool->events = kmalloc(pool->size * sizeof(*pool->events), GFP_KERNEL);
+	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
 	if (!pool->events)
 		return -ENOMEM;
-	memset(pool->events, 0x00, pool->size * sizeof(*pool->events));
 
 	pool->iu_storage =
 	    dma_alloc_coherent(hostdata->dev,
@@ -168,7 +167,7 @@ static void release_event_pool(struct event_pool *pool,
 			++in_use;
 		if (pool->events[i].ext_list) {
 			dma_free_coherent(hostdata->dev,
-					  SG_ALL * sizeof(struct memory_descriptor),
+					  SG_ALL * sizeof(struct srp_direct_buf),
 					  pool->events[i].ext_list,
 					  pool->events[i].ext_list_token);
 		}
@@ -284,40 +283,37 @@ static void set_srp_direction(struct scsi_cmnd *cmd,
 			      struct srp_cmd *srp_cmd,
 			      int numbuf)
 {
+	u8 fmt;
+
 	if (numbuf == 0)
 		return;
 
-	if (numbuf == 1) {
+	if (numbuf == 1)
+		fmt = SRP_DATA_DESC_DIRECT;
+	else {
+		fmt = SRP_DATA_DESC_INDIRECT;
+		numbuf = min(numbuf, MAX_INDIRECT_BUFS);
+
 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
-			srp_cmd->data_out_format = SRP_DIRECT_BUFFER;
-		else
-			srp_cmd->data_in_format = SRP_DIRECT_BUFFER;
-	} else {
-		if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-			srp_cmd->data_out_format = SRP_INDIRECT_BUFFER;
-			srp_cmd->data_out_count =
-			   numbuf < MAX_INDIRECT_BUFS ?
- numbuf: MAX_INDIRECT_BUFS; - } + srp_cmd->data_out_desc_cnt = numbuf; + else + srp_cmd->data_in_desc_cnt = numbuf; } + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + srp_cmd->buf_fmt = fmt << 4; + else + srp_cmd->buf_fmt = fmt; } -static void unmap_sg_list(int num_entries, +static void unmap_sg_list(int num_entries, struct device *dev, - struct memory_descriptor *md) -{ + struct srp_direct_buf *md) +{ int i; - for (i = 0; i < num_entries; ++i) { - dma_unmap_single(dev, - md[i].virtual_address, - md[i].length, DMA_BIDIRECTIONAL); - } + for (i = 0; i < num_entries; ++i) + dma_unmap_single(dev, md[i].va, md[i].len, DMA_BIDIRECTIONAL); } /** @@ -330,23 +326,26 @@ static void unmap_cmd_data(struct srp_cmd *cmd, struct srp_event_struct *evt_struct, struct device *dev) { - if ((cmd->data_out_format == SRP_NO_BUFFER) && - (cmd->data_in_format == SRP_NO_BUFFER)) + u8 out_fmt, in_fmt; + + out_fmt = cmd->buf_fmt >> 4; + in_fmt = cmd->buf_fmt & ((1U << 4) - 1); + + if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC) return; - else if ((cmd->data_out_format == SRP_DIRECT_BUFFER) || - (cmd->data_in_format == SRP_DIRECT_BUFFER)) { - struct memory_descriptor *data = - (struct memory_descriptor *)cmd->additional_data; - dma_unmap_single(dev, data->virtual_address, data->length, - DMA_BIDIRECTIONAL); + else if (out_fmt == SRP_DATA_DESC_DIRECT || + in_fmt == SRP_DATA_DESC_DIRECT) { + struct srp_direct_buf *data = + (struct srp_direct_buf *) cmd->add_data; + dma_unmap_single(dev, data->va, data->len, DMA_BIDIRECTIONAL); } else { - struct indirect_descriptor *indirect = - (struct indirect_descriptor *)cmd->additional_data; - int num_mapped = indirect->head.length / - sizeof(indirect->list[0]); + struct srp_indirect_buf *indirect = + (struct srp_indirect_buf *) cmd->add_data; + int num_mapped = indirect->table_desc.len / + sizeof(struct srp_direct_buf); if (num_mapped <= MAX_INDIRECT_BUFS) { - unmap_sg_list(num_mapped, dev, &indirect->list[0]); + unmap_sg_list(num_mapped, dev, &indirect->desc_list[0]); return; } @@ -356,17 +355,17 @@ static void unmap_cmd_data(struct srp_cmd *cmd, static int map_sg_list(int num_entries, struct scatterlist *sg, - struct memory_descriptor *md) + struct srp_direct_buf *md) { int i; u64 total_length = 0; for (i = 0; i < num_entries; ++i) { - struct memory_descriptor *descr = md + i; + struct srp_direct_buf *descr = md + i; struct scatterlist *sg_entry = &sg[i]; - descr->virtual_address = sg_dma_address(sg_entry); - descr->length = sg_dma_len(sg_entry); - descr->memory_handle = 0; + descr->va = sg_dma_address(sg_entry); + descr->len = sg_dma_len(sg_entry); + descr->key = 0; total_length += sg_dma_len(sg_entry); } return total_length; @@ -389,10 +388,10 @@ static int map_sg_data(struct scsi_cmnd *cmd, int sg_mapped; u64 total_length = 0; struct scatterlist *sg = cmd->request_buffer; - struct memory_descriptor *data = - (struct memory_descriptor *)srp_cmd->additional_data; - struct indirect_descriptor *indirect = - (struct indirect_descriptor *)data; + struct srp_direct_buf *data = + (struct srp_direct_buf *) srp_cmd->add_data; + struct srp_indirect_buf *indirect = + (struct srp_indirect_buf *) data; sg_mapped = dma_map_sg(dev, sg, cmd->use_sg, DMA_BIDIRECTIONAL); @@ -403,9 +402,9 @@ static int map_sg_data(struct scsi_cmnd *cmd, /* special case; we can use a single direct descriptor */ if (sg_mapped == 1) { - data->virtual_address = sg_dma_address(&sg[0]); - data->length = sg_dma_len(&sg[0]); - data->memory_handle = 0; + data->va = sg_dma_address(&sg[0]); + data->len = 
sg_dma_len(&sg[0]); + data->key = 0; return 1; } @@ -416,25 +415,26 @@ static int map_sg_data(struct scsi_cmnd *cmd, return 0; } - indirect->head.virtual_address = 0; - indirect->head.length = sg_mapped * sizeof(indirect->list[0]); - indirect->head.memory_handle = 0; + indirect->table_desc.va = 0; + indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf); + indirect->table_desc.key = 0; if (sg_mapped <= MAX_INDIRECT_BUFS) { - total_length = map_sg_list(sg_mapped, sg, &indirect->list[0]); - indirect->total_length = total_length; + total_length = map_sg_list(sg_mapped, sg, + &indirect->desc_list[0]); + indirect->len = total_length; return 1; } /* get indirect table */ if (!evt_struct->ext_list) { - evt_struct->ext_list =(struct memory_descriptor*) + evt_struct->ext_list = (struct srp_direct_buf *) dma_alloc_coherent(dev, - SG_ALL * sizeof(struct memory_descriptor), - &evt_struct->ext_list_token, 0); + SG_ALL * sizeof(struct srp_direct_buf), + &evt_struct->ext_list_token, 0); if (!evt_struct->ext_list) { - printk(KERN_ERR - "ibmvscsi: Can't allocate memory for indirect table\n"); + printk(KERN_ERR + "ibmvscsi: Can't allocate memory for indirect table\n"); return 0; } @@ -442,11 +442,11 @@ static int map_sg_data(struct scsi_cmnd *cmd, total_length = map_sg_list(sg_mapped, sg, evt_struct->ext_list); - indirect->total_length = total_length; - indirect->head.virtual_address = evt_struct->ext_list_token; - indirect->head.length = sg_mapped * sizeof(indirect->list[0]); - memcpy(indirect->list, evt_struct->ext_list, - MAX_INDIRECT_BUFS * sizeof(struct memory_descriptor)); + indirect->len = total_length; + indirect->table_desc.va = evt_struct->ext_list_token; + indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]); + memcpy(indirect->desc_list, evt_struct->ext_list, + MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); return 1; } @@ -463,20 +463,20 @@ static int map_sg_data(struct scsi_cmnd *cmd, static int map_single_data(struct scsi_cmnd *cmd, struct srp_cmd *srp_cmd, struct device *dev) { - struct memory_descriptor *data = - (struct memory_descriptor *)srp_cmd->additional_data; + struct srp_direct_buf *data = + (struct srp_direct_buf *) srp_cmd->add_data; - data->virtual_address = + data->va = dma_map_single(dev, cmd->request_buffer, cmd->request_bufflen, DMA_BIDIRECTIONAL); - if (dma_mapping_error(data->virtual_address)) { + if (dma_mapping_error(data->va)) { printk(KERN_ERR "ibmvscsi: Unable to map request_buffer for command!\n"); return 0; } - data->length = cmd->request_bufflen; - data->memory_handle = 0; + data->len = cmd->request_bufflen; + data->key = 0; set_srp_direction(cmd, srp_cmd, 1); @@ -535,6 +535,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, struct ibmvscsi_host_data *hostdata) { u64 *crq_as_u64 = (u64 *) &evt_struct->crq; + int request_status; int rc; /* If we have exhausted our request limit, just fail this request. 
@@ -542,13 +543,22 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, * (such as task management requests) that the mid layer may think we * can handle more requests (can_queue) when we actually can't */ - if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) && - (atomic_dec_if_positive(&hostdata->request_limit) < 0)) - goto send_error; + if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) { + request_status = + atomic_dec_if_positive(&hostdata->request_limit); + /* If request limit was -1 when we started, it is now even + * less than that + */ + if (request_status < -1) + goto send_error; + /* Otherwise, if we have run out of requests */ + else if (request_status < 0) + goto send_busy; + } /* Copy the IU into the transfer area */ *evt_struct->xfer_iu = evt_struct->iu; - evt_struct->xfer_iu->srp.generic.tag = (u64)evt_struct; + evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct; /* Add this to the sent list. We need to do this * before we actually send @@ -567,11 +577,23 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, return 0; - send_error: + send_busy: unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); free_event_struct(&hostdata->pool, evt_struct); return SCSI_MLQUEUE_HOST_BUSY; + + send_error: + unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev); + + if (evt_struct->cmnd != NULL) { + evt_struct->cmnd->result = DID_ERROR << 16; + evt_struct->cmnd_done(evt_struct->cmnd); + } else if (evt_struct->done) + evt_struct->done(evt_struct); + + free_event_struct(&hostdata->pool, evt_struct); + return 0; } /** @@ -586,27 +608,27 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct) struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp; struct scsi_cmnd *cmnd = evt_struct->cmnd; - if (unlikely(rsp->type != SRP_RSP_TYPE)) { + if (unlikely(rsp->opcode != SRP_RSP)) { if (printk_ratelimit()) printk(KERN_WARNING "ibmvscsi: bad SRP RSP type %d\n", - rsp->type); + rsp->opcode); } if (cmnd) { cmnd->result = rsp->status; if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) memcpy(cmnd->sense_buffer, - rsp->sense_and_response_data, - rsp->sense_data_list_length); + rsp->data, + rsp->sense_data_len); unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, evt_struct->hostdata->dev); - if (rsp->doover) - cmnd->resid = rsp->data_out_residual_count; - else if (rsp->diover) - cmnd->resid = rsp->data_in_residual_count; + if (rsp->flags & SRP_RSP_FLAG_DOOVER) + cmnd->resid = rsp->data_out_res_cnt; + else if (rsp->flags & SRP_RSP_FLAG_DIOVER) + cmnd->resid = rsp->data_in_res_cnt; } if (evt_struct->cmnd_done) @@ -633,10 +655,11 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, { struct srp_cmd *srp_cmd; struct srp_event_struct *evt_struct; - struct indirect_descriptor *indirect; + struct srp_indirect_buf *indirect; struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)&cmnd->device->host->hostdata; u16 lun = lun_from_dev(cmnd->device); + u8 out_fmt, in_fmt; evt_struct = get_event_struct(&hostdata->pool); if (!evt_struct) @@ -644,8 +667,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, /* Set up the actual SRP IU */ srp_cmd = &evt_struct->iu.srp.cmd; - memset(srp_cmd, 0x00, sizeof(*srp_cmd)); - srp_cmd->type = SRP_CMD_TYPE; + memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); + srp_cmd->opcode = SRP_CMD; memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(cmnd->cmnd)); srp_cmd->lun = ((u64) lun) << 48; @@ -664,13 +687,15 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, evt_struct->cmnd_done = done; /* Fix up dma 
address of the buffer itself */ - indirect = (struct indirect_descriptor *)srp_cmd->additional_data; - if (((srp_cmd->data_out_format == SRP_INDIRECT_BUFFER) || - (srp_cmd->data_in_format == SRP_INDIRECT_BUFFER)) && - (indirect->head.virtual_address == 0)) { - indirect->head.virtual_address = evt_struct->crq.IU_data_ptr + - offsetof(struct srp_cmd, additional_data) + - offsetof(struct indirect_descriptor, list); + indirect = (struct srp_indirect_buf *) srp_cmd->add_data; + out_fmt = srp_cmd->buf_fmt >> 4; + in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1); + if ((in_fmt == SRP_DATA_DESC_INDIRECT || + out_fmt == SRP_DATA_DESC_INDIRECT) && + indirect->table_desc.va == 0) { + indirect->table_desc.va = evt_struct->crq.IU_data_ptr + + offsetof(struct srp_cmd, add_data) + + offsetof(struct srp_indirect_buf, desc_list); } return ibmvscsi_send_srp_event(evt_struct, hostdata); @@ -735,7 +760,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) { struct viosrp_adapter_info *req; struct srp_event_struct *evt_struct; - + dma_addr_t addr; + evt_struct = get_event_struct(&hostdata->pool); if (!evt_struct) { printk(KERN_ERR "ibmvscsi: couldn't allocate an event " @@ -753,10 +779,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) req->common.type = VIOSRP_ADAPTER_INFO_TYPE; req->common.length = sizeof(hostdata->madapter_info); - req->buffer = dma_map_single(hostdata->dev, - &hostdata->madapter_info, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); + req->buffer = addr = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); if (dma_mapping_error(req->buffer)) { printk(KERN_ERR @@ -766,8 +792,13 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) return; } - if (ibmvscsi_send_srp_event(evt_struct, hostdata)) + if (ibmvscsi_send_srp_event(evt_struct, hostdata)) { printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n"); + dma_unmap_single(hostdata->dev, + addr, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + } }; /** @@ -780,10 +811,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) static void login_rsp(struct srp_event_struct *evt_struct) { struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - switch (evt_struct->xfer_iu->srp.generic.type) { - case SRP_LOGIN_RSP_TYPE: /* it worked! */ + switch (evt_struct->xfer_iu->srp.login_rsp.opcode) { + case SRP_LOGIN_RSP: /* it worked! */ break; - case SRP_LOGIN_REJ_TYPE: /* refused! */ + case SRP_LOGIN_REJ: /* refused! */ printk(KERN_INFO "ibmvscsi: SRP_LOGIN_REJ reason %u\n", evt_struct->xfer_iu->srp.login_rej.reason); /* Login failed. */ @@ -792,7 +823,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) default: printk(KERN_ERR "ibmvscsi: Invalid login response typecode 0x%02x!\n", - evt_struct->xfer_iu->srp.generic.type); + evt_struct->xfer_iu->srp.login_rsp.opcode); /* Login failed. 
*/ atomic_set(&hostdata->request_limit, -1); return; @@ -800,17 +831,17 @@ static void login_rsp(struct srp_event_struct *evt_struct) printk(KERN_INFO "ibmvscsi: SRP_LOGIN succeeded\n"); - if (evt_struct->xfer_iu->srp.login_rsp.request_limit_delta > + if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta > (max_requests - 2)) - evt_struct->xfer_iu->srp.login_rsp.request_limit_delta = + evt_struct->xfer_iu->srp.login_rsp.req_lim_delta = max_requests - 2; /* Now we know what the real request-limit is */ atomic_set(&hostdata->request_limit, - evt_struct->xfer_iu->srp.login_rsp.request_limit_delta); + evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); hostdata->host->can_queue = - evt_struct->xfer_iu->srp.login_rsp.request_limit_delta - 2; + evt_struct->xfer_iu->srp.login_rsp.req_lim_delta - 2; if (hostdata->host->can_queue < 1) { printk(KERN_ERR "ibmvscsi: Invalid request_limit_delta\n"); @@ -849,18 +880,19 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) login = &evt_struct->iu.srp.login_req; memset(login, 0x00, sizeof(struct srp_login_req)); - login->type = SRP_LOGIN_REQ_TYPE; - login->max_requested_initiator_to_target_iulen = sizeof(union srp_iu); - login->required_buffer_formats = 0x0006; + login->opcode = SRP_LOGIN_REQ; + login->req_it_iu_len = sizeof(union srp_iu); + login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; + spin_lock_irqsave(hostdata->host->host_lock, flags); /* Start out with a request limit of 1, since this is negotiated in * the login request we are just sending */ atomic_set(&hostdata->request_limit, 1); - spin_lock_irqsave(hostdata->host->host_lock, flags); rc = ibmvscsi_send_srp_event(evt_struct, hostdata); spin_unlock_irqrestore(hostdata->host->host_lock, flags); + printk("ibmvscsic: sent SRP login\n"); return rc; }; @@ -928,13 +960,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) /* Set up an abort SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); - tsk_mgmt->type = SRP_TSK_MGMT_TYPE; + tsk_mgmt->opcode = SRP_TSK_MGMT; tsk_mgmt->lun = ((u64) lun) << 48; - tsk_mgmt->task_mgmt_flags = 0x01; /* ABORT TASK */ - tsk_mgmt->managed_task_tag = (u64) found_evt; + tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; + tsk_mgmt->task_tag = (u64) found_evt; printk(KERN_INFO "ibmvscsi: aborting command. 
lun 0x%lx, tag 0x%lx\n", - tsk_mgmt->lun, tsk_mgmt->managed_task_tag); + tsk_mgmt->lun, tsk_mgmt->task_tag); evt->sync_srp = &srp_rsp; init_completion(&evt->comp); @@ -948,25 +980,25 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) wait_for_completion(&evt->comp); /* make sure we got a good response */ - if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) { + if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) { if (printk_ratelimit()) printk(KERN_WARNING "ibmvscsi: abort bad SRP RSP type %d\n", - srp_rsp.srp.generic.type); + srp_rsp.srp.rsp.opcode); return FAILED; } - if (srp_rsp.srp.rsp.rspvalid) - rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data); + if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID) + rsp_rc = *((int *)srp_rsp.srp.rsp.data); else rsp_rc = srp_rsp.srp.rsp.status; if (rsp_rc) { if (printk_ratelimit()) printk(KERN_WARNING - "ibmvscsi: abort code %d for task tag 0x%lx\n", + "ibmvscsi: abort code %d for task tag 0x%lx\n", rsp_rc, - tsk_mgmt->managed_task_tag); + tsk_mgmt->task_tag); return FAILED; } @@ -987,13 +1019,13 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) spin_unlock_irqrestore(hostdata->host->host_lock, flags); printk(KERN_INFO "ibmvscsi: aborted task tag 0x%lx completed\n", - tsk_mgmt->managed_task_tag); + tsk_mgmt->task_tag); return SUCCESS; } printk(KERN_INFO "ibmvscsi: successfully aborted task tag 0x%lx\n", - tsk_mgmt->managed_task_tag); + tsk_mgmt->task_tag); cmd->result = (DID_ABORT << 16); list_del(&found_evt->list); @@ -1040,9 +1072,9 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) /* Set up a lun reset SRP command */ memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); - tsk_mgmt->type = SRP_TSK_MGMT_TYPE; + tsk_mgmt->opcode = SRP_TSK_MGMT; tsk_mgmt->lun = ((u64) lun) << 48; - tsk_mgmt->task_mgmt_flags = 0x08; /* LUN RESET */ + tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; printk(KERN_INFO "ibmvscsi: resetting device. 
lun 0x%lx\n", tsk_mgmt->lun); @@ -1059,16 +1091,16 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) wait_for_completion(&evt->comp); /* make sure we got a good response */ - if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) { + if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) { if (printk_ratelimit()) printk(KERN_WARNING "ibmvscsi: reset bad SRP RSP type %d\n", - srp_rsp.srp.generic.type); + srp_rsp.srp.rsp.opcode); return FAILED; } - if (srp_rsp.srp.rsp.rspvalid) - rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data); + if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID) + rsp_rc = *((int *)srp_rsp.srp.rsp.data); else rsp_rc = srp_rsp.srp.rsp.status; @@ -1076,8 +1108,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) if (printk_ratelimit()) printk(KERN_WARNING "ibmvscsi: reset code %d for task tag 0x%lx\n", - rsp_rc, - tsk_mgmt->managed_task_tag); + rsp_rc, tsk_mgmt->task_tag); return FAILED; } @@ -1175,26 +1206,37 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, return; case 0xFF: /* Hypervisor telling us the connection is closed */ scsi_block_requests(hostdata->host); + atomic_set(&hostdata->request_limit, 0); if (crq->format == 0x06) { /* We need to re-setup the interpartition connection */ printk(KERN_INFO "ibmvscsi: Re-enabling adapter!\n"); purge_requests(hostdata, DID_REQUEUE); - if (ibmvscsi_reenable_crq_queue(&hostdata->queue, - hostdata) == 0) - if (ibmvscsi_send_crq(hostdata, - 0xC001000000000000LL, 0)) + if ((ibmvscsi_reenable_crq_queue(&hostdata->queue, + hostdata)) || + (ibmvscsi_send_crq(hostdata, + 0xC001000000000000LL, 0))) { + atomic_set(&hostdata->request_limit, + -1); printk(KERN_ERR - "ibmvscsi: transmit error after" + "ibmvscsi: error after" " enable\n"); + } } else { printk(KERN_INFO "ibmvscsi: Virtual adapter failed rc %d!\n", crq->format); - atomic_set(&hostdata->request_limit, -1); purge_requests(hostdata, DID_ERROR); - ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata); + if ((ibmvscsi_reset_crq_queue(&hostdata->queue, + hostdata)) || + (ibmvscsi_send_crq(hostdata, + 0xC001000000000000LL, 0))) { + atomic_set(&hostdata->request_limit, + -1); + printk(KERN_ERR + "ibmvscsi: error after reset\n"); + } } scsi_unblock_requests(hostdata->host); return; @@ -1226,7 +1268,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, } if (crq->format == VIOSRP_SRP_FORMAT) - atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta, + atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta, &hostdata->request_limit); if (evt_struct->done) @@ -1254,6 +1296,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, { struct viosrp_host_config *host_config; struct srp_event_struct *evt_struct; + dma_addr_t addr; int rc; evt_struct = get_event_struct(&hostdata->pool); @@ -1274,8 +1317,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, memset(host_config, 0x00, sizeof(*host_config)); host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; host_config->common.length = length; - host_config->buffer = dma_map_single(hostdata->dev, buffer, length, - DMA_BIDIRECTIONAL); + host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, + length, + DMA_BIDIRECTIONAL); if (dma_mapping_error(host_config->buffer)) { printk(KERN_ERR @@ -1286,11 +1330,9 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_completion(&evt_struct->comp); rc = ibmvscsi_send_srp_event(evt_struct, hostdata); - if (rc == 0) { + if (rc == 0) wait_for_completion(&evt_struct->comp); - 
dma_unmap_single(hostdata->dev, host_config->buffer,
-				 length, DMA_BIDIRECTIONAL);
-	}
+	dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL);
 
 	return rc;
 }
@@ -1457,6 +1499,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	struct Scsi_Host *host;
 	struct device *dev = &vdev->dev;
 	unsigned long wait_switch = 0;
+	int rc;
 
 	vdev->dev.driver_data = NULL;
 
@@ -1474,8 +1517,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	atomic_set(&hostdata->request_limit, -1);
 	hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
 
-	if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
-				    max_requests) != 0) {
+	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_requests);
+	if (rc != 0 && rc != H_RESOURCE) {
 		printk(KERN_ERR "ibmvscsi: couldn't initialize crq\n");
 		goto init_crq_failed;
 	}
@@ -1495,7 +1538,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	 * to fail if the other end is not active. In that case we don't
 	 * want to scan
 	 */
-	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0) {
+	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
+	    || rc == H_RESOURCE) {
 		/*
 		 * Wait around max init_timeout secs for the adapter to finish
 		 * initializing. When we are done initializing, we will have a
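
The core of the conversion in this patch is SRP's packed buffer-format byte: srp_cmd->buf_fmt carries the data-out descriptor format in its high nibble and the data-in format in its low nibble, replacing the driver's old separate data_out_format/data_in_format fields, with the descriptor counts moved to data_out_desc_cnt/data_in_desc_cnt. Below is a minimal standalone C sketch of that packing and unpacking; the SRP_* enum values match include/scsi/srp.h, but the pack_buf_fmt/unpack_buf_fmt helpers are illustrative only and not part of the driver or the header.

#include <stdio.h>
#include <stdint.h>

/* Descriptor format codes as defined in include/scsi/srp.h */
enum srp_data_desc_type {
	SRP_NO_DATA_DESC	= 0,	/* no data buffer */
	SRP_DATA_DESC_DIRECT	= 1,	/* single srp_direct_buf */
	SRP_DATA_DESC_INDIRECT	= 2,	/* srp_indirect_buf with a table */
};

/* Illustrative helper: data-out format in the high nibble,
 * data-in format in the low nibble, as set_srp_direction() does
 * with "fmt << 4" vs. plain "fmt".
 */
static uint8_t pack_buf_fmt(uint8_t out_fmt, uint8_t in_fmt)
{
	return (uint8_t)((out_fmt << 4) | (in_fmt & 0x0f));
}

/* Illustrative inverse, mirroring unmap_cmd_data()'s
 * "buf_fmt >> 4" and "buf_fmt & ((1U << 4) - 1)".
 */
static void unpack_buf_fmt(uint8_t buf_fmt, uint8_t *out_fmt, uint8_t *in_fmt)
{
	*out_fmt = buf_fmt >> 4;
	*in_fmt  = buf_fmt & ((1U << 4) - 1);
}

int main(void)
{
	/* A write (DMA_TO_DEVICE) using an indirect table, no read buffer */
	uint8_t fmt = pack_buf_fmt(SRP_DATA_DESC_INDIRECT, SRP_NO_DATA_DESC);
	uint8_t out, in;

	unpack_buf_fmt(fmt, &out, &in);
	printf("buf_fmt=0x%02x out=%u in=%u\n", fmt, out, in);	/* 0x20 2 0 */
	return 0;
}

Because a SCSI command here moves data in only one direction, set_srp_direction() only ever fills one of the two nibbles; the other is left at SRP_NO_DATA_DESC (0) by the memset of the IU.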