X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fscsi%2Fqla2xxx%2Fqla_iocb.c;h=c5b3c610a32a2968219ca29c8a572cc35b0ac605;hb=refs%2Fheads%2Fvserver;hp=ec066074c72297baa28e1c81804269a09149e06c;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index ec066074c..c5b3c610a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,22 +1,9 @@
-/******************************************************************************
- *                  QLOGIC LINUX SOFTWARE
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2005 QLogic Corporation
  *
- * QLogic ISP2x00 device driver for Linux 2.6.x
- * Copyright (C) 2003-2004 QLogic Corporation
- * (www.qlogic.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- ******************************************************************************/
-
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
 #include "qla_def.h"
 
 #include <linux/blkdev.h>
@@ -28,6 +15,7 @@ static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
 static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
 static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
 static request_t *qla2x00_req_pkt(scsi_qla_host_t *ha);
+static void qla2x00_isp_cmd(scsi_qla_host_t *ha);
 
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -216,18 +204,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 			cur_seg++;
 		}
 	} else {
-		dma_addr_t	req_dma;
-		struct page	*page;
-		unsigned long	offset;
-
-		page = virt_to_page(cmd->request_buffer);
-		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
-		req_dma = pci_map_page(ha->pdev, page, offset,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-
-		sp->dma_handle = req_dma;
-
-		*cur_dsd++ = cpu_to_le32(req_dma);
+		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
 		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
 	}
 }
@@ -299,19 +276,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 			cur_seg++;
 		}
 	} else {
-		dma_addr_t	req_dma;
-		struct page	*page;
-		unsigned long	offset;
-
-		page = virt_to_page(cmd->request_buffer);
-		offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
-		req_dma = pci_map_page(ha->pdev, page, offset,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-
-		sp->dma_handle = req_dma;
-
-		*cur_dsd++ = cpu_to_le32(LSD(req_dma));
-		*cur_dsd++ = cpu_to_le32(MSD(req_dma));
+		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
+		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
 		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
 	}
 }
@@ -328,26 +294,24 @@ qla2x00_start_scsi(srb_t *sp)
 	int		ret;
 	unsigned long	flags;
 	scsi_qla_host_t	*ha;
-	fc_lun_t	*fclun;
 	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
 	uint32_t	index;
 	uint32_t	handle;
 	cmd_entry_t	*cmd_pkt;
-	uint32_t	timeout;
 	struct scatterlist *sg;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
-	device_reg_t __iomem *reg;
-	char		tag[2];
+	struct device_reg_2xxx __iomem *reg;
 
 	/* Setup device pointers. */
 	ret = 0;
-	fclun = sp->lun_queue->fclun;
-	ha = fclun->fcport->ha;
-	reg = ha->iobase;
+	ha = sp->ha;
+	reg = &ha->iobase->isp;
 	cmd = sp->cmd;
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
 
 	/* Send marker if required */
 	if (ha->marker_needed != 0) {
@@ -372,8 +336,27 @@ qla2x00_start_scsi(srb_t *sp)
 	if (index == MAX_OUTSTANDING_COMMANDS)
 		goto queuing_error;
 
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (cmd->use_sg) {
+		sg = (struct scatterlist *) cmd->request_buffer;
+		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
+		    cmd->sc_data_direction);
+		if (tot_dsds == 0)
+			goto queuing_error;
+	} else if (cmd->request_bufflen) {
+		dma_addr_t	req_dma;
+
+		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
+		    cmd->request_bufflen, cmd->sc_data_direction);
+		if (dma_mapping_error(req_dma))
+			goto queuing_error;
+
+		sp->dma_handle = req_dma;
+		tot_dsds = 1;
+	}
+
 	/* Calculate the number of request entries needed. */
-	req_cnt = (ha->calc_request_entries)(cmd->request->nr_hw_segments);
+	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
 	if (ha->req_q_cnt < (req_cnt + 2)) {
 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
 		if (ha->req_ring_index < cnt)
@@ -385,19 +368,6 @@ qla2x00_start_scsi(srb_t *sp)
 	if (ha->req_q_cnt < (req_cnt + 2))
 		goto queuing_error;
 
-	/* Finally, we have enough space, now perform mappings. */
-	tot_dsds = 0;
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		tot_dsds++;
-	}
-	req_cnt = (ha->calc_request_entries)(tot_dsds);
-
 	/* Build command packet */
 	ha->current_outstanding_cmd = handle;
 	ha->outstanding_cmds[handle] = sp;
@@ -412,45 +382,19 @@ qla2x00_start_scsi(srb_t *sp)
 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
 
-	/* Set target ID */
-	SET_TARGET_ID(ha, cmd_pkt->target, fclun->fcport->loop_id);
-
-	/* Set LUN number*/
-	cmd_pkt->lun = cpu_to_le16(fclun->lun);
+	/* Set target ID and LUN number*/
+	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
+	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
 
 	/* Update tagged queuing modifier */
 	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
-	if (scsi_populate_tag_msg(cmd, tag)) {
-		switch (tag[0]) {
-		case MSG_HEAD_TAG:
-			cmd_pkt->control_flags =
-			    __constant_cpu_to_le16(CF_HEAD_TAG);
-			break;
-		case MSG_ORDERED_TAG:
-			cmd_pkt->control_flags =
-			    __constant_cpu_to_le16(CF_ORDERED_TAG);
-			break;
-		}
-	}
-
-	/*
-	 * Allocate at least 5 (+ QLA_CMD_TIMER_DELTA) seconds for RISC timeout.
-	 */
-	timeout = (uint32_t)(cmd->timeout_per_command / HZ);
-	if (timeout > 65535)
-		cmd_pkt->timeout = __constant_cpu_to_le16(0);
-	else if (timeout > 25)
-		cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout -
-		    (5 + QLA_CMD_TIMER_DELTA));
-	else
-		cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout);
 
 	/* Load SCSI command packet. */
 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
 
 	/* Build IOCB segments */
-	(ha->build_scsi_iocbs)(sp, cmd_pkt, tot_dsds);
+	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
 
 	/* Set total data segment count. */
 	cmd_pkt->entry_count = (uint8_t)req_cnt;
@@ -464,21 +408,29 @@ qla2x00_start_scsi(srb_t *sp)
 	} else
 		ha->request_ring_ptr++;
 
-	ha->actthreads++;
-	ha->total_ios++;
-	sp->lun_queue->out_cnt++;
 	sp->flags |= SRB_DMA_VALID;
-	sp->state = SRB_ACTIVE_STATE;
-	sp->u_start = jiffies;
 
 	/* Set chip new ring index. */
 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
 	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
 
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (ha->flags.process_response_queue &&
+	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
+		qla2x00_process_response_queue(ha);
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return (QLA_SUCCESS);
 
 queuing_error:
+	if (cmd->use_sg && tot_dsds) {
+		sg = (struct scatterlist *) cmd->request_buffer;
+		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
+		    cmd->sc_data_direction);
+	} else if (tot_dsds) {
+		pci_unmap_single(ha->pdev, sp->dma_handle,
+		    cmd->request_bufflen, cmd->sc_data_direction);
+	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return (QLA_FUNCTION_FAILED);
@@ -499,31 +451,40 @@ int
 __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
     uint8_t type)
 {
-	mrk_entry_t	*pkt;
+	mrk_entry_t *mrk;
+	struct mrk_entry_24xx *mrk24;
 
-	pkt = (mrk_entry_t *)qla2x00_req_pkt(ha);
-	if (pkt == NULL) {
-		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
+	mrk24 = NULL;
+	mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
+	if (mrk == NULL) {
+		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
+		    __func__, ha->host_no));
 
 		return (QLA_FUNCTION_FAILED);
 	}
 
-	pkt->entry_type = MARKER_TYPE;
-	pkt->modifier = type;
-
+	mrk->entry_type = MARKER_TYPE;
+	mrk->modifier = type;
 	if (type != MK_SYNC_ALL) {
-		pkt->lun = cpu_to_le16(lun);
-		SET_TARGET_ID(ha, pkt->target, loop_id);
+		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+			mrk24 = (struct mrk_entry_24xx *) mrk;
+			mrk24->nport_handle = cpu_to_le16(loop_id);
+			mrk24->lun[1] = LSB(lun);
+			mrk24->lun[2] = MSB(lun);
+			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
+		} else {
+			SET_TARGET_ID(ha, mrk->target, loop_id);
+			mrk->lun = cpu_to_le16(lun);
+		}
 	}
 	wmb();
 
-	/* Issue command to ISP */
 	qla2x00_isp_cmd(ha);
 
 	return (QLA_SUCCESS);
 }
 
-int 
+int
 qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
     uint8_t type)
 {
@@ -559,7 +520,12 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
 	for (timer = HZ; timer; timer--) {
 		if ((req_cnt + 2) >= ha->req_q_cnt) {
 			/* Calculate number of free request entries. */
-			cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
+			if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
+				cnt = (uint16_t)RD_REG_DWORD(
+				    &reg->isp24.req_q_out);
+			else
+				cnt = qla2x00_debounce_register(
+				    ISP_REQ_Q_OUT(ha, &reg->isp));
 			if (ha->req_ring_index < cnt)
 				ha->req_q_cnt = cnt - ha->req_ring_index;
 			else
@@ -610,7 +576,7 @@ qla2x00_req_pkt(scsi_qla_host_t *ha)
  *
  * Note: The caller must hold the hardware lock before calling this routine.
  */
-void
+static void
 qla2x00_isp_cmd(scsi_qla_host_t *ha)
 {
 	device_reg_t __iomem *reg = ha->iobase;
@@ -628,6 +594,274 @@ qla2x00_isp_cmd(scsi_qla_host_t *ha)
 		ha->request_ring_ptr++;
 
 	/* Set chip new ring index. */
-	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
-	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
+	if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
+		WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
+		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+	} else {
+		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
+		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+	}
+
 }
+
+/**
+ * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+static inline uint16_t
+qla24xx_calc_iocbs(uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > 1) {
+		iocbs += (dsds - 1) / 5;
+		if ((dsds - 1) % 5)
+			iocbs++;
+	}
+	return iocbs;
+}
+
+/**
+ * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
+ * IOCB types.
+ *
+ * @sp: SRB command to process
+ * @cmd_pkt: Command Type 7 IOCB
+ * @tot_dsds: Total number of segments to transfer
+ */
+static inline void
+qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
+    uint16_t tot_dsds)
+{
+	uint16_t	avail_dsds;
+	uint32_t	*cur_dsd;
+	scsi_qla_host_t	*ha;
+	struct scsi_cmnd *cmd;
+
+	cmd = sp->cmd;
+
+	/* Update entry type to indicate Command Type 7 IOCB */
+	*((uint32_t *)(&cmd_pkt->entry_type)) =
+	    __constant_cpu_to_le32(COMMAND_TYPE_7);
+
+	/* No data transfer */
+	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		return;
+	}
+
+	ha = sp->ha;
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		cmd_pkt->task_mgmt_flags =
+		    __constant_cpu_to_le16(TMF_WRITE_DATA);
+	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+		cmd_pkt->task_mgmt_flags =
+		    __constant_cpu_to_le16(TMF_READ_DATA);
+
+	/* One DSD is available in the Command Type 7 IOCB */
+	avail_dsds = 1;
+	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
+
+	/* Load data segments */
+	if (cmd->use_sg != 0) {
+		struct scatterlist *cur_seg;
+		struct scatterlist *end_seg;
+
+		cur_seg = (struct scatterlist *)cmd->request_buffer;
+		end_seg = cur_seg + tot_dsds;
+		while (cur_seg < end_seg) {
+			dma_addr_t	sle_dma;
+			cont_a64_entry_t *cont_pkt;
+
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				/*
+				 * Five DSDs are available in the Continuation
+				 * Type 1 IOCB.
+				 */
+				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+				avail_dsds = 5;
+			}
+
+			sle_dma = sg_dma_address(cur_seg);
+			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+			avail_dsds--;
+
+			cur_seg++;
+		}
+	} else {
+		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
+		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
+		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+	}
+}
+
+
+/**
+ * qla24xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla24xx_start_scsi(srb_t *sp)
+{
+	int		ret;
+	unsigned long	flags;
+	scsi_qla_host_t	*ha;
+	struct scsi_cmnd *cmd;
+	uint32_t	*clr_ptr;
+	uint32_t	index;
+	uint32_t	handle;
+	struct cmd_type_7 *cmd_pkt;
+	struct scatterlist *sg;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct device_reg_24xx __iomem *reg;
+
+	/* Setup device pointers. */
+	ret = 0;
+	ha = sp->ha;
+	reg = &ha->iobase->isp24;
+	cmd = sp->cmd;
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (ha->marker_needed != 0) {
+		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+			return QLA_FUNCTION_FAILED;
+		}
+		ha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = ha->current_outstanding_cmd;
+	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+		handle++;
+		if (handle == MAX_OUTSTANDING_COMMANDS)
+			handle = 1;
+		if (ha->outstanding_cmds[handle] == 0)
+			break;
+	}
+	if (index == MAX_OUTSTANDING_COMMANDS)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (cmd->use_sg) {
+		sg = (struct scatterlist *) cmd->request_buffer;
+		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
+		    cmd->sc_data_direction);
+		if (tot_dsds == 0)
+			goto queuing_error;
+	} else if (cmd->request_bufflen) {
+		dma_addr_t	req_dma;
+
+		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
+		    cmd->request_bufflen, cmd->sc_data_direction);
+		if (dma_mapping_error(req_dma))
+			goto queuing_error;
+
+		sp->dma_handle = req_dma;
+		tot_dsds = 1;
+	}
+
+	req_cnt = qla24xx_calc_iocbs(tot_dsds);
+	if (ha->req_q_cnt < (req_cnt + 2)) {
+		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
+		if (ha->req_ring_index < cnt)
+			ha->req_q_cnt = cnt - ha->req_ring_index;
+		else
+			ha->req_q_cnt = ha->request_q_length -
+				(ha->req_ring_index - cnt);
+	}
+	if (ha->req_q_cnt < (req_cnt + 2))
+		goto queuing_error;
+
+	/* Build command packet. */
+	ha->current_outstanding_cmd = handle;
+	ha->outstanding_cmds[handle] = sp;
+	sp->ha = ha;
+	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	ha->req_q_cnt -= req_cnt;
+
+	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
+	cmd_pkt->handle = handle;
+
+	/* Zero out remaining portion of packet. */
+	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* Set NPORT-ID and LUN number*/
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+
+	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+	/* Load SCSI command packet. */
+	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+
+	/* Build IOCB segments */
+	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+	/* Set total data segment count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	wmb();
+
+	/* Adjust ring index. */
+	ha->req_ring_index++;
+	if (ha->req_ring_index == ha->request_q_length) {
+		ha->req_ring_index = 0;
+		ha->request_ring_ptr = ha->request_ring;
+	} else
+		ha->request_ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
+	RD_REG_DWORD_RELAXED(&reg->req_q_in);		/* PCI Posting. */
+
+	/* Manage unprocessed RIO/ZIO commands in response queue. */
+	if (ha->flags.process_response_queue &&
+	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
+		qla24xx_process_response_queue(ha);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (cmd->use_sg && tot_dsds) {
+		sg = (struct scatterlist *) cmd->request_buffer;
+		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
+		    cmd->sc_data_direction);
+	} else if (tot_dsds) {
+		pci_unmap_single(ha->pdev, sp->dma_handle,
+		    cmd->request_bufflen, cmd->sc_data_direction);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
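
For reference, the request-entry sizing introduced by this patch reduces to simple arithmetic: a Command Type 7 IOCB carries one data segment descriptor (DSD), and each Continuation Type 1 IOCB carries five more. The following standalone userspace sketch (illustrative only, not part of the patch; calc_iocbs is a hypothetical copy of the qla24xx_calc_iocbs() logic above) demonstrates the mapping from DSD count to IOCB count:

/*
 * Standalone sketch of the qla24xx_calc_iocbs() arithmetic: one DSD fits
 * in the Command Type 7 IOCB itself; every additional five DSDs (or any
 * fraction thereof) consume one Continuation Type 1 IOCB.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs = 1;			/* Command Type 7 IOCB */

	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;	/* full continuation IOCBs */
		if ((dsds - 1) % 5)
			iocbs++;		/* partially filled continuation */
	}
	return iocbs;
}

int main(void)
{
	uint16_t dsds;

	/* Expected output: 1 DSD -> 1 IOCB, 6 DSDs -> 2, 7 DSDs -> 3, ... */
	for (dsds = 1; dsds <= 12; dsds++)
		printf("%2u DSDs -> %u IOCBs\n", dsds, calc_iocbs(dsds));
	return 0;
}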