1 /******************************************************************************
2 * QLOGIC LINUX SOFTWARE
4 * QLogic ISP2x00 device driver for Linux 2.6.x
5 * Copyright (C) 2003-2004 QLogic Corporation
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2, or (at your option) any
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 ******************************************************************************/
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
25 static inline uint16_t qla2x00_get_cmd_direction(struct scsi_cmnd *cmd);
26 static inline cont_entry_t *qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *);
27 static inline cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *);
30 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
33 * Returns the proper CF_* direction based on CDB.
/*
 * Map the SCSI data direction of @cmd onto the firmware CF_* control-flag
 * value for the command IOCB.
 *
 * NOTE(review): this listing is missing lines — the function braces, the
 * actual CF_* assignments and the return statement are not visible here.
 * Presumably DMA_TO_DEVICE selects the write flag and DMA_FROM_DEVICE the
 * read flag; confirm against the full source.
 */
35 static inline uint16_t
36 qla2x00_get_cmd_direction(struct scsi_cmnd *cmd)
42 /* Set transfer direction */
43 if (cmd->sc_data_direction == DMA_TO_DEVICE)
45 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
51 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
52 * Continuation Type 0 IOCBs to allocate.
54 * @dsds: number of data segment descriptors needed
56 * Returns the number of IOCB entries needed to store @dsds.
/*
 * Per the visible expression: three DSDs fit in the Command Type 2 IOCB
 * itself, and each extra Continuation Type 0 IOCB holds seven more, hence
 * (dsds - 3) / 7.  NOTE(review): the surrounding lines that initialize
 * `iocbs`, round the division up, and return the result are not visible
 * in this listing — confirm against the full source.
 */
59 qla2x00_calc_iocbs_32(uint16_t dsds)
65 iocbs += (dsds - 3) / 7;
73 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
74 * Continuation Type 1 IOCBs to allocate.
76 * @dsds: number of data segment descriptors needed
78 * Returns the number of IOCB entries needed to store @dsds.
/*
 * Per the visible expression: two DSDs fit in the Command Type 3 IOCB
 * itself, and each extra Continuation Type 1 IOCB holds five more, hence
 * (dsds - 2) / 5.  NOTE(review): the initialization, round-up, and return
 * of `iocbs` are not visible in this listing — confirm against the full
 * source.
 */
81 qla2x00_calc_iocbs_64(uint16_t dsds)
87 iocbs += (dsds - 2) / 5;
95 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
98 * Returns a pointer to the Continuation Type 0 IOCB packet.
/*
 * Claim the next request-ring entry and stamp it as a Continuation
 * Type 0 IOCB, returning a pointer to it.  The ring index wraps back to
 * the start of the ring when it reaches request_q_length.
 *
 * NOTE(review): this listing drops lines — the "} else" between the wrap
 * branch and the pointer increment, and the closing brace/return, are not
 * visible.  Presumably called with the hardware lock held (it mutates the
 * shared ring state) — confirm against callers in the full source.
 */
100 static inline cont_entry_t *
101 qla2x00_prep_cont_type0_iocb(scsi_qla_host_t *ha)
103 cont_entry_t *cont_pkt;
105 /* Adjust ring index. */
106 ha->req_ring_index++;
107 if (ha->req_ring_index == ha->request_q_length) {
108 ha->req_ring_index = 0;
109 ha->request_ring_ptr = ha->request_ring;
111 ha->request_ring_ptr++;
114 cont_pkt = (cont_entry_t *)ha->request_ring_ptr;
116 /* Load packet defaults. */
117 *((uint32_t *)(&cont_pkt->entry_type)) =
118 __constant_cpu_to_le32(CONTINUE_TYPE);
124 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
127 * Returns a pointer to the continuation type 1 IOCB packet.
/*
 * 64-bit-address sibling of qla2x00_prep_cont_type0_iocb(): claim the
 * next request-ring entry and stamp it as a Continuation Type 1 IOCB
 * (CONTINUE_A64_TYPE), returning a pointer to it.
 *
 * NOTE(review): as with the Type 0 variant, the "} else" line and the
 * closing brace/return are missing from this listing; presumably called
 * with the hardware lock held — confirm against the full source.
 */
129 static inline cont_a64_entry_t *
130 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *ha)
132 cont_a64_entry_t *cont_pkt;
134 /* Adjust ring index. */
135 ha->req_ring_index++;
136 if (ha->req_ring_index == ha->request_q_length) {
137 ha->req_ring_index = 0;
138 ha->request_ring_ptr = ha->request_ring;
140 ha->request_ring_ptr++;
143 cont_pkt = (cont_a64_entry_t *)ha->request_ring_ptr;
145 /* Load packet defaults. */
146 *((uint32_t *)(&cont_pkt->entry_type)) =
147 __constant_cpu_to_le32(CONTINUE_A64_TYPE);
153 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
154 * capable IOCB types.
156 * @sp: SRB command to process
157 * @cmd_pkt: Command type 2 IOCB
158 * @tot_dsds: Total number of segments to transfer
/*
 * Populate a Command Type 2 (32-bit address) IOCB with the command's
 * data-segment descriptors: either walk the scatter/gather list,
 * spilling into Continuation Type 0 IOCBs every seven DSDs, or map the
 * single linear buffer and load one address/length pair.
 *
 * NOTE(review): this listing is missing lines throughout — the local
 * declarations (ha, cur_dsd, avail_dsds, the early-return for the
 * no-data case), the avail_dsds decrement in the loop, and closing
 * braces are not visible.  Comments below describe only what is shown.
 */
160 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
166 struct scsi_cmnd *cmd;
170 /* Update entry type to indicate Command Type 2 IOCB */
171 *((uint32_t *)(&cmd_pkt->entry_type)) =
172 __constant_cpu_to_le32(COMMAND_TYPE);
174 /* No data transfer */
175 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
176 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
182 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
184 /* Three DSDs are available in the Command Type 2 IOCB */
186 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
188 /* Load data segments */
189 if (cmd->use_sg != 0) {
190 struct scatterlist *cur_seg;
191 struct scatterlist *end_seg;
193 cur_seg = (struct scatterlist *)cmd->request_buffer;
194 end_seg = cur_seg + tot_dsds;
195 while (cur_seg < end_seg) {
196 cont_entry_t *cont_pkt;
198 /* Allocate additional continuation packets? */
199 if (avail_dsds == 0) {
201 * Seven DSDs are available in the Continuation
204 cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
205 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
/* Each 32-bit DSD is a (DMA address, length) pair. */
209 *cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
210 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
/* No S/G list: map the single linear buffer and load one DSD. */
218 unsigned long offset;
220 page = virt_to_page(cmd->request_buffer);
221 offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
222 req_dma = pci_map_page(ha->pdev, page, offset,
223 cmd->request_bufflen, cmd->sc_data_direction);
/* Remember the mapping so it can be unmapped on completion. */
225 sp->dma_handle = req_dma;
227 *cur_dsd++ = cpu_to_le32(req_dma);
228 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
233 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
234 * capable IOCB types.
236 * @sp: SRB command to process
237 * @cmd_pkt: Command type 3 IOCB
238 * @tot_dsds: Total number of segments to transfer
/*
 * 64-bit-address sibling of qla2x00_build_scsi_iocbs_32(): populate a
 * Command Type 3 IOCB, spilling into Continuation Type 1 IOCBs every
 * five DSDs.  Each 64-bit DSD is three words: address low (LSD),
 * address high (MSD), and length.
 *
 * NOTE(review): this listing is missing lines throughout — local
 * declarations, the early-return for the no-data case, the avail_dsds
 * bookkeeping, and closing braces are not visible.  Comments describe
 * only what is shown.
 */
240 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
246 struct scsi_cmnd *cmd;
250 /* Update entry type to indicate Command Type 3 IOCB */
251 *((uint32_t *)(&cmd_pkt->entry_type)) =
252 __constant_cpu_to_le32(COMMAND_A64_TYPE);
254 /* No data transfer */
255 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
256 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
262 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(cmd));
264 /* Two DSDs are available in the Command Type 3 IOCB */
266 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
268 /* Load data segments */
269 if (cmd->use_sg != 0) {
270 struct scatterlist *cur_seg;
271 struct scatterlist *end_seg;
273 cur_seg = (struct scatterlist *)cmd->request_buffer;
274 end_seg = cur_seg + tot_dsds;
275 while (cur_seg < end_seg) {
277 cont_a64_entry_t *cont_pkt;
279 /* Allocate additional continuation packets? */
280 if (avail_dsds == 0) {
282 * Five DSDs are available in the Continuation
285 cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
/* No '&' here: dseg_0_address is presumably an array in
 * cont_a64_entry_t (unlike the Type 0 case) — confirm. */
286 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
290 sle_dma = sg_dma_address(cur_seg);
291 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
292 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
293 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
/* No S/G list: map the single linear buffer and load one DSD. */
301 unsigned long offset;
303 page = virt_to_page(cmd->request_buffer);
304 offset = ((unsigned long)cmd->request_buffer & ~PAGE_MASK);
305 req_dma = pci_map_page(ha->pdev, page, offset,
306 cmd->request_bufflen, cmd->sc_data_direction);
308 sp->dma_handle = req_dma;
310 *cur_dsd++ = cpu_to_le32(LSD(req_dma));
311 *cur_dsd++ = cpu_to_le32(MSD(req_dma));
312 *cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
317 * qla2x00_start_scsi() - Send a SCSI command to the ISP
318 * @sp: command to send to the ISP
320 * Returns non-zero if a failure occurred, else zero.
/*
 * Build and submit a SCSI command IOCB for @sp on the request ring:
 * find a free outstanding-command handle, verify ring space, DMA-map
 * the data, fill the Command Type 2/3 packet via the per-HBA
 * build_scsi_iocbs hook, advance the ring, and ring the doorbell
 * (ISP_REQ_Q_IN).  Returns QLA_SUCCESS on submission or
 * QLA_FUNCTION_FAILED on any failure.
 *
 * NOTE(review): many lines are missing from this listing — local
 * declarations, the goto targets / error-path labels between the two
 * unlock+return tails, `break` statements, and several closing braces —
 * so the control flow shown here is incomplete.  Comments below stick
 * to what is visible.
 */
323 qla2x00_start_scsi(srb_t *sp)
329 struct scsi_cmnd *cmd;
333 cmd_entry_t *cmd_pkt;
335 struct scatterlist *sg;
341 /* Setup device pointers. */
343 fclun = sp->lun_queue->fclun;
344 ha = fclun->fcport->ha;
348 /* Send marker if required */
349 if (ha->marker_needed != 0) {
350 if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
351 return (QLA_FUNCTION_FAILED);
353 ha->marker_needed = 0;
356 /* Acquire ring specific lock */
357 spin_lock_irqsave(&ha->hardware_lock, flags);
359 /* Check for room in outstanding command list. */
360 handle = ha->current_outstanding_cmd;
361 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
363 if (handle == MAX_OUTSTANDING_COMMANDS)
365 if (ha->outstanding_cmds[handle] == 0)
368 if (index == MAX_OUTSTANDING_COMMANDS)
371 /* Calculate the number of request entries needed. */
372 req_cnt = (ha->calc_request_entries)(cmd->request->nr_hw_segments);
/* Refresh the cached free-entry count from the chip's OUT index
 * before concluding the ring is full (the +2 keeps a safety margin). */
373 if (ha->req_q_cnt < (req_cnt + 2)) {
374 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
375 if (ha->req_ring_index < cnt)
376 ha->req_q_cnt = cnt - ha->req_ring_index;
378 ha->req_q_cnt = ha->request_q_length -
379 (ha->req_ring_index - cnt);
381 if (ha->req_q_cnt < (req_cnt + 2))
384 /* Finally, we have enough space, now perform mappings. */
387 sg = (struct scatterlist *) cmd->request_buffer;
388 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
389 cmd->sc_data_direction);
392 } else if (cmd->request_bufflen) {
/* Recompute entry count from the actual mapped DSD total. */
395 req_cnt = (ha->calc_request_entries)(tot_dsds);
397 /* Build command packet */
398 ha->current_outstanding_cmd = handle;
399 ha->outstanding_cmds[handle] = sp;
/* Stash the handle so the ISR can find this sp on completion. */
401 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
402 ha->req_q_cnt -= req_cnt;
404 cmd_pkt = (cmd_entry_t *)ha->request_ring_ptr;
405 cmd_pkt->handle = handle;
406 /* Zero out remaining portion of packet. */
407 clr_ptr = (uint32_t *)cmd_pkt + 2;
408 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
409 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
412 SET_TARGET_ID(ha, cmd_pkt->target, fclun->fcport->loop_id);
415 cmd_pkt->lun = cpu_to_le16(fclun->lun);
417 /* Update tagged queuing modifier */
418 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
419 if (cmd->device->tagged_supported) {
421 case HEAD_OF_QUEUE_TAG:
422 cmd_pkt->control_flags =
423 __constant_cpu_to_le16(CF_HEAD_TAG);
425 case ORDERED_QUEUE_TAG:
426 cmd_pkt->control_flags =
427 __constant_cpu_to_le16(CF_ORDERED_TAG);
433 * Allocate at least 5 (+ QLA_CMD_TIMER_DELTA) seconds for RISC timeout.
435 timeout = (uint32_t)(cmd->timeout_per_command / HZ);
437 cmd_pkt->timeout = __constant_cpu_to_le16(0);
438 else if (timeout > 25)
439 cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout -
440 (5 + QLA_CMD_TIMER_DELTA));
442 cmd_pkt->timeout = cpu_to_le16((uint16_t)timeout);
444 /* Load SCSI command packet. */
445 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
446 cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
448 /* Build IOCB segments */
449 (ha->build_scsi_iocbs)(sp, cmd_pkt, tot_dsds);
451 /* Set total data segment count. */
452 cmd_pkt->entry_count = (uint8_t)req_cnt;
455 /* Adjust ring index. */
456 ha->req_ring_index++;
457 if (ha->req_ring_index == ha->request_q_length) {
458 ha->req_ring_index = 0;
459 ha->request_ring_ptr = ha->request_ring;
461 ha->request_ring_ptr++;
/* Command accepted: account for it and mark the SRB active. */
465 sp->lun_queue->out_cnt++;
466 sp->flags |= SRB_DMA_VALID;
467 sp->state = SRB_ACTIVE_STATE;
468 sp->u_start = jiffies;
470 /* Set chip new ring index. */
471 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
472 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
474 spin_unlock_irqrestore(&ha->hardware_lock, flags);
475 return (QLA_SUCCESS);
/* Failure tail (its label is not visible in this listing). */
478 spin_unlock_irqrestore(&ha->hardware_lock, flags);
480 return (QLA_FUNCTION_FAILED);
484 * qla2x00_marker() - Send a marker IOCB to the firmware.
488 * @type: marker modifier
490 * Can be called from both normal and interrupt context.
492 * Returns non-zero if a failure occurred, else zero.
/*
 * Build a Marker IOCB and issue it to the ISP.  Caller must hold the
 * hardware lock (see the qla2x00_marker() wrapper below).  For
 * MK_SYNC_ALL the marker applies to all targets, so target/lun are only
 * filled in for other modifier types.
 *
 * NOTE(review): lines are missing from this listing — the NULL check on
 * `pkt` guarding the failure path, and the qla2x00_isp_cmd(ha) call
 * implied by the "Issue command to ISP" comment — confirm against the
 * full source.
 */
495 __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
500 pkt = (mrk_entry_t *)qla2x00_req_pkt(ha);
502 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
504 return (QLA_FUNCTION_FAILED);
507 pkt->entry_type = MARKER_TYPE;
508 pkt->modifier = type;
510 if (type != MK_SYNC_ALL) {
511 pkt->lun = cpu_to_le16(lun);
512 SET_TARGET_ID(ha, pkt->target, loop_id);
516 /* Issue command to ISP */
519 return (QLA_SUCCESS);
/*
 * Locking wrapper around __qla2x00_marker(): takes the hardware lock
 * with IRQs saved, issues the marker, and releases the lock.
 * NOTE(review): the `return ret;` and closing brace are not visible in
 * this listing.
 */
523 qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
527 unsigned long flags = 0;
529 spin_lock_irqsave(&ha->hardware_lock, flags);
530 ret = __qla2x00_marker(ha, loop_id, lun, type);
531 spin_unlock_irqrestore(&ha->hardware_lock, flags);
537 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
540 * Note: The caller must hold the hardware lock before calling this routine.
542 * Returns NULL if function failed, else, a pointer to the request packet.
/*
 * Poll for up to ~1 second (HZ iterations, 2us apart) for a free
 * request-ring slot; on success return a zeroed request_t with
 * entry_count preset to 1, otherwise NULL.  Caller holds the hardware
 * lock; note the lock is dropped and reacquired inside the wait loop.
 *
 * NOTE(review): this listing is missing lines — the `else` of the
 * free-count calculation, the `break` after a slot is claimed, the
 * zero-store in the clearing loop, the interrupt-polling call hinted at
 * by the comments, and the final `return pkt;`.  Also note the
 * asymmetric spin_unlock()/spin_lock_irq() pair in the wait loop —
 * presumably intentional for this era of the driver, but worth
 * confirming against the full source.
 */
545 qla2x00_req_pkt(scsi_qla_host_t *ha)
547 device_reg_t *reg = ha->iobase;
548 request_t *pkt = NULL;
552 uint16_t req_cnt = 1;
554 /* Wait 1 second for slot. */
555 for (timer = HZ; timer; timer--) {
556 if ((req_cnt + 2) >= ha->req_q_cnt) {
557 /* Calculate number of free request entries. */
558 cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
559 if (ha->req_ring_index < cnt)
560 ha->req_q_cnt = cnt - ha->req_ring_index;
562 ha->req_q_cnt = ha->request_q_length -
563 (ha->req_ring_index - cnt);
565 /* If room for request in request ring. */
566 if ((req_cnt + 2) < ha->req_q_cnt) {
568 pkt = ha->request_ring_ptr;
570 /* Zero out packet. */
571 dword_ptr = (uint32_t *)pkt;
572 for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
575 /* Set system defined field. */
576 pkt->sys_define = (uint8_t)ha->req_ring_index;
578 /* Set entry count. */
579 pkt->entry_count = 1;
584 /* Release ring specific lock */
585 spin_unlock(&ha->hardware_lock);
587 udelay(2); /* 2 us */
589 /* Check for pending interrupts. */
590 /* During init we issue marker directly */
591 if (!ha->marker_needed)
594 spin_lock_irq(&ha->hardware_lock);
597 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
604 * qla2x00_ms_req_pkt() - Retrieve a Management Server request packet from
607 * @sp: pointer to handle post function call
609 * Note: The caller must hold the hardware lock before calling this routine.
611 * Returns NULL if function failed, else, a pointer to the request packet.
/*
 * Management Server variant of qla2x00_req_pkt(): in addition to
 * waiting for a free request-ring slot, it also claims a free
 * outstanding-command handle, links @sp into outstanding_cmds[], and
 * records the handle both in the packet and in host_scribble so the
 * ISR can match the completion.  Returns the zeroed request packet or
 * NULL on timeout.  Caller holds the hardware lock (dropped and
 * reacquired while waiting).
 *
 * NOTE(review): lines are missing — the `found` initialization, `break`
 * statements, the zero-store of the clearing loop, the interrupt-poll
 * call, and the final `return pkt;` are not visible in this listing.
 */
614 qla2x00_ms_req_pkt(scsi_qla_host_t *ha, srb_t *sp)
616 device_reg_t *reg = ha->iobase;
617 request_t *pkt = NULL;
618 uint16_t cnt, i, index;
622 uint16_t req_cnt = 1;
624 /* Wait 1 second for slot. */
625 for (timer = HZ; timer; timer--) {
626 if ((req_cnt + 2) >= ha->req_q_cnt) {
627 /* Calculate number of free request entries. */
628 cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
629 if (ha->req_ring_index < cnt) {
630 ha->req_q_cnt = cnt - ha->req_ring_index;
632 ha->req_q_cnt = ha->request_q_length -
633 (ha->req_ring_index - cnt);
637 /* Check for room in outstanding command list. */
638 cnt = ha->current_outstanding_cmd;
639 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
641 if (cnt == MAX_OUTSTANDING_COMMANDS)
644 if (ha->outstanding_cmds[cnt] == 0) {
646 ha->current_outstanding_cmd = cnt;
651 /* If room for request in request ring. */
652 if (found && (req_cnt + 2) < ha->req_q_cnt) {
653 pkt = ha->request_ring_ptr;
655 /* Zero out packet. */
656 dword_ptr = (uint32_t *)pkt;
657 for (i = 0; i < REQUEST_ENTRY_SIZE / 4; i++ )
660 DEBUG5(printk("%s(): putting sp=%p in "
661 "outstanding_cmds[%x]\n",
665 ha->outstanding_cmds[cnt] = sp;
667 /* save the handle */
668 sp->cmd->host_scribble = (unsigned char *) (u_long) cnt;
669 CMD_SP(sp->cmd) = (void *)sp;
672 pkt->handle = (uint32_t)cnt;
674 /* Set system defined field. */
675 pkt->sys_define = (uint8_t)ha->req_ring_index;
676 pkt->entry_status = 0;
681 /* Release ring specific lock */
682 spin_unlock(&ha->hardware_lock);
685 /* Check for pending interrupts. */
688 spin_lock_irq(&ha->hardware_lock);
691 DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
698 * qla2x00_isp_cmd() - Modify the request ring pointer.
701 * Note: The caller must hold the hardware lock before calling this routine.
/*
 * Advance the request-ring producer past the IOCB just built at
 * ha->request_ring_ptr (wrapping at request_q_length) and notify the
 * ISP by writing the new index to the request-queue IN register.
 * Caller must hold the hardware lock.
 *
 * NOTE(review): the "} else" between the wrap branch and the pointer
 * increment is not visible in this listing.
 */
704 qla2x00_isp_cmd(scsi_qla_host_t *ha)
706 device_reg_t *reg = ha->iobase;
708 DEBUG5(printk("%s(): IOCB data:\n", __func__));
709 DEBUG5(qla2x00_dump_buffer(
710 (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));
712 /* Adjust ring index. */
713 ha->req_ring_index++;
714 if (ha->req_ring_index == ha->request_q_length) {
715 ha->req_ring_index = 0;
716 ha->request_ring_ptr = ha->request_ring;
718 ha->request_ring_ptr++;
720 /* Set chip new ring index. */
721 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), ha->req_ring_index);
722 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */