1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
22 * $Id: lpfc_sli.c 1.178 2004/11/23 16:57:11EST sf_support Exp $
25 #include <linux/version.h>
26 #include <linux/blkdev.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
31 #include <scsi/scsi_cmnd.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_host.h>
36 #include "lpfc_disc.h"
37 #include "lpfc_scsi.h"
39 #include "lpfc_crtn.h"
41 #include "lpfc_logmsg.h"
43 #include "lpfc_compat.h"
/* Module-scope tunable: non-zero requests a board reset during SLI
 * initialization. NOTE(review): no code in this extract reads it back;
 * confirm its consumer elsewhere in the file.
 */
45 static int lpfc_sli_reset_on_init = 1;
48 * Define macro to log: Mailbox command x%x cannot issue Data
49 * This allows multiple uses of lpfc_msgBlk0311
50 * w/o perturbing log msg utility.
/*
 * Convenience macro that emits log message 0311 ("Mailbox command x%x
 * cannot issue") through lpfc_printf_log, so the message id is used
 * consistently at every call site.
 * NOTE(review): this extract is missing several continuation lines of the
 * macro body (the log level and some of the Data: arguments) - do not
 * edit the macro without consulting the full source.
 */
52 #define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
53 lpfc_printf_log(phba, \
56 "%d:0311 Mailbox command x%x cannot issue " \
57 "Data: x%x x%x x%x\n", \
61 psli->sliinit.sli_flag, \
65 /* This will save a huge switch to determine if the IOCB cmd
66 * is unsolicited or solicited.
/* IOCB classification categories used by the response-ring handler to
 * dispatch completions without a large switch statement.
 */
68 #define LPFC_UNKNOWN_IOCB 0
69 #define LPFC_UNSOL_IOCB 1
70 #define LPFC_SOL_IOCB 2
71 #define LPFC_ABORT_IOCB 3
/* Lookup table indexed by the IOCB ulpCommand code (0x00..CMD_MAX_IOCB_CMD-1)
 * giving the category above for each command. Consulted in
 * lpfc_sli_handle_ring_event. NOTE(review): several filler rows and the
 * closing brace of this initializer are missing from this extract.
 */
72 static uint8_t lpfc_sli_iocb_cmd_type[CMD_MAX_IOCB_CMD] = {
73 LPFC_UNKNOWN_IOCB, /* 0x00 */
74 LPFC_UNSOL_IOCB, /* CMD_RCV_SEQUENCE_CX 0x01 */
75 LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE_CR 0x02 */
76 LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE_CX 0x03 */
77 LPFC_SOL_IOCB, /* CMD_XMIT_BCAST_CN 0x04 */
78 LPFC_SOL_IOCB, /* CMD_XMIT_BCAST_CX 0x05 */
79 LPFC_UNKNOWN_IOCB, /* CMD_QUE_RING_BUF_CN 0x06 */
80 LPFC_UNKNOWN_IOCB, /* CMD_QUE_XRI_BUF_CX 0x07 */
81 LPFC_UNKNOWN_IOCB, /* CMD_IOCB_CONTINUE_CN 0x08 */
82 LPFC_UNKNOWN_IOCB, /* CMD_RET_XRI_BUF_CX 0x09 */
83 LPFC_SOL_IOCB, /* CMD_ELS_REQUEST_CR 0x0A */
84 LPFC_SOL_IOCB, /* CMD_ELS_REQUEST_CX 0x0B */
85 LPFC_UNKNOWN_IOCB, /* 0x0C */
86 LPFC_UNSOL_IOCB, /* CMD_RCV_ELS_REQ_CX 0x0D */
87 LPFC_ABORT_IOCB, /* CMD_ABORT_XRI_CN 0x0E */
88 LPFC_ABORT_IOCB, /* CMD_ABORT_XRI_CX 0x0F */
89 LPFC_ABORT_IOCB, /* CMD_CLOSE_XRI_CR 0x10 */
90 LPFC_ABORT_IOCB, /* CMD_CLOSE_XRI_CX 0x11 */
91 LPFC_SOL_IOCB, /* CMD_CREATE_XRI_CR 0x12 */
92 LPFC_SOL_IOCB, /* CMD_CREATE_XRI_CX 0x13 */
93 LPFC_SOL_IOCB, /* CMD_GET_RPI_CN 0x14 */
94 LPFC_SOL_IOCB, /* CMD_XMIT_ELS_RSP_CX 0x15 */
95 LPFC_SOL_IOCB, /* CMD_GET_RPI_CR 0x16 */
96 LPFC_ABORT_IOCB, /* CMD_XRI_ABORTED_CX 0x17 */
97 LPFC_SOL_IOCB, /* CMD_FCP_IWRITE_CR 0x18 */
98 LPFC_SOL_IOCB, /* CMD_FCP_IWRITE_CX 0x19 */
99 LPFC_SOL_IOCB, /* CMD_FCP_IREAD_CR 0x1A */
100 LPFC_SOL_IOCB, /* CMD_FCP_IREAD_CX 0x1B */
101 LPFC_SOL_IOCB, /* CMD_FCP_ICMND_CR 0x1C */
102 LPFC_SOL_IOCB, /* CMD_FCP_ICMND_CX 0x1D */
103 LPFC_UNKNOWN_IOCB, /* 0x1E */
104 LPFC_SOL_IOCB, /* CMD_FCP_TSEND_CX 0x1F */
105 LPFC_SOL_IOCB, /* CMD_ADAPTER_MSG 0x20 */
106 LPFC_SOL_IOCB, /* CMD_FCP_TRECEIVE_CX 0x21 */
107 LPFC_SOL_IOCB, /* CMD_ADAPTER_DUMP 0x22 */
108 LPFC_SOL_IOCB, /* CMD_FCP_TRSP_CX 0x23 */
/* Filler: unused command codes between 0x24 and 0x7F. */
110 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
111 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
112 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
113 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
115 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
116 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
117 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
118 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
119 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
122 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
123 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
124 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
125 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
126 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
139 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
140 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
143 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
144 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
145 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
146 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
147 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
150 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
151 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
152 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
153 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
154 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
/* 64-bit (SLI-2) command variants, 0x80 range. */
158 LPFC_UNSOL_IOCB, /* CMD_RCV_SEQUENCE64_CX 0x81 */
159 LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE64_CR 0x82 */
160 LPFC_SOL_IOCB, /* CMD_XMIT_SEQUENCE64_CX 0x83 */
161 LPFC_SOL_IOCB, /* CMD_XMIT_BCAST64_CN 0x84 */
162 LPFC_SOL_IOCB, /* CMD_XMIT_BCAST64_CX 0x85 */
163 LPFC_UNKNOWN_IOCB, /* CMD_QUE_RING_BUF64_CN 0x86 */
164 LPFC_UNKNOWN_IOCB, /* CMD_QUE_XRI_BUF64_CX 0x87 */
165 LPFC_UNKNOWN_IOCB, /* CMD_IOCB_CONTINUE64_CN 0x88 */
166 LPFC_UNKNOWN_IOCB, /* CMD_RET_XRI_BUF64_CX 0x89 */
167 LPFC_SOL_IOCB, /* CMD_ELS_REQUEST64_CR 0x8A */
168 LPFC_SOL_IOCB, /* CMD_ELS_REQUEST64_CX 0x8B */
169 LPFC_ABORT_IOCB, /* CMD_ABORT_MXRI64_CN 0x8C */
170 LPFC_UNSOL_IOCB, /* CMD_RCV_ELS_REQ64_CX 0x8D */
172 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
173 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
175 LPFC_SOL_IOCB, /* CMD_XMIT_ELS_RSP64_CX 0x95 */
176 LPFC_UNKNOWN_IOCB, /* 0x96 */
177 LPFC_UNKNOWN_IOCB, /* 0x97 */
178 LPFC_SOL_IOCB, /* CMD_FCP_IWRITE64_CR 0x98 */
179 LPFC_SOL_IOCB, /* CMD_FCP_IWRITE64_CX 0x99 */
180 LPFC_SOL_IOCB, /* CMD_FCP_IREAD64_CR 0x9A */
181 LPFC_SOL_IOCB, /* CMD_FCP_IREAD64_CX 0x9B */
182 LPFC_SOL_IOCB, /* CMD_FCP_ICMND64_CR 0x9C */
183 LPFC_SOL_IOCB, /* CMD_FCP_ICMND64_CX 0x9D */
184 LPFC_UNKNOWN_IOCB, /* 0x9E */
185 LPFC_SOL_IOCB, /* CMD_FCP_TSEND64_CX 0x9F */
186 LPFC_UNKNOWN_IOCB, /* 0xA0 */
187 LPFC_SOL_IOCB, /* CMD_FCP_TRECEIVE64_CX 0xA1 */
188 LPFC_UNKNOWN_IOCB, /* 0xA2 */
189 LPFC_SOL_IOCB, /* CMD_FCP_TRSP64_CX 0xA3 */
191 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
192 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
193 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
194 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
195 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
196 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
197 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
198 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
199 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
200 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
201 LPFC_SOL_IOCB, /* CMD_GEN_REQUEST64_CR 0xC2 */
202 LPFC_SOL_IOCB, /* CMD_GEN_REQUEST64_CX 0xC3 */
204 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
205 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
206 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
207 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
210 LPFC_SOL_IOCB, /* CMD_SENDTEXT_CR 0xD1 */
211 LPFC_SOL_IOCB, /* CMD_SENDTEXT_CX 0xD2 */
212 LPFC_SOL_IOCB, /* CMD_RCV_LOGIN 0xD3 */
213 LPFC_SOL_IOCB, /* CMD_ACCEPT_LOGIN 0xD4 */
214 LPFC_SOL_IOCB, /* CMD_REJECT_LOGIN 0xD5 */
217 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
218 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
219 LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB, LPFC_UNKNOWN_IOCB,
/*
 * lpfc_sli_ring_map - issue a CONFIG_RING mailbox command for each ring.
 * Allocates one mailbox from phba->mbox_mem_pool, loops over
 * psli->sliinit.num_rings issuing CONFIG_RING via polled mailbox, and on
 * any failure sets hba_state to LPFC_HBA_ERROR and frees the mailbox.
 * NOTE(review): several lines (return type, braces, error-exit returns)
 * are missing from this extract.
 */
231 lpfc_sli_ring_map(struct lpfc_hba * phba)
233 struct lpfc_sli *psli;
240 /* Get a Mailbox buffer to setup mailbox commands for HBA
242 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
244 phba->hba_state = LPFC_HBA_ERROR;
249 /* Initialize the struct lpfc_sli_ring structure for each ring */
250 for (i = 0; i < psli->sliinit.num_rings; i++) {
251 /* Issue a CONFIG_RING mailbox command for each ring */
252 phba->hba_state = LPFC_INIT_MBX_CMDS;
253 lpfc_config_ring(phba, i, pmb);
254 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
255 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
256 mbxStatus <status>, ring <num> */
257 lpfc_printf_log(phba,
260 "%d:0446 Adapter failed to init, "
261 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
267 phba->hba_state = LPFC_HBA_ERROR;
268 mempool_free( pmb, phba->mbox_mem_pool);
/* Success path: release the mailbox after all rings are configured. */
272 mempool_free( pmb, phba->mbox_mem_pool);
/*
 * lpfc_sli_ringtxcmpl_put - queue an issued IOCB on the ring's txcmplq
 * (commands awaiting completion) and, when the ring has a fast_lookup
 * table, record the iocb pointer at index ulpIoTag for O(1) completion
 * matching. Tags at or beyond the configured fast_iotag limit are logged
 * as message 0316 instead of being stored (bounds check partly elided in
 * this extract).
 */
277 lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
278 struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
282 list_add_tail(&piocb->list, &pring->txcmplq);
283 pring->txcmplq_cnt++;
285 if (pring->fast_lookup) {
286 /* Setup fast lookup based on iotag for completion */
287 iotag = piocb->iocb.ulpIoTag;
289 < phba->sli.sliinit.ringinit[pring->ringno].fast_iotag))
290 *(pring->fast_lookup + iotag) = piocb;
291 /* Cmd ring <ringno> put: iotag <iotag> greater then
294 configured max <fast_iotag> wd0 <icmd> */
295 lpfc_printf_log(phba,
298 "%d:0316 Cmd ring %d put: iotag x%x "
299 "greater then configured max x%x "
302 pring->ringno, iotag, phba->sli.sliinit
303 .ringinit[pring->ringno].fast_iotag,
304 *(((uint32_t *)(&piocb->iocb)) + 7));
/*
 * lpfc_sli_ringtx_get - dequeue and return the first pending IOCB from
 * the ring's transmit queue (pring->txq), or leave the queue untouched
 * when it is empty (the first pointer equals the list head).
 * NOTE(review): return statements are missing from this extract; caller
 * behavior suggests it returns the dequeued iocb or NULL - confirm.
 */
310 static struct lpfc_iocbq *
311 lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
313 struct list_head *dlp;
314 struct lpfc_iocbq *cmd_iocb;
315 struct lpfc_iocbq *next_iocb;
319 next_iocb = (struct lpfc_iocbq *) pring->txq.next;
320 if (next_iocb != (struct lpfc_iocbq *) & pring->txq) {
321 /* If the first ptr is not equal to the list header,
322 * deque the IOCBQ_t and return it.
324 cmd_iocb = next_iocb;
325 list_del(&cmd_iocb->list);
/*
 * lpfc_sli_next_iocb_slot - return a pointer to the next free command
 * IOCB slot in the host ring, or bail out when the ring is full.
 * Advances next_cmdidx (with wraparound at numCiocb); when the local copy
 * of the adapter's get index says the ring is full, it re-reads
 * pgp->cmdGetInx from the shared SLI-2 mailbox area. An out-of-range
 * get index is treated as a fatal adapter error (message 0315, HS_FFER3).
 */
332 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
334 MAILBOX_t *mbox = (MAILBOX_t *)phba->sli.MBhostaddr;
335 PGP *pgp = (PGP *)&mbox->us.s2.port[pring->ringno];
336 uint32_t max_cmd_idx =
337 phba->sli.sliinit.ringinit[pring->ringno].numCiocb;
340 if((pring->next_cmdidx == pring->cmdidx) &&
341 (++pring->next_cmdidx >= max_cmd_idx))
342 pring->next_cmdidx = 0;
344 if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
346 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
348 if (unlikely(pring->local_getidx >= max_cmd_idx)) {
349 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
350 "%d:0315 Ring %d issue: portCmdGet %d "
351 "is bigger then cmd ring %d\n",
352 phba->brd_no, pring->ringno,
353 pring->local_getidx, max_cmd_idx);
355 phba->hba_state = LPFC_HBA_ERROR;
356 lpfc_handle_eratt(phba, HS_FFER3);
/* Still full after refreshing the get index: no slot available. */
361 if (pring->local_getidx == pring->next_cmdidx)
365 iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
/*
 * lpfc_sli_submit_iocb - copy a prepared IOCB into the given command ring
 * slot and notify the HBA. Assigns an iotag, copies the command into the
 * ring slot, queues it on txcmplq when a completion routine is set
 * (otherwise frees it immediately), then advances cmdidx and writes it to
 * SLIM so the adapter sees the new command.
 * NOTE(review): the iotag is allocated against psli->ring[psli->fcp_ring]
 * regardless of which ring 'pring' is - confirm against the full source
 * whether this is intentional or should use 'pring'.
 */
371 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
372 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
374 struct lpfc_sli *psli = &phba->sli;
375 int ringno = pring->ringno;
378 * Allocate and set up an iotag
380 nextiocb->iocb.ulpIoTag =
381 lpfc_sli_next_iotag(phba, &psli->ring[psli->fcp_ring]);
384 * Issue iocb command to adapter
386 lpfc_sli_pcimem_bcopy((uint32_t *)&nextiocb->iocb,
387 (uint32_t *)(iocb), sizeof (IOCB_t));
389 psli->slistat.iocbCmd[ringno]++;
392 * If there is no completion routine to call, we can release the
393 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
394 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
396 if (nextiocb->iocb_cmpl)
397 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb)
399 mempool_free(nextiocb, phba->iocb_mem_pool);
402 * Let the HBA know what IOCB slot will be the next one the
403 * driver will put a command into.
405 pring->cmdidx = pring->next_cmdidx;
406 writeb(pring->cmdidx,
407 (u8 *)phba->MBslimaddr + (SLIMOFF + (ringno * 2)) * 4);
/*
 * lpfc_sli_update_full_ring - mark the ring full and arm a
 * ring-available interrupt: sets LPFC_CALL_RING_AVAILABLE, writes
 * CA_R0ATT|CA_R0CE_REQ (shifted per ring) to the Chip Attention
 * register, flushes with a readback, and bumps the iocbCmdFull counter.
 */
413 lpfc_sli_update_full_ring(struct lpfc_hba * phba,
414 struct lpfc_sli_ring *pring)
416 int ringno = pring->ringno;
418 pring->flag |= LPFC_CALL_RING_AVAILABLE;
423 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
424 * The HBA will tell us when an IOCB entry is available.
426 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
427 readl(phba->CAregaddr); /* flush */
429 phba->sli.slistat.iocbCmdFull[ringno]++;
/*
 * lpfc_sli_update_ring - ring the doorbell for 'pring': write CA_R0ATT
 * (shifted per ring) to the Chip Attention register and flush with a
 * readback, telling the HBA there is new work on this ring.
 */
433 lpfc_sli_update_ring(struct lpfc_hba * phba,
434 struct lpfc_sli_ring *pring)
436 int ringno = pring->ringno;
439 * Tell the HBA that there is work to do in this ring.
442 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
443 readl(phba->CAregaddr); /* flush */
/*
 * lpfc_sli_resume_iocb - drain the ring's transmit queue into the
 * command ring. Only proceeds when there is work queued, the link is
 * past LPFC_LINK_DOWN, link-attention processing permits it for the FCP
 * ring, and IOCB processing is not blocked by an outstanding mailbox
 * command. Submits iocbs while both a free ring slot and a queued iocb
 * exist, then rings the doorbell (or arms the ring-full interrupt).
 */
447 lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
449 struct lpfc_sli *psli = &phba->sli;
451 struct lpfc_iocbq *nextiocb;
455 * (a) there is anything on the txq to send
457 * (c) link attention events can be processed (fcp ring only)
458 * (d) IOCB processing is not blocked by the outstanding mbox command.
460 if (pring->txq_cnt &&
461 (phba->hba_state > LPFC_LINK_DOWN) &&
462 (pring->ringno != psli->fcp_ring ||
463 psli->sliinit.sli_flag & LPFC_PROCESS_LA) &&
464 !(pring->flag & LPFC_STOP_IOCB_MBX)) {
466 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
467 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
468 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
471 lpfc_sli_update_ring(phba, pring);
473 lpfc_sli_update_full_ring(phba, pring);
479 /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
/*
 * Re-enable IOCB submission on a ring that was stopped for a mailbox
 * command: clears LPFC_STOP_IOCB_MBX, refreshes the local copy of the
 * adapter's cmdGetInx from the SLI-2 port area, and resumes draining the
 * ring's transmit queue.
 */
481 lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
483 struct lpfc_sli *psli;
484 struct lpfc_sli_ring *pring;
488 pring = &psli->ring[ringno];
489 pgp = (PGP *) & (((MAILBOX_t *)psli->MBhostaddr)->us.s2.port[ringno]);
491 /* If the ring is active, flag it */
492 if (psli->ring[ringno].cmdringaddr) {
493 if (psli->ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
494 psli->ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
496 * Force update of the local copy of cmdGetInx
498 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
499 lpfc_sli_resume_iocb(phba, pring);
/*
 * lpfc_sli_chk_mbx_command - validate a mailbox command code.
 * Switch over every mailbox command the driver knows; known commands fall
 * through to the valid result, anything else is reported as unknown so
 * the caller (lpfc_sli_handle_mb_event) can treat a stray completion as
 * a fatal error. NOTE(review): several case labels, the return
 * statements, and the default arm are missing from this extract.
 */
505 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
509 switch (mbxCommand) {
513 case MBX_RUN_BIU_DIAG:
516 case MBX_CONFIG_LINK:
517 case MBX_CONFIG_RING:
519 case MBX_READ_CONFIG:
520 case MBX_READ_RCONFIG:
522 case MBX_READ_STATUS:
526 case MBX_READ_LNK_STAT:
528 case MBX_UNREG_LOGIN:
531 case MBX_DUMP_MEMORY:
532 case MBX_DUMP_CONTEXT:
537 case MBX_DEL_LD_ENTRY:
538 case MBX_RUN_PROGRAM:
542 case MBX_CONFIG_FARP:
544 case MBX_RUN_BIU_DIAG64:
545 case MBX_CONFIG_PORT:
546 case MBX_READ_SPARM64:
548 case MBX_REG_LOGIN64:
550 case MBX_FLASH_WR_ULA:
552 case MBX_LOAD_EXP_ROM:
/*
 * lpfc_sli_handle_mb_event - process a mailbox-completion interrupt
 * (SLI-2 only). Under host_lock: copies the completed mailbox out of the
 * shared SLI-2 area, spins briefly if ownership has not yet returned to
 * the host (stray interrupt -> message 0304), validates the command code
 * (unknown -> message 0323 + fatal error attention), retries
 * MBXERR_NO_RESOURCES failures, posts completions with a callback to the
 * discovery tasklet, then issues the next queued mailbox command and
 * re-enables IOCB processing on all rings.
 */
562 lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
567 struct lpfc_dmabuf *mp;
568 struct lpfc_sli *psli;
571 uint32_t process_next;
575 /* We should only get here if we are in SLI2 mode */
576 if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
580 spin_lock_irqsave(phba->host->host_lock, iflag);
582 psli->slistat.mboxEvent++;
584 /* Get a Mailbox buffer to setup mailbox commands for callback */
585 if ((pmb = psli->mbox_active)) {
587 mbox = (MAILBOX_t *) psli->MBhostaddr;
589 /* First check out the status word */
590 lpfc_sli_pcimem_bcopy((uint32_t *) mbox, (uint32_t *) pmbox,
593 /* Sanity check to ensure the host owns the mailbox */
594 if (pmbox->mbxOwner != OWN_HOST) {
595 /* Lets try for a while */
596 for (i = 0; i < 10240; i++) {
597 /* First copy command data */
598 lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
601 if (pmbox->mbxOwner == OWN_HOST)
604 /* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
606 lpfc_printf_log(phba,
609 "%d:0304 Stray Mailbox Interrupt "
610 "mbxCommand x%x mbxStatus x%x\n",
615 psli->sliinit.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
616 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/* Completion arrived: stop the mailbox timeout timer. */
621 del_timer_sync(&psli->mbox_tmo);
624 * It is a fatal error if unknown mbox command completion.
626 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
629 /* Unknow mailbox command compl */
630 lpfc_printf_log(phba,
633 "%d:0323 Unknown Mailbox command %x Cmpl\n",
636 phba->hba_state = LPFC_HBA_ERROR;
637 spin_unlock_irqrestore(phba->host->host_lock, iflag);
638 lpfc_handle_eratt(phba, HS_FFER3);
642 psli->mbox_active = NULL;
643 if (pmbox->mbxStatus) {
644 psli->slistat.mboxStatErr++;
645 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
646 /* Mbox cmd cmpl error - RETRYing */
647 lpfc_printf_log(phba,
650 "%d:0305 Mbox cmd cmpl error - "
651 "RETRYing Data: x%x x%x x%x x%x\n",
655 pmbox->un.varWords[0],
657 pmbox->mbxStatus = 0;
658 pmbox->mbxOwner = OWN_HOST;
659 psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
660 if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT)
662 spin_unlock_irqrestore(
663 phba->host->host_lock,
670 /* Mailbox cmd <cmd> Cmpl <cmpl> */
671 lpfc_printf_log(phba,
674 "%d:0307 Mailbox cmd x%x Cmpl x%p "
675 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
679 *((uint32_t *) pmbox),
680 pmbox->un.varWords[0],
681 pmbox->un.varWords[1],
682 pmbox->un.varWords[2],
683 pmbox->un.varWords[3],
684 pmbox->un.varWords[4],
685 pmbox->un.varWords[5],
686 pmbox->un.varWords[6],
687 pmbox->un.varWords[7]);
689 if (pmb->mbox_cmpl) {
690 /* Copy entire mbox completion over buffer */
691 lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
694 (MAILBOX_CMD_WSIZE)));
695 /* All mbox cmpls are posted to discovery tasklet */
696 lpfc_discq_post_event(phba, pmb, NULL,
/* No completion routine: free any attached DMA buffer and the mailbox. */
699 mp = (struct lpfc_dmabuf *) (pmb->context1);
701 lpfc_mbuf_free(phba, mp->virt, mp->phys);
704 mempool_free( pmb, phba->mbox_mem_pool);
710 process_next = 0; /* by default don't loop */
711 psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
713 /* Process next mailbox command if there is one */
714 if ((pmb = lpfc_mbox_get(phba))) {
715 if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) ==
717 mp = (struct lpfc_dmabuf *) (pmb->context1);
719 lpfc_mbuf_free(phba, mp->virt,
723 mempool_free( pmb, phba->mbox_mem_pool);
725 continue; /* loop back */
728 /* Turn on IOCB processing */
729 for (i = 0; i < psli->sliinit.num_rings; i++) {
730 lpfc_sli_turn_on_ring(phba, i);
733 /* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
734 while (!list_empty(&phba->freebufList)) {
735 struct lpfc_dmabuf *mp;
737 mp = (struct lpfc_dmabuf *)
738 (phba->freebufList.next);
740 lpfc_mbuf_free(phba, mp->virt,
748 } while (process_next);
750 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/*
 * lpfc_sli_process_unsol_iocb - dispatch an unsolicited IOCB.
 * Extracts the Rctl/Type pair (from word5 for receive-sequence frames;
 * ELS receive commands appear to use fixed values set in lines missing
 * from this extract) and routes the event either to the ring's profile
 * handler (prt[0]) or to the first prt[] entry whose rctl/type masks
 * match. ELS-ring events are posted to the discovery tasklet instead of
 * being handled inline; unmatched frames are logged as message 0313.
 */
754 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
755 struct lpfc_iocbq *saveq)
757 struct lpfc_sli * psli;
759 LPFC_RING_INIT_t * pringinit;
762 uint32_t match, ringno, i;
766 ringno = pring->ringno;
767 irsp = &(saveq->iocb);
768 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
769 || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
774 (WORD5 *) & (saveq->iocb.un.
776 Rctl = w5p->hcsw.Rctl;
777 Type = w5p->hcsw.Type;
779 /* unSolicited Responses */
780 pringinit = &psli->sliinit.ringinit[ringno];
781 if (pringinit->prt[0].profile) {
782 /* If this ring has a profile set, just
784 /* All unsol iocbs for LPFC_ELS_RING
785 * are posted to discovery tasklet.
787 if (ringno == LPFC_ELS_RING) {
788 lpfc_discq_post_event(phba, (void *)&pringinit->prt[0],
789 (void *)saveq, LPFC_EVT_UNSOL_IOCB);
793 lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
797 /* We must search, based on rctl / type
798 for the right routine */
799 for (i = 0; i < pringinit->num_mask;
801 if ((pringinit->prt[i].rctl ==
803 && (pringinit->prt[i].
805 /* All unsol iocbs for LPFC_ELS_RING
806 * are posted to discovery tasklet.
808 if (ringno == LPFC_ELS_RING) {
809 lpfc_discq_post_event(phba,
810 (void *)&pringinit->prt[i],
811 (void *)saveq, LPFC_EVT_UNSOL_IOCB);
815 lpfc_sli_rcv_unsol_event)
816 (phba, pring, saveq);
824 /* Unexpected Rctl / Type received */
825 /* Ring <ringno> handler: unexpected
826 Rctl <Rctl> Type <Type> received */
827 lpfc_printf_log(phba,
830 "%d:0313 Ring %d handler: unexpected Rctl x%x "
831 "Type x%x received \n",
/*
 * lpfc_search_txcmpl - linear fallback search of the ring's txcmplq for
 * the command iocb whose ulpIoTag matches the given response iocb; a
 * match is unlinked from the queue and txcmplq_cnt is decremented.
 * Used when the fast_lookup table cannot be used (see
 * lpfc_sli_ringtxcmpl_get).
 */
839 static struct lpfc_iocbq *
840 lpfc_search_txcmpl(struct lpfc_sli_ring * pring, struct lpfc_iocbq * prspiocb)
844 struct lpfc_iocbq *cmd_iocb;
845 struct lpfc_iocbq *iocb, *next_iocb;
848 irsp = &prspiocb->iocb;
849 iotag = irsp->ulpIoTag;
852 /* Search through txcmpl from the begining */
853 list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
855 if (iotag == icmd->ulpIoTag) {
858 list_del(&iocb->list);
859 pring->txcmplq_cnt--;
/*
 * lpfc_sli_ringtxcmpl_get - find and remove the command iocb matching a
 * response iocb's ulpIoTag. When the ring has a fast_lookup table and the
 * caller did not request a linear search (srch == 0), the tag indexes the
 * table directly (entry is cleared and the iocb unlinked); out-of-range
 * tags are logged as message 0317. Otherwise it falls back to
 * lpfc_search_txcmpl for a linear scan of txcmplq.
 */
866 static struct lpfc_iocbq *
867 lpfc_sli_ringtxcmpl_get(struct lpfc_hba * phba,
868 struct lpfc_sli_ring * pring,
869 struct lpfc_iocbq * prspiocb, uint32_t srch)
871 struct list_head *dlp;
873 struct lpfc_iocbq *cmd_iocb;
874 struct lpfc_sli *psli;
878 dlp = &pring->txcmplq;
880 if (pring->fast_lookup && (srch == 0)) {
882 * Use fast lookup based on iotag for completion
885 irsp = &prspiocb->iocb;
886 iotag = irsp->ulpIoTag;
887 if (iotag < psli->sliinit.ringinit[pring->ringno].fast_iotag) {
888 cmd_iocb = *(pring->fast_lookup + iotag);
889 *(pring->fast_lookup + iotag) = NULL;
891 list_del(&cmd_iocb->list);
892 pring->txcmplq_cnt--;
897 * Rsp ring <ringno> get: iotag <iotag> greater then
898 * configured max <fast_iotag> wd0 <irsp>
900 lpfc_printf_log(phba,
903 "%d:0317 Rsp ring %d get: iotag x%x "
904 "greater then configured max x%x "
907 pring->ringno, iotag,
908 psli->sliinit.ringinit[pring->ringno]
910 *(((uint32_t *) irsp) + 7));
/* Fallback: linear search when fast lookup is unavailable or bypassed. */
914 cmd_iocb = lpfc_search_txcmpl(pring, prspiocb);
/*
 * lpfc_sli_process_sol_iocb - complete a solicited IOCB. Under
 * host_lock, matches the response to its command via the iotag
 * (lpfc_sli_ringtxcmpl_get). ELS-ring completions are posted to the
 * discovery tasklet; others invoke iocb_cmpl directly with the lock
 * dropped around the callback. A missing originator is logged as message
 * 0322 except on the ELS ring, where it is expected after
 * lpfc_els_abort().
 */
920 lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
921 struct lpfc_iocbq *saveq)
923 struct lpfc_iocbq * cmdiocbp;
928 ringno = pring->ringno;
929 /* Solicited Responses */
930 /* Based on the iotag field, get the cmd IOCB
932 spin_lock_irqsave(phba->host->host_lock, iflag);
934 lpfc_sli_ringtxcmpl_get(phba, pring, saveq,
936 /* Call the specified completion
938 if (cmdiocbp->iocb_cmpl) {
939 /* All iocb cmpls for LPFC_ELS_RING
940 * are posted to discovery tasklet.
942 if (ringno == LPFC_ELS_RING) {
943 lpfc_discq_post_event(phba, (void *)cmdiocbp,
944 (void *)saveq, LPFC_EVT_SOL_IOCB);
947 if (cmdiocbp->iocb_flag & LPFC_IO_POLL) {
951 spin_unlock_irqrestore(phba->host->host_lock,
953 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
954 spin_lock_irqsave(phba->host->host_lock, iflag);
957 mempool_free( cmdiocbp, phba->iocb_mem_pool);
960 /* Could not find the initiating command
961 * based of the response iotag.
962 * This is expected on ELS ring because of lpfc_els_abort().
964 if (ringno != LPFC_ELS_RING) {
965 /* Ring <ringno> handler: unexpected
966 completion IoTag <IoTag> */
967 lpfc_printf_log(phba,
970 "%d:0322 Ring %d handler: unexpected "
971 "completion IoTag x%x Data: x%x x%x x%x x%x\n",
974 saveq->iocb.ulpIoTag,
975 saveq->iocb.ulpStatus,
976 saveq->iocb.un.ulpWord[4],
977 saveq->iocb.ulpCommand,
978 saveq->iocb.ulpContext);
981 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/*
 * lpfc_sli_handle_ring_event - main response-ring service loop (SLI-2).
 * Under host_lock: reads the adapter's rspPutInx from the shared SLI-2
 * port area (an out-of-range value -> message 0312 and fatal error
 * attention), then walks response entries up to that index. Each entry is
 * copied into an iocb buffer, the SLIM get-index is updated so the HBA
 * can reuse the slot, and entries are chained on iocb_continueq until the
 * ulpLe bit marks the end of a command. The completed chain is then
 * dispatched by category from lpfc_sli_iocb_cmd_type[] (solicited,
 * unsolicited, abort, adapter message/unknown), with the lock dropped
 * around upcalls. On exit it acknowledges freed response entries
 * (HA_R0RE_REQ) and, if the ring was marked full, restarts command
 * submission.
 */
985 lpfc_sli_handle_ring_event(struct lpfc_hba * phba,
986 struct lpfc_sli_ring * pring, uint32_t mask)
988 struct lpfc_sli * psli;
991 struct lpfc_iocbq * rspiocbp, *next_iocb;
992 struct lpfc_iocbq * cmdiocbp;
993 struct lpfc_iocbq * saveq;
997 uint32_t status, free_saveq;
998 uint32_t portRspPut, portRspMax;
999 int ringno, loopcnt, rc;
1001 unsigned long iflag;
1005 ringno = pring->ringno;
1009 spin_lock_irqsave(phba->host->host_lock, iflag);
1010 psli->slistat.iocbEvent[ringno]++;
1012 /* At this point we assume SLI-2 */
1013 mbox = (MAILBOX_t *) psli->MBhostaddr;
1014 pgp = (PGP *) & mbox->us.s2.port[ringno];
1015 hgp = (HGP *) & mbox->us.s2.host[ringno];
1017 /* portRspMax is the number of rsp ring entries for this specific
1019 portRspMax = psli->sliinit.ringinit[ringno].numRiocb;
1024 /* Gather iocb entries off response ring.
1025 * rspidx is the IOCB index of the next IOCB that the driver
1026 * is going to process.
1028 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1029 portRspPut = le32_to_cpu(pgp->rspPutInx);
1031 if (portRspPut >= portRspMax) {
1033 /* Ring <ringno> handler: portRspPut <portRspPut> is bigger then
1034 rsp ring <portRspMax> */
1035 lpfc_printf_log(phba,
1038 "%d:0312 Ring %d handler: portRspPut %d "
1039 "is bigger then rsp ring %d\n",
1041 ringno, portRspPut, portRspMax);
1043 * Treat it as adapter hardware error.
1045 phba->hba_state = LPFC_HBA_ERROR;
1046 spin_unlock_irqrestore(phba->host->host_lock, iflag);
1047 lpfc_handle_eratt(phba, HS_FFER3);
1053 /* Get the next available response iocb.
1054 * rspidx is the IOCB index of the next IOCB that the driver
1055 * is going to process.
1057 while (pring->rspidx != portRspPut) {
1058 /* get an iocb buffer to copy entry into */
1059 if ((rspiocbp = mempool_alloc(phba->iocb_mem_pool,
1060 GFP_ATOMIC)) == 0) {
1064 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1065 (uint32_t *) & rspiocbp->iocb,
1067 irsp = &rspiocbp->iocb;
1069 /* bump iocb available response index */
1070 if (++pring->rspidx >= portRspMax) {
1074 /* Let the HBA know what IOCB slot will be the next one the
1075 * driver will read a response from.
1077 to_slim = (uint8_t *) phba->MBslimaddr +
1078 (SLIMOFF + (ringno * 2) + 1) * 4;
1079 writeb( pring->rspidx, to_slim);
1081 /* chain all iocb entries until LE is set */
1082 if (list_empty(&(pring->iocb_continueq))) {
1083 list_add(&rspiocbp->list, &(pring->iocb_continueq));
1085 list_add_tail(&rspiocbp->list,
1086 &(pring->iocb_continueq));
1088 pring->iocb_continueq_cnt++;
1091 * When the ulpLe field is set, the entire Command has been
1092 * received. Start by getting a pointer to the first iocb entry
1097 * By default, the driver expects to free all resources
1098 * associated with this iocb completion.
1101 saveq = list_entry(pring->iocb_continueq.next,
1102 struct lpfc_iocbq, list);
1103 irsp = &(saveq->iocb);
1104 list_del_init(&pring->iocb_continueq);
1105 pring->iocb_continueq_cnt = 0;
1107 psli->slistat.iocbRsp[ringno]++;
1109 if(irsp->ulpStatus) {
1110 /* Rsp ring <ringno> error: IOCB */
1111 lpfc_printf_log(phba,
1114 "%d:0324 Rsp Ring %d error: IOCB Data: "
1115 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
1118 irsp->un.ulpWord[0],
1119 irsp->un.ulpWord[1],
1120 irsp->un.ulpWord[2],
1121 irsp->un.ulpWord[3],
1122 irsp->un.ulpWord[4],
1123 irsp->un.ulpWord[5],
1124 *(((uint32_t *) irsp) + 6),
1125 *(((uint32_t *) irsp) + 7));
1128 /* Determine if IOCB command is a solicited or
1129 unsolicited event */
1131 lpfc_sli_iocb_cmd_type[(irsp->
1134 if (type == LPFC_SOL_IOCB) {
1135 spin_unlock_irqrestore(phba->host->host_lock,
1137 rc = lpfc_sli_process_sol_iocb(phba, pring,
1139 spin_lock_irqsave(phba->host->host_lock, iflag);
1141 * If this solicted completion is an ELS
1142 * command, don't free the resources now because
1143 * the discoverytasklet does later.
1145 if (pring->ringno == LPFC_ELS_RING)
1150 } else if (type == LPFC_UNSOL_IOCB) {
1151 spin_unlock_irqrestore(phba->host->host_lock,
1153 rc = lpfc_sli_process_unsol_iocb(phba, pring,
1155 spin_lock_irqsave(phba->host->host_lock, iflag);
1158 * If this unsolicted completion is an ELS
1159 * command, don't free the resources now because
1160 * the discoverytasklet does later.
1162 if (pring->ringno == LPFC_ELS_RING)
1167 } else if (type == LPFC_ABORT_IOCB) {
1168 /* Solicited ABORT Responses */
1169 /* Based on the iotag field, get the cmd IOCB
1171 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1173 lpfc_sli_ringtxcmpl_get(phba, pring,
1175 /* Call the specified completion
1177 if (cmdiocbp->iocb_cmpl) {
1178 spin_unlock_irqrestore(
1179 phba->host->host_lock,
1181 (cmdiocbp->iocb_cmpl) (phba,
1184 phba->host->host_lock,
1187 mempool_free(cmdiocbp,
1188 phba->iocb_mem_pool);
1191 } else if (type == LPFC_UNKNOWN_IOCB) {
1192 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1194 char adaptermsg[LPFC_MAX_ADPTMSG];
1196 memset(adaptermsg, 0,
1198 memcpy(&adaptermsg[0], (uint8_t *) irsp,
1200 dev_warn(&((phba->pcidev)->dev),
1202 phba->brd_no, adaptermsg);
1204 /* Unknown IOCB command */
1205 lpfc_printf_log(phba,
1208 "%d:0321 Unknown IOCB command "
1209 "Data: x%x x%x x%x x%x\n",
1220 * Free up iocb buffer chain for command just
1223 if (!list_empty(&pring->iocb_continueq)) {
1224 list_for_each_entry_safe(rspiocbp,
1226 &pring->iocb_continueq, list) {
1227 list_del_init(&rspiocbp->list);
1228 mempool_free(rspiocbp,
1229 phba->iocb_mem_pool);
1232 mempool_free( saveq, phba->iocb_mem_pool);
1236 /* Entire Command has been received */
1237 entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1239 /* If the port response put pointer has not been updated, sync
1240 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
1241 * response put pointer.
1243 if (pring->rspidx == portRspPut) {
1244 portRspPut = le32_to_cpu(pgp->rspPutInx);
1246 } /* while (pring->rspidx != portRspPut) */
1248 if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
1249 /* At least one response entry has been freed */
1250 psli->slistat.iocbRspFull[ringno]++;
1251 /* SET RxRE_RSP in Chip Att register */
1252 status = ((CA_R0ATT | CA_R0RE_RSP) << (ringno * 4));
1253 writel(status, phba->CAregaddr);
1254 readl(phba->CAregaddr); /* flush */
1256 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1257 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1258 psli->slistat.iocbCmdEmpty[ringno]++;
1260 * Force update of the local copy of cmdGetInx
1262 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1263 lpfc_sli_resume_iocb(phba, pring);
1265 if ((psli->sliinit.ringinit[ringno].lpfc_sli_cmd_available))
1266 (psli->sliinit.ringinit[ringno].
1267 lpfc_sli_cmd_available) (phba, pring);
1271 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/*
 * lpfc_intr_prep - read and acknowledge the Host Attention register.
 * Returns 0-equivalent (early exit, body elided in this extract) while
 * the HBA is still initializing; otherwise reads HA, clears every
 * attention source except ERATT and LATT (those are preserved until
 * their handlers run), flushes the write, and returns the HA snapshot
 * for the caller (lpfc_sli_intr) to dispatch on.
 */
1276 lpfc_intr_prep(struct lpfc_hba * phba)
1280 /* Ignore all interrupts during initialization. */
1281 if (phba->hba_state < LPFC_LINK_DOWN)
1284 /* Read host attention register to determine interrupt source */
1285 ha_copy = readl(phba->HAregaddr);
1287 /* Clear Attention Sources, except ERATT (to preserve status) & LATT
1288 * (ha_copy & ~(HA_ERATT | HA_LATT));
1290 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
1291 readl(phba->HAregaddr); /* flush */
1293 } /* lpfc_intr_prep */
/*
 * lpfc_sli_intr - top-level SLI interrupt dispatcher. Obtains the host
 * attention snapshot from lpfc_intr_prep, then handles, in order: error
 * attention (read HS status, clear ERATT, run the error handler),
 * mailbox events, link attention (only when LPFC_PROCESS_LA allows), and
 * per-ring receive attention - deferring ring work if the ring is in a
 * STOP_IOCB state. The ha_copy >> 4 at the end walks the per-ring
 * attention nibbles as the loop advances.
 */
1296 lpfc_sli_intr(struct lpfc_hba * phba)
1298 struct lpfc_sli *psli;
1299 struct lpfc_sli_ring *pring;
1300 uint32_t ha_copy, status;
1304 psli->slistat.sliIntr++;
1307 * Call the HBA to see if it is interrupting. If not, don't claim
1310 ha_copy = lpfc_intr_prep(phba);
1315 if (ha_copy & HA_ERATT) {
1317 * There was a link/board error. Read the status register to
1318 * retrieve the error event and process it.
1320 psli->slistat.errAttnEvent++;
1321 status = readl(phba->HSregaddr);
1323 /* Clear Chip error bit */
1324 writel(HA_ERATT, phba->HAregaddr);
1325 readl(phba->HAregaddr); /* flush */
1327 lpfc_handle_eratt(phba, status);
1331 if (ha_copy & HA_MBATT) {
1332 /* There was a Mailbox event. */
1333 lpfc_sli_handle_mb_event(phba);
1336 if (ha_copy & HA_LATT) {
1338 * There was a link attention event. Provided the driver is in
1339 * a state to handle link events, handle this event.
1341 if (psli->sliinit.sli_flag & LPFC_PROCESS_LA) {
1342 lpfc_handle_latt(phba);
1346 /* Process all events on each ring */
1347 for (i = 0; i < psli->sliinit.num_rings; i++) {
1348 pring = &psli->ring[i];
1349 if ((ha_copy & HA_RXATT)
1350 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
1351 if (pring->flag & LPFC_STOP_IOCB_MASK) {
1352 pring->flag |= LPFC_DEFERRED_RING_EVENT;
1354 lpfc_sli_handle_ring_event(phba, pring,
1357 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
1360 ha_copy = (ha_copy >> 4);
/* Abort all outstanding IOCBs on a ring.  Commands still on the txq are
 * failed back immediately with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * Commands already handed to the HBA (txcmplq) are either completed
 * immediately (flag == LPFC_SLI_ABORT_IMED) or aborted by issuing an
 * ABTS/CLOSE IOCB per entry and letting the response ring clean up.
 */
1367 lpfc_sli_abort_iocb_ring(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
1370 struct lpfc_sli *psli;
1371 struct lpfc_iocbq *iocb, *next_iocb;
1372 struct lpfc_iocbq *abtsiocbp;
1373 IOCB_t *icmd = NULL, *cmd = NULL;
1380 /* Error everything on txq and txcmplq
1383 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
1384 list_del_init(&iocb->list);
1385 if (iocb->iocb_cmpl) {
1387 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1388 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1389 (iocb->iocb_cmpl) (phba, iocb, iocb);
1391 mempool_free( iocb, phba->iocb_mem_pool);
1396 INIT_LIST_HEAD(&(pring->txq));
1398 /* Next issue ABTS for everything on the txcmplq */
1399 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1402 if (flag == LPFC_SLI_ABORT_IMED) {
1404 * Immediate abort of IOCB: clear the fast_lookup entry,
1405 * if any, dequeue it and call the completion handler.
1407 iotag = cmd->ulpIoTag;
1408 if (pring->fast_lookup &&
1411 psli->sliinit.ringinit[pring->ringno].fast_iotag))
1412 *(pring->fast_lookup + iotag) = NULL;
1414 list_del_init(&iocb->list);
1415 pring->txcmplq_cnt--;
1417 if (iocb->iocb_cmpl) {
1418 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1419 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1420 (iocb->iocb_cmpl) (phba, iocb, iocb);
1422 mempool_free( iocb, phba->iocb_mem_pool);
1427 /* issue ABTS for this IOCB based on iotag */
1429 if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
1430 GFP_ATOMIC)) == 0) {
1434 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
1435 icmd = &abtsiocbp->iocb;
1437 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
1438 icmd->un.acxri.abortContextTag = cmd->ulpContext;
1439 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
1442 icmd->ulpClass = cmd->ulpClass;
/* ABORT_XRI only works with the link up; otherwise close the exchange. */
1443 if (phba->hba_state >= LPFC_LINK_UP) {
1444 icmd->ulpCommand = CMD_ABORT_XRI_CN;
1446 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
1450 if (lpfc_sli_issue_iocb
1451 (phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
1452 mempool_free(abtsiocbp, phba->iocb_mem_pool);
1456 /* The rsp ring completion will remove IOCB from txcmplq when
1457 * abort is read by HBA.
/* For an immediate abort everything was completed above, so the
 * txcmplq can simply be reinitialized.
 */
1461 if (flag == LPFC_SLI_ABORT_IMED) {
1462 INIT_LIST_HEAD(&(pring->txcmplq));
1463 pring->txcmplq_cnt = 0;
/* Reset the HBA board.  Drops back to REAL SLIM mode, writes an
 * MBX_RESTART command (plus a skip-post indicator) directly to SLIM,
 * disables PCI parity/SERR checking around a toggle of the INITFF bit in
 * the Host Control register, then reinitializes ring indices, frees
 * preposted ELS buffers, and immediately aborts all outstanding IOCBs.
 */
1470 lpfc_sli_brdreset(struct lpfc_hba * phba)
1473 struct lpfc_sli *psli;
1474 struct lpfc_sli_ring *pring;
1475 uint16_t cfg_value, skip_post;
1476 volatile uint32_t word0;
1479 struct lpfc_dmabuf *mp, *next_mp;
1483 /* A board reset must use REAL SLIM. */
1484 psli->sliinit.sli_flag &= ~LPFC_SLI2_ACTIVE;
1487 swpmb = (MAILBOX_t *) & word0;
1488 swpmb->mbxCommand = MBX_RESTART;
1491 to_slim = phba->MBslimaddr;
1492 writel(*(uint32_t *) swpmb, to_slim);
1493 readl(to_slim); /* flush */
1495 /* Only skip post after fc_ffinit is completed */
1496 if (phba->hba_state) {
1498 word0 = 1; /* This is really setting up word1 */
1501 word0 = 0; /* This is really setting up word1 */
1503 to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);
1504 writel(*(uint32_t *) swpmb, to_slim);
1505 readl(to_slim); /* flush */
1508 lpfc_printf_log(phba,
1511 "%d:0325 Reset HBA Data: x%x x%x\n",
1514 psli->sliinit.sli_flag);
1516 /* Turn off SERR, PERR in PCI cmd register */
1517 phba->hba_state = LPFC_INIT_START;
1519 /* perform board reset */
1520 phba->fc_eventTag = 0;
1522 phba->fc_prevDID = 0;
1524 /* Turn off parity checking and serr during the physical reset */
1525 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1526 pci_write_config_word(phba->pcidev, PCI_COMMAND,
1528 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1530 /* Now toggle INITFF bit in the Host Control Register */
1531 writel(HC_INITFF, phba->HCregaddr);
1533 readl(phba->HCregaddr); /* flush */
1534 writel(0, phba->HCregaddr);
1535 readl(phba->HCregaddr); /* flush */
1537 /* Restore PCI cmd register */
1539 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
1540 phba->hba_state = LPFC_INIT_START;
1542 /* Initialize relevant SLI info */
1543 for (i = 0; i < psli->sliinit.num_rings; i++) {
1544 pring = &psli->ring[i];
1547 pring->next_cmdidx = 0;
1548 pring->local_getidx = 0;
1550 pring->missbufcnt = 0;
1559 /* Cleanup preposted buffers on the ELS ring */
1560 pring = &psli->ring[LPFC_ELS_RING];
1561 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
1562 list_del(&mp->list);
1563 pring->postbufq_cnt--;
1564 lpfc_mbuf_free(phba, mp->virt, mp->phys);
/* Fail/abort everything still outstanding on every ring. */
1568 for (i = 0; i < psli->sliinit.num_rings; i++) {
1569 pring = &psli->ring[i];
1570 lpfc_sli_abort_iocb_ring(phba, pring, LPFC_SLI_ABORT_IMED);
/* Compute the memory-mapped addresses for the HBA SLIM area and the
 * control registers (HA, HC, CA, HS) from the two BAR mappings
 * established at attach time.  Pure pointer arithmetic, no I/O.
 */
1577 lpfc_setup_slim_access(struct lpfc_hba *phba)
1579 phba->MBslimaddr = phba->slim_memmap_p;
1580 phba->HAregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
1582 phba->HCregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
1584 phba->CAregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
1586 phba->HSregaddr = (uint32_t *) (phba->ctrl_regs_memmap_p) +
/* Bring the HBA from power-on/reset to a fully configured SLI2 state:
 * map register access, optionally reset the board, poll the Host Status
 * register until FFRDY|MBRDY (with escalating delays and one mid-loop
 * reset), then issue CONFIG_PORT by polled mailbox and run the pre/post
 * CONFIG_PORT initialization hooks.  Several failure paths retry once
 * after a board reset (gated by read_rev_reset) before giving up and
 * marking the adapter LPFC_HBA_ERROR.
 */
1592 lpfc_sli_hba_setup(struct lpfc_hba * phba)
1594 struct lpfc_sli *psli;
1596 int read_rev_reset, i, rc;
1601 /* Set up SLI interface for HBA register and HBA SLIM access */
1602 lpfc_setup_slim_access(phba);
1604 /* Set board state to initialization started */
1605 phba->hba_state = LPFC_INIT_START;
1608 /* On some platforms/OS's, the driver can't rely on the state the
1609 * adapter may be in. For this reason, the driver is allowed to reset
1610 * the HBA before initialization.
1612 if (lpfc_sli_reset_on_init) {
1613 phba->hba_state = 0; /* Don't skip post */
1614 lpfc_sli_brdreset(phba);
1615 phba->hba_state = LPFC_INIT_START;
1617 /* Sleep for 2.5 sec */
1622 /* Read the HBA Host Status Register */
1623 status = readl(phba->HSregaddr);
1625 /* Check status register to see what current state is */
1627 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
1629 /* Check every 100ms for 5 retries, then every 500ms for 5, then
1630 * every 2.5 sec for 5, then reset board and every 2.5 sec for
1634 /* Adapter failed to init, timeout, status reg
1636 lpfc_printf_log(phba,
1639 "%d:0436 Adapter failed to init, "
1640 "timeout, status reg x%x\n",
1643 phba->hba_state = LPFC_HBA_ERROR;
1647 /* Check to see if any errors occurred during init */
1648 if (status & HS_FFERM) {
1649 /* ERROR: During chipset initialization */
1650 /* Adapter failed to init, chipset, status reg
1652 lpfc_printf_log(phba,
1655 "%d:0437 Adapter failed to init, "
1656 "chipset, status reg x%x\n",
1659 phba->hba_state = LPFC_HBA_ERROR;
1665 } else if (i <= 10) {
/* Half-way through the retries, reset the board once and keep polling. */
1672 phba->hba_state = 0; /* Don't skip post */
1673 lpfc_sli_brdreset(phba);
1674 phba->hba_state = LPFC_INIT_START;
1676 /* Read the HBA Host Status Register */
1677 status = readl(phba->HSregaddr);
1680 /* Check to see if any errors occurred during init */
1681 if (status & HS_FFERM) {
1682 /* ERROR: During chipset initialization */
1683 /* Adapter failed to init, chipset, status reg <status> */
1684 lpfc_printf_log(phba,
1687 "%d:0438 Adapter failed to init, chipset, "
1691 phba->hba_state = LPFC_HBA_ERROR;
1695 /* Clear all interrupt enable conditions */
1696 writel(0, phba->HCregaddr);
1697 readl(phba->HCregaddr); /* flush */
1699 /* setup host attn register */
1700 writel(0xffffffff, phba->HAregaddr);
1701 readl(phba->HAregaddr); /* flush */
1703 /* Get a Mailbox buffer to setup mailbox commands for HBA
1705 if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1706 GFP_ATOMIC)) == 0) {
1707 phba->hba_state = LPFC_HBA_ERROR;
1711 /* Call pre CONFIG_PORT mailbox command initialization. A value of 0
1712 * means the call was successful. Any other nonzero value is a failure,
1713 * but if ERESTART is returned, the driver may reset the HBA and try
1716 if ((rc = lpfc_config_port_prep(phba))) {
1717 if ((rc == -ERESTART) && (read_rev_reset == 0)) {
1718 mempool_free( pmb, phba->mbox_mem_pool);
1719 phba->hba_state = 0; /* Don't skip post */
1720 lpfc_sli_brdreset(phba);
1721 phba->hba_state = LPFC_INIT_START;
1726 phba->hba_state = LPFC_HBA_ERROR;
1727 mempool_free( pmb, phba->mbox_mem_pool);
1731 /* Setup and issue mailbox CONFIG_PORT command */
1732 phba->hba_state = LPFC_INIT_MBX_CMDS;
1733 lpfc_config_port(phba, pmb);
1734 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
1735 /* Adapter failed to init, mbxCmd <cmd> CONFIG_PORT,
1736 mbxStatus <status> */
1737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1738 "%d:0442 Adapter failed to init, mbxCmd x%x "
1739 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
1740 phba->brd_no, pmb->mb.mbxCommand,
1741 pmb->mb.mbxStatus, 0);
1743 /* This clause gives the config_port call multiple
1744 chances to succeed. */
1745 if (read_rev_reset == 0) {
1746 mempool_free( pmb, phba->mbox_mem_pool);
1747 phba->hba_state = 0; /* Don't skip post */
1748 lpfc_sli_brdreset(phba);
1749 phba->hba_state = LPFC_INIT_START;
1755 psli->sliinit.sli_flag &= ~LPFC_SLI2_ACTIVE;
1756 phba->hba_state = LPFC_HBA_ERROR;
1757 mempool_free( pmb, phba->mbox_mem_pool);
1761 if ((rc = lpfc_sli_ring_map(phba))) {
1762 phba->hba_state = LPFC_HBA_ERROR;
1763 mempool_free( pmb, phba->mbox_mem_pool);
1766 psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
1768 /* Call post CONFIG_PORT mailbox command initialization. */
1769 if ((rc = lpfc_config_port_post(phba))) {
1770 phba->hba_state = LPFC_HBA_ERROR;
1771 mempool_free( pmb, phba->mbox_mem_pool);
1774 mempool_free( pmb, phba->mbox_mem_pool);
/* Abort all mailbox commands.  The currently active command (if any) is
 * completed with MBX_NOT_FINISHED after cancelling its timeout timer;
 * any associated DMA buffer is freed.  Queued (non-active) commands are
 * then drained from the mailbox queue and failed the same way.
 */
1785 lpfc_mbox_abort(struct lpfc_hba * phba)
1787 struct lpfc_sli *psli;
1788 LPFC_MBOXQ_t *pmbox;
1790 struct lpfc_dmabuf *mp;
1794 if (psli->mbox_active) {
1795 del_timer_sync(&psli->mbox_tmo);
1796 pmbox = psli->mbox_active;
1798 psli->mbox_active = NULL;
1799 if (pmbox->mbox_cmpl) {
1800 mb->mbxStatus = MBX_NOT_FINISHED;
1801 (pmbox->mbox_cmpl) (phba, pmbox);
1803 mp = (struct lpfc_dmabuf *) (pmbox->context1);
1805 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1808 mempool_free( pmbox, phba->mbox_mem_pool);
1810 psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1813 /* Abort all the non active mailbox commands. */
1814 pmbox = lpfc_mbox_get(phba);
1817 if (pmbox->mbox_cmpl) {
1818 mb->mbxStatus = MBX_NOT_FINISHED;
1819 (pmbox->mbox_cmpl) (phba, pmbox);
1821 mp = (struct lpfc_dmabuf *) (pmbox->context1);
1823 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1826 mempool_free( pmbox, phba->mbox_mem_pool);
1828 pmbox = lpfc_mbox_get(phba);
1832 /*! lpfc_mbox_timeout
1836 * \param hba Pointer to the per-adapter struct lpfc_hba structure
1837 * \param l1 Pointer to the driver's mailbox queue entry.
1843 * This routine handles mailbox timeout events at timer interrupt context.
/* Timer callback for a mailbox command that did not complete within
 * LPFC_MBOX_TMO.  Runs in timer (softirq) context under host_lock: logs
 * the timeout, fails the active command with MBX_NOT_FINISHED (freeing
 * its DMA buffer), clears the MBOX_ACTIVE flag, and then aborts every
 * remaining queued mailbox command via lpfc_mbox_abort().
 */
1846 lpfc_mbox_timeout(unsigned long ptr)
1848 struct lpfc_hba *phba;
1849 struct lpfc_sli *psli;
1850 LPFC_MBOXQ_t *pmbox;
1852 struct lpfc_dmabuf *mp;
1853 unsigned long iflag;
1855 phba = (struct lpfc_hba *)ptr;
1857 spin_lock_irqsave(phba->host->host_lock, iflag);
1859 pmbox = psli->mbox_active;
1862 /* Mbox cmd <mbxCommand> timeout */
1863 lpfc_printf_log(phba,
1866 "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
1870 psli->sliinit.sli_flag,
/* Re-check under the lock that the timed-out command is still active. */
1873 if (psli->mbox_active == pmbox) {
1874 psli->mbox_active = NULL;
1875 if (pmbox->mbox_cmpl) {
1876 mb->mbxStatus = MBX_NOT_FINISHED;
1877 (pmbox->mbox_cmpl) (phba, pmbox);
1879 mp = (struct lpfc_dmabuf *) (pmbox->context1);
1881 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1884 mempool_free( pmbox, phba->mbox_mem_pool);
1886 psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1889 lpfc_mbox_abort(phba);
1890 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/* Issue a mailbox command to the HBA.
 *
 * flag selects the mode: MBX_POLL spins for completion with host_lock
 * held (suitable for init paths); MBX_NOWAIT queues the command, arms
 * the mailbox timeout timer and returns, with completion delivered via
 * the interrupt handler.  MBX_STOP_IOCB additionally flags every active
 * ring to stop IOCB processing while the mailbox is outstanding.
 *
 * If another mailbox is already active: polling callers get
 * MBX_NOT_FINISHED (SLI allows only one polled mbox at a time), and
 * non-polling callers in SLI2 mode have the command queued as BUSY.
 * The command image is copied either to the host SLIM area (SLI2) or
 * directly to HBA SLIM (real SLIM), ownership is handed to the chip
 * (OWN_CHIP) and the chip is rung via the CA register.  MBX_CONFIG_PORT
 * is special-cased since it is the command that switches to SLI2 mode.
 */
1896 lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
1900 struct lpfc_sli *psli;
1901 uint32_t status, evtctr;
1904 unsigned long drvr_flag = 0;
1905 volatile uint32_t word0, ldata;
1909 if (flag & MBX_POLL) {
1910 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
1914 status = MBX_SUCCESS;
1916 if (psli->sliinit.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
1917 /* Polling for a mbox command when another one is already active
1918 * is not allowed in SLI. Also, the driver must have established
1919 * SLI2 mode to queue and process multiple mbox commands.
1922 if (flag & MBX_POLL) {
1923 spin_unlock_irqrestore(phba->host->host_lock,
1926 /* Mbox command <mbxCommand> cannot issue */
1927 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
1928 return (MBX_NOT_FINISHED);
1931 if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
1933 /* Mbox command <mbxCommand> cannot issue */
1934 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
1935 return (MBX_NOT_FINISHED);
1938 /* Handle STOP IOCB processing flag. This is only meaningful
1939 * if we are not polling for mbox completion.
1941 if (flag & MBX_STOP_IOCB) {
1942 flag &= ~MBX_STOP_IOCB;
1943 /* Now flag each ring */
1944 for (i = 0; i < psli->sliinit.num_rings; i++) {
1945 /* If the ring is active, flag it */
1946 if (psli->ring[i].cmdringaddr) {
1947 psli->ring[i].flag |=
1953 /* Another mailbox command is still being processed, queue this
1954 * command to be processed later.
1956 lpfc_mbox_put(phba, pmbox);
1958 /* Mbox cmd issue - BUSY */
1959 lpfc_printf_log(phba,
1962 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
1966 psli->sliinit.sli_flag,
1969 psli->slistat.mboxBusy++;
1970 if (flag == MBX_POLL) {
1971 spin_unlock_irqrestore(phba->host->host_lock,
1977 /* Handle STOP IOCB processing flag. This is only meaningful
1978 * if we are not polling for mbox completion.
1980 if (flag & MBX_STOP_IOCB) {
1981 flag &= ~MBX_STOP_IOCB;
1982 if (flag == MBX_NOWAIT) {
1983 /* Now flag each ring */
1984 for (i = 0; i < psli->sliinit.num_rings; i++) {
1985 /* If the ring is active, flag it */
1986 if (psli->ring[i].cmdringaddr) {
1987 psli->ring[i].flag |=
1994 psli->sliinit.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
1996 /* If we are not polling, we MUST be in SLI2 mode */
1997 if (flag != MBX_POLL) {
1998 if (!(psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE)) {
1999 psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2001 /* Mbox command <mbxCommand> cannot issue */
2002 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
2003 return (MBX_NOT_FINISHED);
2005 /* timeout active mbox command */
2006 mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
2009 /* Mailbox cmd <cmd> issue */
2010 lpfc_printf_log(phba,
2013 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
2017 psli->sliinit.sli_flag,
2020 psli->slistat.mboxCmd++;
2021 evtctr = psli->slistat.mboxEvent;
2023 /* next set own bit for the adapter and copy over command word */
2024 mb->mbxOwner = OWN_CHIP;
2026 if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
2028 /* First copy command data to host SLIM area */
2029 mbox = (MAILBOX_t *) psli->MBhostaddr;
2030 lpfc_sli_pcimem_bcopy((uint32_t *) mb, (uint32_t *) mbox,
2031 (sizeof (uint32_t) *
2032 (MAILBOX_CMD_WSIZE)));
2034 pci_dma_sync_single_for_device(phba->pcidev,
2035 phba->slim2p_mapping,
2039 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2040 /* copy command data into host mbox for cmpl */
2041 mbox = (MAILBOX_t *) psli->MBhostaddr;
2042 lpfc_sli_pcimem_bcopy((uint32_t *) mb,
2044 (sizeof (uint32_t) *
2045 (MAILBOX_CMD_WSIZE)));
2048 /* First copy mbox command data to HBA SLIM, skip past first
2050 to_slim = (uint8_t *) phba->MBslimaddr + sizeof (uint32_t);
2051 lpfc_memcpy_to_slim(to_slim, (void *)&mb->un.varWords[0],
2052 (MAILBOX_CMD_WSIZE - 1) * sizeof (uint32_t));
2054 /* Next copy over first word, with mbxOwner set */
2055 ldata = *((volatile uint32_t *)mb);
2056 to_slim = phba->MBslimaddr;
2057 writel(ldata, to_slim);
2058 readl(to_slim); /* flush */
2060 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2061 /* switch over to host mailbox */
2062 psli->sliinit.sli_flag |= LPFC_SLI2_ACTIVE;
2067 /* interrupt board to doit right away */
2068 writel(CA_MBATT, phba->CAregaddr);
2069 readl(phba->CAregaddr); /* flush */
2073 /* Don't wait for it to finish, just return */
2074 psli->mbox_active = pmbox;
/* MBX_POLL path: spin until the chip releases ownership or MBATT fires. */
2079 psli->mbox_active = NULL;
2080 if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
2081 /* First read mbox status word */
2082 mbox = (MAILBOX_t *) psli->MBhostaddr;
2083 word0 = *((volatile uint32_t *)mbox);
2084 word0 = le32_to_cpu(word0);
2086 /* First read mbox status word */
2087 word0 = readl(phba->MBslimaddr);
2090 /* Read the HBA Host Attention Register */
2091 ha_copy = readl(phba->HAregaddr);
2093 /* Wait for command to complete */
2094 while (((word0 & OWN_CHIP) == OWN_CHIP)
2095 || !(ha_copy & HA_MBATT)) {
2097 psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2098 spin_unlock_irqrestore(phba->host->host_lock,
2100 return (MBX_NOT_FINISHED);
2103 /* Check if we took a mbox interrupt while we were
2105 if (((word0 & OWN_CHIP) != OWN_CHIP)
2106 && (evtctr != psli->slistat.mboxEvent))
2109 spin_unlock_irqrestore(phba->host->host_lock,
2112 /* Can be in interrupt context, do not sleep */
2113 /* (or might be called with interrupts disabled) */
2116 spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2118 if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
2119 /* First copy command data */
2120 mbox = (MAILBOX_t *) psli->MBhostaddr;
2121 word0 = *((volatile uint32_t *)mbox);
2122 word0 = le32_to_cpu(word0);
2123 if (mb->mbxCommand == MBX_CONFIG_PORT) {
2125 volatile uint32_t slimword0;
2126 /* Check real SLIM for any errors */
2127 slimword0 = readl(phba->MBslimaddr);
2128 slimmb = (MAILBOX_t *) & slimword0;
2129 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2130 && slimmb->mbxStatus) {
2131 psli->sliinit.sli_flag &=
2137 /* First copy command data */
2138 word0 = readl(phba->MBslimaddr);
2140 /* Read the HBA Host Attention Register */
2141 ha_copy = readl(phba->HAregaddr);
/* Completion detected: copy results back into the caller's mailbox. */
2144 if (psli->sliinit.sli_flag & LPFC_SLI2_ACTIVE) {
2145 /* First copy command data */
2146 mbox = (MAILBOX_t *) psli->MBhostaddr;
2147 /* copy results back to user */
2148 lpfc_sli_pcimem_bcopy((uint32_t *) mbox,
2150 (sizeof (uint32_t) *
2151 MAILBOX_CMD_WSIZE));
2153 /* First copy command data */
2154 lpfc_memcpy_from_slim((void *)mb,
2156 sizeof (uint32_t) * (MAILBOX_CMD_WSIZE));
2159 writel(HA_MBATT, phba->HAregaddr);
2160 readl(phba->HAregaddr); /* flush */
2162 psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2163 status = mb->mbxStatus;
2166 if (flag == MBX_POLL) {
2167 spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
/* Queue an iocb on the tail of the ring's transmit queue (txq) for later
 * submission to the HBA.
 */
2173 lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2174 struct lpfc_iocbq * piocb)
2176 /* Insert the caller's iocb in the txq tail for later processing. */
2177 list_add_tail(&piocb->list, &pring->txq);
/* Return the next iocb to submit: drain the ring's txq first (via
 * lpfc_sli_ringtx_get) before the caller-supplied iocb in *piocb.
 */
2182 static struct lpfc_iocbq *
2183 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2184 struct lpfc_iocbq ** piocb)
2186 struct lpfc_iocbq * nextiocb;
2188 nextiocb = lpfc_sli_ringtx_get(phba, pring);
/* Submit an iocb to a ring.  Rejects submissions while the HBA is below
 * LINK_DOWN state or while IOCB processing is stopped for an outstanding
 * mailbox command; with the link down only abort/close/queue-buffer
 * commands are permitted.  SLI_IOCB_HIGH_PRIORITY bypasses the txq;
 * otherwise queued txq entries are drained ahead of this iocb.  If no
 * command-ring slot is free, the iocb is queued on the txq (unless the
 * caller asked for it back with SLI_IOCB_RET_IOCB).
 */
2198 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2199 struct lpfc_iocbq *piocb, uint32_t flag)
2201 struct lpfc_sli *psli = &phba->sli;
2202 int ringno = pring->ringno;
2203 struct lpfc_iocbq *nextiocb;
2207 * We should never get an IOCB if we are in a < LINK_DOWN state
2209 if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
2213 * Check to see if we are blocking IOCB processing because of a
2214 * outstanding mbox command.
2216 if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2219 if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
2221 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
2222 * can be issued if the link is not up.
2224 switch (piocb->iocb.ulpCommand) {
2225 case CMD_QUE_RING_BUF_CN:
2226 case CMD_QUE_RING_BUF64_CN:
2227 case CMD_CLOSE_XRI_CN:
2228 case CMD_ABORT_XRI_CN:
2230 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2231 * completion, iocb_cmpl MUST be 0.
2233 if (piocb->iocb_cmpl)
2234 piocb->iocb_cmpl = NULL;
2236 case CMD_CREATE_XRI_CR:
2243 * For FCP commands, we must be in a state where we can process link
2246 } else if (unlikely(pring->ringno == psli->fcp_ring &&
2247 !(psli->sliinit.sli_flag & LPFC_PROCESS_LA)))
2251 * Check to see if this is a high priority command.
2252 * If so bypass tx queue processing.
2254 if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
2255 (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
2256 lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
/* Normal path: fill free command-ring slots from the txq (and then
 * this iocb) until either slots or iocbs run out.
 */
2260 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2261 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2262 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2265 lpfc_sli_update_ring(phba, pring);
2267 lpfc_sli_update_full_ring(phba, pring);
2270 return IOCB_SUCCESS;
2275 psli->slistat.iocbCmdDelay[ringno]++;
2279 if (!(flag & SLI_IOCB_RET_IOCB)) {
2280 lpfc_sli_ringtx_put(phba, pring, piocb);
2281 return IOCB_SUCCESS;
/* Initialize the SLI queues: the mailbox queue and, per ring, the ring
 * indices and the txq/txcmplq/iocb_continueq/postbufq list heads.  Rings
 * configured with a nonzero fast_iotag also get a zeroed fast-lookup
 * table (iotag -> iocbq pointer) allocated here.
 */
2288 lpfc_sli_queue_setup(struct lpfc_hba * phba)
2290 struct lpfc_sli *psli;
2291 struct lpfc_sli_ring *pring;
2295 INIT_LIST_HEAD(&psli->mboxq);
2296 /* Initialize list headers for txq and txcmplq as double linked lists */
2297 for (i = 0; i < psli->sliinit.num_rings; i++) {
2298 pring = &psli->ring[i];
2300 pring->next_cmdidx = 0;
2301 pring->local_getidx = 0;
2303 INIT_LIST_HEAD(&pring->txq);
2304 INIT_LIST_HEAD(&pring->txcmplq);
2305 INIT_LIST_HEAD(&pring->iocb_continueq);
2306 INIT_LIST_HEAD(&pring->postbufq);
2307 cnt = psli->sliinit.ringinit[i].fast_iotag;
2309 pring->fast_lookup =
2310 kmalloc(cnt * sizeof (struct lpfc_iocbq *),
2312 if (pring->fast_lookup == 0) {
2315 memset((char *)pring->fast_lookup, 0,
2316 cnt * sizeof (struct lpfc_iocbq *));
/* Take the SLI layer down.  Fails back every txq iocb with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, frees per-ring fast-lookup tables,
 * returns the active and all queued mailbox commands (freeing attached
 * DMA buffers), and finally resets the board unless it is already in
 * the error state.
 */
2323 lpfc_sli_hba_down(struct lpfc_hba * phba)
2325 struct lpfc_sli *psli;
2326 struct lpfc_sli_ring *pring;
2328 struct lpfc_dmabuf *mp;
2329 struct lpfc_iocbq *iocb, *next_iocb;
2330 IOCB_t *icmd = NULL;
2334 lpfc_hba_down_prep(phba);
2336 for (i = 0; i < psli->sliinit.num_rings; i++) {
2337 pring = &psli->ring[i];
2338 pring->flag |= LPFC_DEFERRED_RING_EVENT;
2341 * Error everything on the txq since these iocbs have not been
2342 * given to the FW yet.
2346 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2347 list_del_init(&iocb->list);
2348 if (iocb->iocb_cmpl) {
2350 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2351 icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2352 (iocb->iocb_cmpl) (phba, iocb, iocb);
2354 mempool_free( iocb, phba->iocb_mem_pool);
2358 INIT_LIST_HEAD(&(pring->txq));
2360 if (pring->fast_lookup) {
2361 kfree(pring->fast_lookup);
2362 pring->fast_lookup = NULL;
2367 /* Return any active mbox cmds */
2368 del_timer_sync(&psli->mbox_tmo);
2369 if ((psli->mbox_active)) {
2370 pmb = psli->mbox_active;
2371 mp = (struct lpfc_dmabuf *) (pmb->context1);
2373 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2376 mempool_free(psli->mbox_active, phba->mbox_mem_pool);
2378 psli->sliinit.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2379 psli->mbox_active = NULL;
2381 /* Return any pending mbox cmds */
2382 while ((pmb = lpfc_mbox_get(phba)) != NULL) {
2383 mp = (struct lpfc_dmabuf *) (pmb->context1);
2385 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2388 mempool_free(pmb, phba->mbox_mem_pool);
2391 INIT_LIST_HEAD(&psli->mboxq);
2394 * Provided the hba is not in an error state, reset it. It is not
2395 * capable of IO anymore.
2397 if (phba->hba_state != LPFC_HBA_ERROR) {
2398 phba->hba_state = LPFC_INIT_START;
2399 lpfc_sli_brdreset(phba);
/* Copy cnt bytes of 32-bit words between the host and PCI (SLIM) memory
 * images, byte-swapping each word from little-endian as needed.
 */
2406 lpfc_sli_pcimem_bcopy(uint32_t * src, uint32_t * dest, uint32_t cnt)
2411 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
2413 ldata = le32_to_cpu(ldata);
/* Track a posted DMA buffer on the ring's postbufq so it can later be
 * matched back to its virtual address by physical address (see
 * lpfc_sli_ringpostbuf_get).
 */
2419 lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2420 struct lpfc_dmabuf * mp)
2422 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
2424 list_add_tail(&mp->list, &pring->postbufq);
2426 pring->postbufq_cnt++;
/* Find and remove the posted buffer whose DMA (physical) address matches
 * phys, syncing it for CPU access before returning it.  Logs an error if
 * no postbufq entry matches.
 */
2431 struct lpfc_dmabuf *
2432 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2435 struct lpfc_dmabuf *mp, *next_mp;
2436 struct list_head *slp = &pring->postbufq;
2438 /* Search postbufq, from the beginning, looking for a match on phys */
2439 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2440 if (mp->phys == phys) {
2441 list_del_init(&mp->list);
2442 pring->postbufq_cnt--;
2443 pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys,
2444 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
2449 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2450 "%d:0410 Cannot find virtual addr for mapped buf on "
2451 "ring %d Data x%llx x%p x%p x%x\n",
2452 phba->brd_no, pring->ringno, (unsigned long long)phys,
2453 slp->next, slp->prev, pring->postbufq_cnt);
/* Allocate the next I/O tag for a ring.  Without a fast_lookup table the
 * counter simply wraps within [1, iotag_max).  With a table, the counter
 * is advanced (wrapping within [1, fast_iotag)) until a free slot is
 * found; if a full cycle finds none, the ring is at its outstanding-I/O
 * limit and an error is logged.
 */
2458 lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
2460 LPFC_RING_INIT_t *pringinit;
2461 struct lpfc_sli *psli;
2462 uint32_t search_start;
2465 pringinit = &psli->sliinit.ringinit[pring->ringno];
2467 if (pring->fast_lookup == NULL) {
2468 pringinit->iotag_ctr++;
2469 if (pringinit->iotag_ctr >= pringinit->iotag_max)
2470 pringinit->iotag_ctr = 1;
2471 return pringinit->iotag_ctr;
2474 search_start = pringinit->iotag_ctr;
2477 pringinit->iotag_ctr++;
2478 if (pringinit->iotag_ctr >= pringinit->fast_iotag)
2479 pringinit->iotag_ctr = 1;
2481 if(*(pring->fast_lookup + pringinit->iotag_ctr) == NULL)
2482 return pringinit->iotag_ctr;
2484 } while (pringinit->iotag_ctr != search_start);
2487 * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
2489 lpfc_printf_log(phba,
2492 "%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
2495 psli->sliinit.ringinit[pring->ringno].fast_iotag);
/* Completion handler for an abort of an ELS_REQUEST64 IOCB.  Frees the
 * DMA resources that were transferred from the aborted command:
 * context2 = command buffer (whose list links the response buffer) and
 * context3 = BPL, then returns the abort iocbq to the pool.
 */
2500 lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2501 struct lpfc_iocbq * rspiocb)
2503 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
2504 /* Free the resources associated with the ELS_REQUEST64 IOCB the driver
2506 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl
2508 if (cmdiocb->context2) {
2509 buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
2511 /* Free the response IOCB before completing the abort
2513 if (!list_empty(&buf_ptr1->list)) {
2515 buf_ptr = list_entry(buf_ptr1->list.next,
2516 struct lpfc_dmabuf, list);
2518 list_del(&buf_ptr->list);
2519 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2522 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2526 if (cmdiocb->context3) {
2527 buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
2528 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2531 mempool_free( cmdiocb, phba->iocb_mem_pool);
/* Abort a single outstanding IOCB by its 32-bit I/O tag, issuing an
 * ABORT_MXRI64 on the same ring.  For ELS_REQUEST64 commands the DMA
 * contexts are moved onto the abort iocb (with
 * lpfc_sli_abort_elsreq_cmpl as its completion) so the firmware can
 * still reference them until the abort itself completes.
 */
2536 lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2537 struct lpfc_sli_ring * pring,
2538 struct lpfc_iocbq * cmdiocb)
2540 struct lpfc_sli *psli;
2541 struct lpfc_iocbq *abtsiocbp;
2542 IOCB_t *icmd = NULL;
2543 IOCB_t *iabt = NULL;
2548 /* issue ABTS for this IOCB based on iotag */
2549 if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC)) == 0) {
2552 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
2553 iabt = &abtsiocbp->iocb;
2555 icmd = &cmdiocb->iocb;
2556 switch (icmd->ulpCommand) {
2557 case CMD_ELS_REQUEST64_CR:
2558 iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
2559 /* Even though we abort the ELS command, the firmware may access
2560 * the BPL or other resources before it processes our
2561 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
2562 * resources till the actual abort request completes.
2564 abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
2565 abtsiocbp->context2 = cmdiocb->context2;
2566 abtsiocbp->context3 = cmdiocb->context3;
2567 cmdiocb->context2 = NULL;
2568 cmdiocb->context3 = NULL;
2569 abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
2572 mempool_free( abtsiocbp, phba->iocb_mem_pool);
2576 iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
2577 iabt->un.amxri.iotag32 = iotag32;
2580 iabt->ulpClass = CLASS3;
2581 iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
2583 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
2584 mempool_free( abtsiocbp, phba->iocb_mem_pool);
/* Abort all IOCBs on a ring that match the given ulpContext.  Matching
 * txq entries are failed back immediately with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED; matching txcmplq entries get an
 * ABTS (or CLOSE when the link is down) issued per entry, and the
 * response ring completion removes them from the txcmplq later.
 */
2592 lpfc_sli_abort_iocb_ctx(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2595 struct lpfc_sli *psli;
2596 struct lpfc_iocbq *iocb, *next_iocb;
2597 struct lpfc_iocbq *abtsiocbp;
2598 IOCB_t *icmd = NULL, *cmd = NULL;
2604 /* Error matching iocb on txq or txcmplq
2605 * First check the txq.
2607 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2609 if (cmd->ulpContext != ctx) {
2613 list_del_init(&iocb->list);
2615 if (iocb->iocb_cmpl) {
2617 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2618 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2619 (iocb->iocb_cmpl) (phba, iocb, iocb);
2621 mempool_free( iocb, phba->iocb_mem_pool);
2625 /* Next check the txcmplq */
2626 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2628 if (cmd->ulpContext != ctx) {
2632 /* issue ABTS for this IOCB based on iotag */
2633 if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
2634 GFP_ATOMIC)) == 0) {
2638 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
2639 icmd = &abtsiocbp->iocb;
2641 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2642 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2643 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2646 icmd->ulpClass = cmd->ulpClass;
2647 if (phba->hba_state >= LPFC_LINK_UP) {
2648 icmd->ulpCommand = CMD_ABORT_XRI_CN;
2650 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
2653 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
2655 mempool_free( abtsiocbp, phba->iocb_mem_pool);
2659 /* The rsp ring completion will remove IOCB from txcmplq when
2660 * abort is read by HBA.
/* Count outstanding FCP commands (ICMND64/IWRITE64/IREAD64 with a valid
 * struct lpfc_scsi_buf in context1) across both the txq and txcmplq of
 * the given ring.
 */
2667 lpfc_sli_sum_iocb_host(struct lpfc_hba * phba,
2668 struct lpfc_sli_ring * pring)
2670 struct lpfc_sli *psli;
2671 struct lpfc_iocbq *iocb, *next_iocb;
2673 struct lpfc_scsi_buf *lpfc_cmd;
2679 /* Error matching iocb on txq or txcmplq
2680 * First check the txq.
2682 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2685 /* Must be a FCP command */
2686 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2687 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2688 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2692 /* context1 MUST be a struct lpfc_scsi_buf */
2693 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2694 if (lpfc_cmd == 0) {
2700 /* Next check the txcmplq */
2701 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2704 /* Must be a FCP command */
2705 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2706 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2707 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2711 /* context1 MUST be a struct lpfc_scsi_buf */
2712 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2713 if (lpfc_cmd == 0) {
/* Abort outstanding FCP commands on a ring.  flag selects which queues:
 * LPFC_ABORT_TXQ fails txq entries back immediately with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED; LPFC_ABORT_TXCMPLQ issues an
 * ABTS (or CLOSE when the link is down) for each txcmplq entry and lets
 * the response ring completion clean them up.
 */
2722 lpfc_sli_abort_iocb_host(struct lpfc_hba * phba,
2723 struct lpfc_sli_ring * pring, int flag)
2725 struct lpfc_sli *psli;
2726 struct lpfc_iocbq *iocb, *next_iocb;
2727 struct lpfc_iocbq *abtsiocbp;
2728 IOCB_t *icmd = NULL, *cmd = NULL;
2729 struct lpfc_scsi_buf *lpfc_cmd;
2735 /* Error matching iocb on txq or txcmplq
2736 * First check the txq.
2738 if(flag & LPFC_ABORT_TXQ) {
2739 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2742 /* Must be a FCP command */
2743 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2744 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2745 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2749 /* context1 MUST be a struct lpfc_scsi_buf */
2750 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2751 if (lpfc_cmd == 0) {
2755 list_del_init(&iocb->list);
2757 if (iocb->iocb_cmpl) {
2759 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2760 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2761 (iocb->iocb_cmpl) (phba, iocb, iocb);
2763 mempool_free( iocb, phba->iocb_mem_pool);
2768 if(flag & LPFC_ABORT_TXCMPLQ) {
2769 /* Next check the txcmplq */
2770 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
2774 /* Must be a FCP command */
2775 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2776 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2777 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2781 /* context1 MUST be a struct lpfc_scsi_buf */
2782 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2783 if (lpfc_cmd == 0) {
2787 /* issue ABTS for this IOCB based on iotag */
2788 if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
2789 GFP_ATOMIC)) == 0) {
2793 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
2794 icmd = &abtsiocbp->iocb;
2796 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2797 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2798 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2801 icmd->ulpClass = cmd->ulpClass;
2802 if (phba->hba_state >= LPFC_LINK_UP) {
2803 icmd->ulpCommand = CMD_ABORT_XRI_CN;
2805 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
2808 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
2810 mempool_free( abtsiocbp, phba->iocb_mem_pool);
2814 /* The rsp ring completion will remove IOCB from
2815 * txcmplq when abort is read by HBA.
/*
 * lpfc_sli_sum_iocb_lun
 *
 * Scan this ring's txq (commands queued but not yet handed to the HBA)
 * and txcmplq (commands issued and awaiting completion) for pending FCP
 * command IOCBs (ICMND64 / IWRITE64 / IREAD64) whose SCSI buffer is
 * addressed to the given SCSI target/lun pair.
 *
 * NOTE(review): appears to return a count of matching commands (the
 * "sum" in the name); the accumulator/return statements are not visible
 * in this fragment -- confirm against the full source.
 */
2823 lpfc_sli_sum_iocb_lun(struct lpfc_hba * phba,
2824 struct lpfc_sli_ring * pring,
2825 uint16_t scsi_target, uint64_t scsi_lun)
2827 struct lpfc_sli *psli;
2828 struct lpfc_iocbq *iocb, *next_iocb;
2830 struct lpfc_scsi_buf *lpfc_cmd;
2836 /* Error matching iocb on txq or txcmplq
2837 * First check the txq.
2839 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2842 /* Must be a FCP command */
2843 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2844 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2845 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2849 /* context1 MUST be a struct lpfc_scsi_buf */
2850 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
/* Skip entries without a SCSI buffer or aimed at a different tgt/lun */
2851 if ((lpfc_cmd == 0) ||
2852 (lpfc_cmd->pCmd->device->id != scsi_target) ||
2853 (lpfc_cmd->pCmd->device->lun != scsi_lun)) {
2859 /* Next check the txcmplq */
2860 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2863 /* Must be a FCP command */
2864 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2865 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2866 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2870 /* context1 MUST be a struct lpfc_scsi_buf */
2871 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
/* Same tgt/lun filter as the txq pass above */
2872 if ((lpfc_cmd == 0) ||
2873 (lpfc_cmd->pCmd->device->id != scsi_target) ||
2874 (lpfc_cmd->pCmd->device->lun != scsi_lun)) {
/*
 * lpfc_sli_abort_iocb_lun
 *
 * Abort every pending FCP command IOCB on this ring that is addressed
 * to the given SCSI target/lun.  'flag' selects which queues to walk:
 *
 *   LPFC_ABORT_TXQ     - matching IOCBs still on the txq are unlinked
 *                        and completed locally with IOSTAT_LOCAL_REJECT /
 *                        IOERR_SLI_ABORTED (or returned to the iocb
 *                        mempool when no completion routine is set).
 *   LPFC_ABORT_TXCMPLQ - for matching IOCBs already issued to the HBA,
 *                        an abort IOCB is built (reusing the victim's
 *                        context tag, iotag and class) and issued; the
 *                        response-ring completion later removes the
 *                        original entry from the txcmplq.
 */
2884 lpfc_sli_abort_iocb_lun(struct lpfc_hba * phba,
2885 struct lpfc_sli_ring * pring,
2886 uint16_t scsi_target, uint64_t scsi_lun, int flag)
2888 struct lpfc_sli *psli;
2889 struct lpfc_iocbq *iocb, *next_iocb;
2890 struct lpfc_iocbq *abtsiocbp;
2891 IOCB_t *icmd = NULL, *cmd = NULL;
2892 struct lpfc_scsi_buf *lpfc_cmd;
2898 /* Error matching iocb on txq or txcmplq
2899 * First check the txq.
2901 if(flag & LPFC_ABORT_TXQ) {
2902 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2905 /* Must be a FCP command */
2906 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2907 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2908 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2912 /* context1 MUST be a struct lpfc_scsi_buf */
2913 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
/* Skip entries without a SCSI buffer or aimed at a different tgt/lun */
2914 if ((lpfc_cmd == 0) ||
2915 (lpfc_cmd->pCmd->device->id != scsi_target) ||
2916 (lpfc_cmd->pCmd->device->lun != scsi_lun)) {
/* Not yet issued: remove from txq and fail it back to the owner */
2920 list_del_init(&iocb->list);
2922 if (iocb->iocb_cmpl) {
2924 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2925 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2926 (iocb->iocb_cmpl) (phba, iocb, iocb);
2928 mempool_free( iocb, phba->iocb_mem_pool);
2933 if(flag & LPFC_ABORT_TXCMPLQ) {
2934 /* Next check the txcmplq */
2935 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
2939 /* Must be a FCP command */
2940 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
2941 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
2942 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
2946 /* context1 MUST be a struct lpfc_scsi_buf */
2947 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
2948 if ((lpfc_cmd == 0) ||
2949 (lpfc_cmd->pCmd->device->id != scsi_target) ||
2950 (lpfc_cmd->pCmd->device->lun != scsi_lun)) {
2954 /* issue ABTS for this IOCB based on iotag */
/* GFP_ATOMIC: presumably runs with the host lock held -- confirm */
2955 if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
2956 GFP_ATOMIC)) == 0) {
2960 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
2961 icmd = &abtsiocbp->iocb;
2963 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2964 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2965 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2968 icmd->ulpClass = cmd->ulpClass;
/* Send a real ABTS only when the link is up; otherwise close the XRI */
2969 if (phba->hba_state >= LPFC_LINK_UP) {
2970 icmd->ulpCommand = CMD_ABORT_XRI_CN;
2972 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
/* On issue failure give the abort IOCB back to the mempool */
2975 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
2977 mempool_free( abtsiocbp, phba->iocb_mem_pool);
2981 /* The rsp ring completion will remove IOCB from
2982 * tacmplq when abort is read by HBA.
/*
 * lpfc_sli_abort_iocb_tgt
 *
 * Target-wide variant of lpfc_sli_abort_iocb_lun: aborts every pending
 * FCP command IOCB on this ring addressed to the given SCSI target,
 * regardless of lun.  'flag' selects the queues:
 *
 *   LPFC_ABORT_TXQ     - matching IOCBs still on the txq are unlinked
 *                        and completed locally with IOSTAT_LOCAL_REJECT /
 *                        IOERR_SLI_ABORTED (or freed to the iocb mempool
 *                        when no completion routine is set).
 *   LPFC_ABORT_TXCMPLQ - matching IOCBs already issued to the HBA get an
 *                        abort IOCB (ABORT_XRI_CN when the link is up,
 *                        CLOSE_XRI_CN otherwise); the response-ring
 *                        completion later removes the original entry.
 */
2990 lpfc_sli_abort_iocb_tgt(struct lpfc_hba * phba,
2991 struct lpfc_sli_ring * pring,
2992 uint16_t scsi_target, int flag)
2994 struct lpfc_sli *psli;
2995 struct lpfc_iocbq *iocb, *next_iocb;
2996 struct lpfc_iocbq *abtsiocbp;
2997 IOCB_t *icmd = NULL, *cmd = NULL;
2998 struct lpfc_scsi_buf *lpfc_cmd;
3004 /* Error matching iocb on txq or txcmplq
3005 * First check the txq.
3007 if(flag & LPFC_ABORT_TXQ) {
3008 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3011 /* Must be a FCP command */
3012 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
3013 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
3014 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
3018 /* context1 MUST be a struct lpfc_scsi_buf */
3019 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
/* Only the target id is matched here (no lun check) */
3021 || (lpfc_cmd->pCmd->device->id != scsi_target)) {
/* Not yet issued: remove from txq and fail it back to the owner */
3025 list_del_init(&iocb->list);
3027 if (iocb->iocb_cmpl) {
3029 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3030 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3031 (iocb->iocb_cmpl) (phba, iocb, iocb);
3033 mempool_free( iocb, phba->iocb_mem_pool);
3038 if(flag & LPFC_ABORT_TXCMPLQ) {
3039 /* Next check the txcmplq */
3040 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3044 /* Must be a FCP command */
3045 if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
3046 (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
3047 (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
3051 /* context1 MUST be a struct lpfc_scsi_buf */
3052 lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
3054 || (lpfc_cmd->pCmd->device->id != scsi_target)) {
3058 /* issue ABTS for this IOCB based on iotag */
/* GFP_ATOMIC: presumably runs with the host lock held -- confirm */
3059 if ((abtsiocbp = mempool_alloc(phba->iocb_mem_pool,
3060 GFP_ATOMIC)) == 0) {
3064 memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));
3065 icmd = &abtsiocbp->iocb;
3067 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
3068 icmd->un.acxri.abortContextTag = cmd->ulpContext;
3069 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
3072 icmd->ulpClass = cmd->ulpClass;
/* Send a real ABTS only when the link is up; otherwise close the XRI */
3073 if (phba->hba_state >= LPFC_LINK_UP) {
3074 icmd->ulpCommand = CMD_ABORT_XRI_CN;
3076 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
/* On issue failure give the abort IOCB back to the mempool */
3079 if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) ==
3081 mempool_free( abtsiocbp, phba->iocb_mem_pool);
3085 /* The rsp ring completion will remove IOCB from
3086 * txcmplq when abort is read by HBA.
/*
 * lpfc_sli_wake_iocb_high_priority
 *
 * IOCB completion callback installed by
 * lpfc_sli_issue_iocb_wait_high_priority.  queue1 is the original
 * command iocb, queue2 the response iocb: the response is copied into
 * the caller-supplied buffer hung off queue1->context2 (when both are
 * present), then LPFC_IO_HIPRI is set on queue1 -- the flag the polling
 * issuer tests to detect completion.
 */
3096 lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
3097 struct lpfc_iocbq * queue1,
3098 struct lpfc_iocbq * queue2)
3100 if (queue1->context2 && queue2)
3101 memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
3103 /* The waiter is looking for LPFC_IO_HIPRI bit to be set
3104 as a signal to wake up */
3105 queue1->iocb_flag |= LPFC_IO_HIPRI;
/*
 * lpfc_sli_issue_iocb_wait_high_priority
 *
 * Issue 'piocb' with SLI_IOCB_HIGH_PRIORITY and then busy-poll (rather
 * than sleep) for lpfc_sli_wake_iocb_high_priority to set LPFC_IO_HIPRI.
 * The response, if 'prspiocbq' is supplied, is delivered via context2.
 * The midlayer-held host_lock is dropped around the polling loop so the
 * interrupt path can run the completion, and re-taken before returning.
 * Returns IOCB_SUCCESS when the completion flag was observed, otherwise
 * IOCB_ERROR (bad arguments or issue failure or poll exhaustion).
 */
3110 lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
3111 struct lpfc_sli_ring * pring,
3112 struct lpfc_iocbq * piocb,
3114 struct lpfc_iocbq * prspiocbq,
3117 int j, delay_time, retval = IOCB_ERROR;
3119 /* The caller must leave context1 empty. */
3120 if (piocb->context_un.hipri_wait_queue != 0) {
3125 * If the caller has provided a response iocbq buffer, context2 must
3126 * be NULL or its an error.
3128 if (prspiocbq && piocb->context2) {
3132 piocb->context2 = prspiocbq;
3134 /* Setup callback routine and issue the command. */
3135 piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
3136 retval = lpfc_sli_issue_iocb(phba, pring, piocb,
3137 flag | SLI_IOCB_HIGH_PRIORITY);
3138 if (retval != IOCB_SUCCESS) {
3139 piocb->context2 = NULL;
3144 * This high-priority iocb was sent out-of-band. Poll for its
3145 * completion rather than wait for a signal. Note that the host_lock
3146 * is held by the midlayer and must be released here to allow the
3147 * interrupt handlers to complete the IO and signal this routine via
3149 * Also, the delay_time is computed to be one second longer than
3150 * the scsi command timeout to give the FW time to abort on
3151 * timeout rather than the driver just giving up. Typically,
3152 * the midlayer does not specify a time for this command so the
3153 * driver is free to enforce its own timeout.
/* 64 polls of ((timeout+1)*1000)/64 ms each ~= (timeout+1) seconds */
3156 delay_time = ((timeout + 1) * 1000) >> 6;
3157 retval = IOCB_ERROR;
3158 spin_unlock_irq(phba->host->host_lock);
3159 for (j = 0; j < 64; j++) {
3160 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)
3165 if (piocb->iocb_flag & LPFC_IO_HIPRI) {
3166 piocb->iocb_flag &= ~LPFC_IO_HIPRI;
3167 retval = IOCB_SUCCESS;
3172 spin_lock_irq(phba->host->host_lock);
3173 piocb->context2 = NULL;
/*
 * lpfc_sli_wake_mbox_wait
 *
 * Mailbox completion callback installed by lpfc_sli_issue_mbox_wait.
 * Recovers the waiter's stack wait-queue head from pmboxq->context1 and
 * wakes the sleeping issuer.  A NULL context1 means the waiter already
 * timed out and gave up, in which case there is nobody to wake.
 */
3177 lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
3179 wait_queue_head_t *pdone_q;
3182 * If pdone_q is empty, the driver thread gave up waiting and
3183 * continued running.
3185 pdone_q = (wait_queue_head_t *) pmboxq->context1;
3187 wake_up_interruptible(pdone_q);
/*
 * lpfc_sli_issue_mbox_wait
 *
 * Synchronous mailbox issue: installs lpfc_sli_wake_mbox_wait as the
 * completion handler, stashes a stack wait-queue head in context1 for
 * it, issues the command MBX_NOWAIT under the host lock, then sleeps
 * (interruptibly) for up to 'timeout' seconds.  Returns MBX_NOT_FINISHED
 * when context1 was already in use, MBX_TIMEOUT when the sleep expired
 * without a wakeup, MBX_SUCCESS on normal completion.
 */
3192 lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3195 DECLARE_WAIT_QUEUE_HEAD(done_q);
3196 DECLARE_WAITQUEUE(wq_entry, current);
3197 uint32_t timeleft = 0;
3200 /* The caller must leave context1 empty. */
3201 if (pmboxq->context1 != 0) {
3202 return (MBX_NOT_FINISHED);
3205 /* setup wake call as IOCB callback */
3206 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3207 /* setup context field to pass wait_queue pointer to wake function */
3208 pmboxq->context1 = &done_q;
3210 /* start to sleep before we wait, to avoid races */
3211 set_current_state(TASK_INTERRUPTIBLE);
3212 add_wait_queue(&done_q, &wq_entry);
3214 /* now issue the command */
3215 spin_lock_irq(phba->host->host_lock);
3216 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3217 spin_unlock_irq(phba->host->host_lock);
/* Only wait if the mailbox actually went out (busy or in flight) */
3219 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3220 timeleft = schedule_timeout(timeout * HZ);
/* Clear context1 so a late completion sees no waiter to wake */
3221 pmboxq->context1 = NULL;
3222 /* if schedule_timeout returns 0, we timed out and were not
3224 if (timeleft == 0) {
3225 retval = MBX_TIMEOUT;
3227 retval = MBX_SUCCESS;
3232 set_current_state(TASK_RUNNING);
3233 remove_wait_queue(&done_q, &wq_entry);
/*
 * lpfc_sli_wake_iocb_wait
 *
 * IOCB completion callback installed by lpfc_sli_issue_iocb_wait.
 * Marks the command complete by setting LPFC_IO_WAIT, copies the
 * response iocb (queue2) into the caller's buffer (queue1->context2)
 * when both exist, then wakes the sleeper whose wait-queue head was
 * stashed in context_un.hipri_wait_queue.
 * NOTE(review): the wake_up call itself is on lines not visible in this
 * fragment; a NULL pdone_q means the waiter already gave up.
 */
3238 lpfc_sli_wake_iocb_wait(struct lpfc_hba * phba,
3239 struct lpfc_iocbq * queue1, struct lpfc_iocbq * queue2)
3241 wait_queue_head_t *pdone_q;
3243 queue1->iocb_flag |= LPFC_IO_WAIT;
3244 if (queue1->context2 && queue2)
3245 memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));
3248 * If pdone_q is empty, the waiter gave up and returned and this
3249 * call has nothing to do.
3251 pdone_q = queue1->context_un.hipri_wait_queue;
/*
 * lpfc_sli_issue_iocb_wait
 *
 * Synchronous IOCB issue: installs lpfc_sli_wake_iocb_wait as the
 * completion handler, stashes a stack wait-queue head in
 * context_un.hipri_wait_queue, issues the command, and sleeps
 * uninterruptibly for up to 'timeout' seconds.  The response is copied
 * into 'prspiocbq' (via context2) when one is supplied.
 *
 * Returns IOCB_ERROR when context fields are already in use or the
 * issue itself fails, IOCB_TIMEDOUT when either the sleep expired
 * (timeleft == 0) or LPFC_IO_WAIT was never set by the wakeup callback,
 * and IOCB_SUCCESS on normal completion.
 */
3260 lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3261 struct lpfc_sli_ring * pring,
3262 struct lpfc_iocbq * piocb,
3263 struct lpfc_iocbq * prspiocbq, uint32_t timeout)
3265 DECLARE_WAIT_QUEUE_HEAD(done_q);
3266 DECLARE_WAITQUEUE(wq_entry, current);
3267 uint32_t timeleft = 0;
3270 /* The caller must leave context1 empty for the driver. */
3271 if (piocb->context_un.hipri_wait_queue != 0)
3272 return (IOCB_ERROR);
3274 /* If the caller has provided a response iocbq buffer, then context2
3275 * is NULL or its an error.
3278 if (piocb->context2)
3279 return (IOCB_ERROR);
3280 piocb->context2 = prspiocbq;
3283 /* setup wake call as IOCB callback */
3284 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3285 /* setup context field to pass wait_queue pointer to wake function */
3286 piocb->context_un.hipri_wait_queue = &done_q;
3288 /* start to sleep before we wait, to avoid races */
3289 set_current_state(TASK_UNINTERRUPTIBLE);
3290 add_wait_queue(&done_q, &wq_entry);
3292 /* now issue the command */
3293 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3294 if (retval == IOCB_SUCCESS) {
3295 /* Give up thread time and wait for the iocb to complete or for
3296 * the alloted time to expire.
3298 timeleft = schedule_timeout(timeout * HZ);
/* Detach completion context so a late wakeup finds nothing to do */
3300 piocb->context_un.hipri_wait_queue = NULL;
3301 piocb->iocb_cmpl = NULL;
3302 if (piocb->context2 == prspiocbq)
3303 piocb->context2 = NULL;
3306 * Catch the error cases. A timeleft of zero is an error since
3307 * the iocb should have completed. The iocb_flag not have value
3308 * LPFC_IO_WAIT is also an error since the wakeup callback sets
3309 * this flag when it runs. Handle each.
3311 if (timeleft == 0) {
3312 printk(KERN_WARNING "lpfc driver detected iocb "
3314 retval = IOCB_TIMEDOUT;
3315 } else if (!(piocb->iocb_flag & LPFC_IO_WAIT)) {
3316 printk(KERN_WARNING "lpfc driver detected iocb "
3317 "flag = 0x%X\n", piocb->iocb_flag);
3318 retval = IOCB_TIMEDOUT;
3322 remove_wait_queue(&done_q, &wq_entry);
3323 set_current_state(TASK_RUNNING);
3324 piocb->context2 = NULL;
/*
 * lpfc_intr_handler
 *
 * Top-level interrupt service routine registered for the adapter.
 * Recovers the driver's lpfc_hba from dev_id and hands the event to
 * lpfc_sli_intr(); an intr_status of 0 indicates the SLI layer claimed
 * the interrupt.
 * NOTE(review): the IRQ_HANDLED / IRQ_NONE return paths are on lines
 * not visible in this fragment -- confirm against the full source.
 */
3329 lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
3331 struct lpfc_hba *phba;
3335 * Get the driver's phba structure from the dev_id and
3336 * assume the HBA is not interrupting.
3338 phba = (struct lpfc_hba *) dev_id;
3341 /* Call SLI to handle the interrupt event. */
3342 intr_status = lpfc_sli_intr(phba);
3343 if (intr_status == 0)
3349 } /* lpfc_intr_handler */