1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
22 * $Id: lpfc_nportdisc.c 1.146 2004/11/18 14:53:54EST sf_support Exp $
25 #include <linux/version.h>
26 #include <linux/blkdev.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <scsi/scsi_device.h>
32 #include "lpfc_disc.h"
33 #include "lpfc_scsi.h"
35 #include "lpfc_crtn.h"
37 #include "lpfc_logmsg.h"
40 extern uint8_t lpfcAlpaArray[];
43 /* Called to verify a rcv'ed ADISC was intended for us. */
/* Compares the WWNN/WWPN carried in the ADISC against the names cached in
 * the ndlp. NOTE(review): the return statements are not visible in this
 * extraction — presumably returns 0 on any mismatch, nonzero on match
 * (the "return success" comment below supports this) — confirm against
 * the full source.
 */
45 lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
46 struct lpfc_name * nn, struct lpfc_name * pn)
48 /* Compare the ADISC rsp WWNN / WWPN matches our internal node
49 * table entry for that node.
/* WWNN mismatch — this ADISC was not meant for this node. */
51 if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
/* WWPN mismatch — likewise reject. */
54 if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
57 /* we match, return success */
/* Validate the service parameters received in a PLOGI/login exchange
 * against our own (phba->fc_sparam) and negotiate downward: for every
 * class the remote marked valid, clamp its receive data field size to
 * ours. On success the remote WWNN/WWPN are cached into the ndlp.
 * NOTE(review): version check and failure-return lines are missing from
 * this extraction; the "else if (class == CLASSn)" branches presumably
 * reject a login requesting an unsupported class — confirm.
 */
63 lpfc_check_sparm(struct lpfc_hba * phba,
64 struct lpfc_nodelist * ndlp, struct serv_parm * sp,
67 volatile struct serv_parm *hsp = &phba->fc_sparam;
68 /* First check for supported version */
70 /* Next check for class validity */
71 if (sp->cls1.classValid) {
/* Clamp class-1 receive size (MSB/LSB bytes handled separately). */
73 if (sp->cls1.rcvDataSizeMsb > hsp->cls1.rcvDataSizeMsb)
74 sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
75 if (sp->cls1.rcvDataSizeLsb > hsp->cls1.rcvDataSizeLsb)
76 sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
77 } else if (class == CLASS1) {
81 if (sp->cls2.classValid) {
83 if (sp->cls2.rcvDataSizeMsb > hsp->cls2.rcvDataSizeMsb)
84 sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
85 if (sp->cls2.rcvDataSizeLsb > hsp->cls2.rcvDataSizeLsb)
86 sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
87 } else if (class == CLASS2) {
91 if (sp->cls3.classValid) {
93 if (sp->cls3.rcvDataSizeMsb > hsp->cls3.rcvDataSizeMsb)
94 sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
95 if (sp->cls3.rcvDataSizeLsb > hsp->cls3.rcvDataSizeLsb)
96 sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
97 } else if (class == CLASS3) {
/* Clamp the common buffer-to-buffer receive size as well. */
101 if (sp->cmn.bbRcvSizeMsb > hsp->cmn.bbRcvSizeMsb)
102 sp->cmn.bbRcvSizeMsb = hsp->cmn.bbRcvSizeMsb;
103 if (sp->cmn.bbRcvSizeLsb > hsp->cmn.bbRcvSizeLsb)
104 sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
106 /* If check is good, copy wwpn wwnn into ndlp */
107 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
108 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
/* Extract a pointer to the ELS response payload from a completed ELS
 * command/response iocb pair. The response DMA buffer is the second
 * entry on the command buffer's list; the payload starts one word past
 * the ELS command code. If the command buffer was zero'ed by
 * lpfc_els_abort (see comment below), NULL is returned and ulpStatus is
 * forced to a local-reject/aborted error so callers treat it as failed.
 */
113 lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
114 struct lpfc_iocbq *cmdiocb,
115 struct lpfc_iocbq *rspiocb)
117 struct lpfc_dmabuf *pcmd, *prsp;
122 irsp = &rspiocb->iocb;
123 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
125 /* For lpfc_els_abort, context2 could be zero'ed to delay
126 * freeing associated memory till after ABTS completes.
129 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
130 lp = (uint32_t *) prsp->virt;
/* Make the DMA'd response visible to the CPU before reading it. */
132 pci_dma_sync_single_for_cpu(phba->pcidev, prsp->phys,
133 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
/* Skip the leading ELS command word to reach the payload proper. */
135 ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
138 /* Force ulpStatus error since we are returning NULL ptr */
139 if (!(irsp->ulpStatus)) {
140 irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
141 irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
150 * Free resources / clean up outstanding I/Os
151 * associated with a LPFC_NODELIST entry. This
152 * routine effectively results in a "software abort".
/* Walks the ELS ring's txq and txcmplq, dequeues every iocb that matches
 * this ndlp, completes each with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED
 * (or frees it if it has no completion handler), and cancels any pending
 * delayed-ELS retry timer on the node.
 */
155 lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
158 struct lpfc_sli *psli;
159 struct lpfc_sli_ring *pring;
160 struct lpfc_iocbq *iocb, *next_iocb;
163 /* Abort outstanding I/O on NPort <nlp_DID> */
164 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
165 "%d:0201 Abort outstanding I/O on NPort x%x "
166 "Data: x%x x%x x%x\n",
167 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
168 ndlp->nlp_state, ndlp->nlp_rpi);
171 pring = &psli->ring[LPFC_ELS_RING];
173 /* First check the txq */
/* _safe iterator: entries are deleted while walking the list. */
174 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
175 /* Check to see if iocb matches the nport we are looking for */
176 if ((lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))) {
177 /* It matches, so deque and call compl with an error */
178 list_del(&iocb->list);
180 if (iocb->iocb_cmpl) {
/* Complete the iocb back to its owner with an abort status. */
182 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
183 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
184 (iocb->iocb_cmpl) (phba, iocb, iocb);
186 mempool_free(iocb, phba->iocb_mem_pool);
191 /* Everything on txcmplq will be returned by firmware
192 * with a no rpi / linkdown / abort error. For ring 0,
193 * ELS discovery, we want to get rid of it right here.
195 /* Next check the txcmplq */
196 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
197 /* Check to see if iocb matches the nport we are looking for */
198 if ((lpfc_check_sli_ndlp (phba, pring, iocb, ndlp))) {
199 /* It matches, so deque and call compl with an error */
200 list_del(&iocb->list);
201 pring->txcmplq_cnt--;
204 /* If the driver is completing an ELS
205 * command early, flush it out of the firmware.
208 (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) &&
209 (icmd->un.elsreq64.bdl.ulpIoTag32)) {
210 lpfc_sli_issue_abort_iotag32(phba, pring, iocb);
212 if (iocb->iocb_cmpl) {
213 icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
214 icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
215 (iocb->iocb_cmpl) (phba, iocb, iocb);
217 mempool_free(iocb, phba->iocb_mem_pool);
222 /* If we are delaying issuing an ELS command, cancel it */
223 if(ndlp->nlp_flag & NLP_DELAY_TMO) {
/* Clear the flag first, then synchronously stop the timer. */
224 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
225 del_timer_sync(&ndlp->nlp_delayfunc);
/* Handle a PLOGI received from a remote NPort.
 * Validates link state and service parameters (rejecting with an LS_RJT
 * where appropriate), handles point-to-point address assignment, allocates
 * and queues a REG_LOGIN mailbox, then ACCs the PLOGI. NOTE(review): the
 * extraction drops several return/goto lines, so the exact reject paths
 * after each lsRjtRsnCode assignment are not visible — confirm against
 * the full source before relying on the control flow described here.
 */
231 lpfc_rcv_plogi(struct lpfc_hba * phba,
232 struct lpfc_nodelist * ndlp,
233 struct lpfc_iocbq *cmdiocb)
235 struct lpfc_dmabuf *pcmd;
238 struct serv_parm *sp;
242 memset(&stat, 0, sizeof (struct ls_rjt));
/* PLOGI arrived before FLOGI completed — special-case pt2pt. */
243 if (phba->hba_state <= LPFC_FLOGI) {
244 /* Before responding to PLOGI, check for pt2pt mode.
245 * If we are pt2pt, with an outstanding FLOGI, abort
246 * the FLOGI and resend it first.
248 if (phba->fc_flag & FC_PT2PT) {
249 lpfc_els_abort_flogi(phba);
250 if(!(phba->fc_flag & FC_PT2PT_PLOGI)) {
251 /* If the other side is supposed to initiate
252 * the PLOGI anyway, just ACC it now and
253 * move on with discovery.
255 phba->fc_edtov = FF_DEF_EDTOV;
256 phba->fc_ratov = FF_DEF_RATOV;
257 /* Start discovery - this should just do
259 lpfc_disc_start(phba);
262 lpfc_initial_flogi(phba);
/* Not pt2pt and still logging into the fabric: tell the sender
 * we are busy so it retries later. */
266 stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
267 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
/* Service parameters follow the ELS command word in the payload. */
271 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
272 lp = (uint32_t *) pcmd->virt;
273 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
274 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
275 /* Reject this request because invalid parameters */
276 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
277 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
280 icmd = &cmdiocb->iocb;
282 /* PLOGI chkparm OK */
283 lpfc_printf_log(phba,
286 "%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
288 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
/* Record the negotiated FCP class (2 only if configured and offered). */
291 if ((phba->cfg_fcp_class == 2) &&
292 (sp->cls2.classValid)) {
293 ndlp->nlp_fcp_info |= CLASS2;
295 ndlp->nlp_fcp_info |= CLASS3;
298 /* no need to reg_login if we are already in one of these states */
299 switch(ndlp->nlp_state) {
300 case NLP_STE_REG_LOGIN_ISSUE:
301 case NLP_STE_PRLI_ISSUE:
302 case NLP_STE_UNMAPPED_NODE:
303 case NLP_STE_MAPPED_NODE:
304 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
/* GFP_ATOMIC: may be called from non-sleeping (iocb completion) context. */
308 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC)) == 0) {
309 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
310 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
314 if ((phba->fc_flag & FC_PT2PT)
315 && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
316 /* rcv'ed PLOGI decides what our NPortId will be */
317 phba->fc_myDID = icmd->un.rcvels.parmRo;
318 lpfc_config_link(phba, mbox);
319 if (lpfc_sli_issue_mbox
320 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
321 == MBX_NOT_FINISHED) {
322 mempool_free( mbox, phba->mbox_mem_pool);
323 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
324 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
/* The first mbox was consumed by CONFIG_LINK; get another for REG_LOGIN. */
327 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
329 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
330 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
333 lpfc_can_disctmo(phba);
336 if(lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
337 (uint8_t *) sp, mbox, 0)) {
338 mempool_free( mbox, phba->mbox_mem_pool);
339 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
340 stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
342 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
346 /* ACC PLOGI rsp command needs to execute first,
347 * queue this mbox command to be processed later.
349 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
350 mbox->context2 = ndlp;
351 ndlp->nlp_flag |= NLP_ACC_REGLOGIN;
353 /* If there is an outstanding PLOGI issued, abort it before
354 * sending ACC rsp to PLOGI recieved.
356 if(ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
357 /* software abort outstanding PLOGI */
358 lpfc_els_abort(phba, ndlp, 1);
360 ndlp->nlp_flag |= NLP_RCV_PLOGI;
/* ACC the PLOGI; the queued REG_LOGIN mbox runs after the ACC completes. */
361 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
/* Handle a received PDISC or ADISC for this ndlp.
 * If the embedded WWNN/WWPN match our cached names (and the iocb has no
 * error), ACC with the matching response type; otherwise reject with
 * LS_RJT (invalid sparms), then park the node on the NPR list with a
 * 1-second delayed-PLOGI retry.
 */
366 lpfc_rcv_padisc(struct lpfc_hba * phba,
367 struct lpfc_nodelist * ndlp,
368 struct lpfc_iocbq *cmdiocb)
370 struct lpfc_dmabuf *pcmd;
371 struct serv_parm *sp;
372 struct lpfc_name *pnn, *ppn;
379 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
380 lp = (uint32_t *) pcmd->virt;
/* ADISC carries names in an ADISC payload; PDISC carries full sparms. */
383 if (cmd == ELS_CMD_ADISC) {
385 pnn = (struct lpfc_name *) & ap->nodeName;
386 ppn = (struct lpfc_name *) & ap->portName;
388 sp = (struct serv_parm *) lp;
389 pnn = (struct lpfc_name *) & sp->nodeName;
390 ppn = (struct lpfc_name *) & sp->portName;
393 icmd = &cmdiocb->iocb;
394 if ((icmd->ulpStatus == 0) &&
395 (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
396 if (cmd == ELS_CMD_ADISC) {
397 lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
400 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
405 /* Reject this request because invalid parameters */
406 stat.un.b.lsRjtRsvd0 = 0;
407 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
408 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
409 stat.un.b.vendorUnique = 0;
410 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
/* Names didn't match: schedule a fresh PLOGI in 1 sec and go to NPR. */
412 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
414 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
416 ndlp->nlp_flag |= NLP_DELAY_TMO;
417 ndlp->nlp_state = NLP_STE_NPR_NODE;
418 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
/* Handle a received LOGO (or PRLO treated as LOGO) for this ndlp:
 * ACC the LOGO once, schedule a delayed PLOGI retry for non-fabric
 * nodes, and move the node to the NPR list.
 */
423 lpfc_rcv_logo(struct lpfc_hba * phba,
424 struct lpfc_nodelist * ndlp,
425 struct lpfc_iocbq *cmdiocb)
427 /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */
428 /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
429 * PLOGIs during LOGO storms from a device.
431 ndlp->nlp_flag |= NLP_LOGO_ACC;
432 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
434 if (!(ndlp->nlp_type & NLP_FABRIC)) {
435 /* Only try to re-login if this is NOT a Fabric Node */
436 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
437 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
438 ndlp->nlp_flag |= NLP_DELAY_TMO;
441 ndlp->nlp_state = NLP_STE_NPR_NODE;
442 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
/* The device logged us out; ADISC re-validation no longer applies. */
444 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
445 /* The driver has to wait until the ACC completes before it continues
446 * processing the LOGO. The action will resume in
447 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
448 * unreg_login, the driver waits so the ACC does not get aborted.
/* Test whether persistent-binding entry blp matches ndlp, honoring the
 * entry's bind type: by D_ID, by WWPN, or by WWNN. NOTE(review): the
 * return lines are not visible in this extraction — presumably nonzero
 * on match, 0 otherwise.
 */
454 lpfc_binding_found(struct lpfc_bindlist * blp, struct lpfc_nodelist * ndlp)
456 uint16_t bindtype = blp->nlp_bind_type;
/* blp->nlp_DID is stored big-endian; byte-swap before comparing. */
458 if ((bindtype & FCP_SEED_DID) &&
459 (ndlp->nlp_DID == be32_to_cpu(blp->nlp_DID))) {
461 } else if ((bindtype & FCP_SEED_WWPN) &&
462 (memcmp(&ndlp->nlp_portname, &blp->nlp_portname,
463 sizeof (struct lpfc_name)) == 0)) {
465 } else if ((bindtype & FCP_SEED_WWNN) &&
466 (memcmp(&ndlp->nlp_nodename, &blp->nlp_nodename,
467 sizeof (struct lpfc_name)) == 0)) {
/* Return whether any entry on the free binding list already uses SCSI id
 * <sid>. NOTE(review): return statements not visible in this extraction.
 */
474 lpfc_binding_useid(struct lpfc_hba * phba, uint32_t sid)
476 struct lpfc_bindlist *blp;
478 list_for_each_entry(blp, &phba->fc_nlpbind_list, nlp_listp) {
479 if (blp->nlp_sid == sid) {
/* Return whether any mapped node's binding already uses SCSI id <sid>.
 * NOTE(review): return statements not visible in this extraction.
 */
487 lpfc_mapping_useid(struct lpfc_hba * phba, uint32_t sid)
489 struct lpfc_nodelist *mapnode;
490 struct lpfc_bindlist *blp;
492 list_for_each_entry(mapnode, &phba->fc_nlpmap_list, nlp_listp) {
493 blp = mapnode->nlp_listp_bind;
494 if (blp->nlp_sid == sid) {
/* Allocate and populate a new binding entry for ndlp at SCSI id <index>.
 * The bind type (WWPN/WWNN/DID) is chosen by the caller-supplied method
 * selector whose test lines are not visible in this extraction. Returns
 * the new entry, or (presumably) NULL when the mempool allocation fails.
 */
501 static struct lpfc_bindlist *
502 lpfc_create_binding(struct lpfc_hba * phba,
503 struct lpfc_nodelist * ndlp, uint16_t index,
506 struct lpfc_bindlist *blp;
508 if ((blp = mempool_alloc(phba->bind_mem_pool, GFP_ATOMIC))) {
509 memset(blp, 0, sizeof (struct lpfc_bindlist));
512 blp->nlp_bind_type = FCP_SEED_WWPN;
515 blp->nlp_bind_type = FCP_SEED_WWNN;
518 blp->nlp_bind_type = FCP_SEED_DID;
/* Snapshot identity so the binding survives the node going away. */
521 blp->nlp_sid = index;
522 blp->nlp_DID = ndlp->nlp_DID;
523 memcpy(&blp->nlp_nodename, &ndlp->nlp_nodename,
524 sizeof (struct lpfc_name));
525 memcpy(&blp->nlp_portname, &ndlp->nlp_portname,
526 sizeof (struct lpfc_name));
/* Find or create the persistent binding (SCSI id) for ndlp.
 * Order of attempts visible here: (1) reuse a matching entry from the
 * free binding list; (2) ALPA-array based id when bind_method == 4 on a
 * private loop (warning logged if method 4 is configured on a non-loop
 * topology); (3) automap to the next unused id when cfg_automap is set.
 * Logs and (presumably) returns NULL when no id can be assigned.
 */
534 static struct lpfc_bindlist *
535 lpfc_consistent_bind_get(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
537 struct lpfc_bindlist *blp, *next_blp;
540 /* check binding list */
541 list_for_each_entry_safe(blp, next_blp, &phba->fc_nlpbind_list,
543 if (lpfc_binding_found(blp, ndlp)) {
545 /* take it off the binding list */
547 list_del_init(&blp->nlp_listp);
549 /* Reassign scsi id <sid> to NPort <nlp_DID> */
550 lpfc_printf_log(phba,
552 LOG_DISCOVERY | LOG_FCP,
553 "%d:0213 Reassign scsi id x%x to "
554 "NPort x%x Data: x%x x%x x%x x%x\n",
556 blp->nlp_sid, ndlp->nlp_DID,
557 blp->nlp_bind_type, ndlp->nlp_flag,
558 ndlp->nlp_state, ndlp->nlp_rpi);
564 /* NOTE: if scan-down = 2 and we have private loop, then we use
565 * AlpaArray to determine sid.
567 if ((phba->cfg_fcp_bind_method == 4) &&
568 ((phba->fc_flag & (FC_PUBLIC_LOOP | FC_FABRIC)) ||
569 (phba->fc_topology != TOPOLOGY_LOOP))) {
570 /* Log message: ALPA based binding used on a non loop
572 lpfc_printf_log(phba,
575 "%d:0245 ALPA based bind method used on an HBA "
576 "which is in a nonloop topology Data: x%x\n",
/* Private loop with ALPA bind method: the loop index of the node's
 * ALPA becomes its SCSI id. */
581 if ((phba->cfg_fcp_bind_method == 4) &&
582 !(phba->fc_flag & (FC_PUBLIC_LOOP | FC_FABRIC)) &&
583 (phba->fc_topology == TOPOLOGY_LOOP)) {
584 for (index = 0; index < FC_MAXLOOP; index++) {
585 if (ndlp->nlp_DID == (uint32_t) lpfcAlpaArray[index]) {
587 lpfc_create_binding(phba, ndlp, index,
596 if (phba->cfg_automap) {
/* Skip ids already used by a free binding or a mapped node. */
598 if ((lpfc_binding_useid(phba, phba->sid_cnt))
599 || (lpfc_mapping_useid (phba, phba->sid_cnt))) {
604 lpfc_create_binding(phba, ndlp,
606 phba->fcp_mapping))) {
607 blp->nlp_bind_type |= FCP_SEED_AUTO;
618 /* Cannot assign scsi id on NPort <nlp_DID> */
619 lpfc_printf_log(phba,
621 LOG_DISCOVERY | LOG_FCP,
622 "%d:0230 Cannot assign scsi ID on NPort x%x "
623 "Data: x%x x%x x%x\n",
625 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
/* Apply binding blp to ndlp: resolve the target for blp->nlp_sid, set
 * ndlp->nlp_sid, and translate the bind type into the matching NLP_SEED_*
 * flag (plus NLP_AUTOMAP for auto-seeded bindings). Logs on both the
 * failure and success paths. NOTE(review): return statements not visible
 * in this extraction — presumably nonzero on success, 0 when the target
 * lookup fails.
 */
632 lpfc_assign_binding(struct lpfc_hba * phba,
633 struct lpfc_nodelist * ndlp, struct lpfc_bindlist *blp)
635 struct lpfc_target *targetp;
637 targetp = lpfc_find_target(phba, blp->nlp_sid, ndlp);
639 /* Cannot assign scsi id <sid> to NPort <nlp_DID> */
640 lpfc_printf_log(phba,
642 LOG_DISCOVERY | LOG_FCP,
643 "%d:0229 Cannot assign scsi id x%x to NPort x%x "
644 "Data: x%x x%x x%x\n",
645 phba->brd_no, blp->nlp_sid,
646 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
650 ndlp->nlp_sid = blp->nlp_sid;
/* Replace any previous seed flags with the one matching this binding. */
651 ndlp->nlp_flag &= ~NLP_SEED_MASK;
652 switch ((blp->nlp_bind_type & FCP_SEED_MASK)) {
654 ndlp->nlp_flag |= NLP_SEED_WWPN;
657 ndlp->nlp_flag |= NLP_SEED_WWNN;
660 ndlp->nlp_flag |= NLP_SEED_DID;
663 if (blp->nlp_bind_type & FCP_SEED_AUTO) {
664 ndlp->nlp_flag |= NLP_AUTOMAP;
666 /* Assign scsi id <sid> to NPort <nlp_DID> */
667 lpfc_printf_log(phba,
669 LOG_DISCOVERY | LOG_FCP,
670 "%d:0216 Assign scsi "
671 "id x%x to NPort x%x "
672 "Data: x%x x%x x%x x%x\n",
674 ndlp->nlp_sid, ndlp->nlp_DID,
676 ndlp->nlp_flag, ndlp->nlp_state,
/* Decide whether this node should be re-validated with ADISC on the next
 * discovery pass: skip when use-adisc is disabled and we are not in RSCN
 * mode, otherwise set NLP_NPR_ADISC.
 */
682 lpfc_disc_set_adisc(struct lpfc_hba * phba,
683 struct lpfc_nodelist * ndlp)
685 /* Check config parameter use-adisc or FCP-2 */
686 if ((phba->cfg_use_adisc == 0) &&
687 !(phba->fc_flag & FC_RSCN_MODE)) {
690 ndlp->nlp_flag |= NLP_NPR_ADISC;
/* State-machine no-op handler: ignore the event, keep the current state. */
695 lpfc_disc_noop(struct lpfc_hba * phba,
696 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
698 /* This routine does nothing, just return the current state */
699 return (ndlp->nlp_state);
/* State-machine handler for (state, event) pairs that should never occur:
 * log the illegal transition and leave the node's state unchanged.
 */
703 lpfc_disc_illegal(struct lpfc_hba * phba,
704 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
706 lpfc_printf_log(phba,
709 "%d:0253 Illegal State Transition: node x%x event x%x, "
710 "state x%x Data: x%x x%x\n",
712 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
714 return (ndlp->nlp_state);
717 /* Start of Discovery State Machine routines */
/* UNUSED state: PLOGI received. Process it via lpfc_rcv_plogi; on success
 * keep the node on the unused list, otherwise drop the node entirely and
 * report NLP_STE_FREED_NODE so the caller stops using the ndlp.
 */
720 lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
721 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
723 struct lpfc_iocbq *cmdiocb;
724 struct lpfc_dmabuf *pcmd;
726 cmdiocb = (struct lpfc_iocbq *) arg;
727 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
/* Sync the received payload for CPU access before parsing it. */
729 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
730 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
732 if(lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
733 ndlp->nlp_state = NLP_STE_UNUSED_NODE;
734 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
735 return (ndlp->nlp_state);
737 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
738 return (NLP_STE_FREED_NODE);
/* UNUSED state: unexpected ELS received — answer with LOGO and keep the
 * node on the unused list.
 */
742 lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
743 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
745 lpfc_issue_els_logo(phba, ndlp, 0);
746 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
747 return (ndlp->nlp_state);
/* UNUSED state: LOGO received — ACC it (marking NLP_LOGO_ACC) and keep
 * the node on the unused list.
 */
751 lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
752 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
754 struct lpfc_iocbq *cmdiocb;
755 struct lpfc_dmabuf *pcmd;
757 cmdiocb = (struct lpfc_iocbq *) arg;
758 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
760 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
761 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
763 ndlp->nlp_flag |= NLP_LOGO_ACC;
764 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
765 lpfc_nlp_list(phba, ndlp, NLP_UNUSED_LIST);
767 return (ndlp->nlp_state);
/* UNUSED state: our LOGO completed — free the node. */
771 lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
772 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
774 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
775 return (NLP_STE_FREED_NODE);
/* UNUSED state: device-remove event — free the node. */
779 lpfc_device_rm_unused_node(struct lpfc_hba * phba,
780 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
782 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
783 return (NLP_STE_FREED_NODE);
/* PLOGI_ISSUE state: a PLOGI arrived while ours is outstanding (login
 * collision). FC-LS tie-break: only the port with the lower WWPN accepts
 * the incoming PLOGI; otherwise we reject with "command in progress" and
 * let our own PLOGI win.
 */
787 lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba,
788 struct lpfc_nodelist * ndlp,
789 struct lpfc_iocbq *cmdiocb, uint32_t evt)
791 struct lpfc_dmabuf *pcmd;
792 struct serv_parm *sp;
797 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
798 lp = (uint32_t *) pcmd->virt;
799 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
801 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
802 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
804 memset(&stat, 0, sizeof (struct ls_rjt));
806 /* For a PLOGI, we only accept if our portname is less
807 * than the remote portname.
809 phba->fc_stat.elsLogiCol++;
810 port_cmp = memcmp(&phba->fc_portname, &sp->portName,
811 sizeof (struct lpfc_name));
814 /* Reject this request because the remote node will accept
816 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
817 stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
818 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
821 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
822 } /* if our portname was less */
824 return (ndlp->nlp_state);
/* PLOGI_ISSUE state: some other ELS arrived. Abort our outstanding PLOGI,
 * ACC if the event was a received LOGO (else send our own LOGO), then park
 * the node on the NPR list with a 1-second delayed PLOGI retry.
 */
828 lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
829 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
831 struct lpfc_iocbq *cmdiocb;
832 struct lpfc_dmabuf *pcmd;
834 cmdiocb = (struct lpfc_iocbq *) arg;
835 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
837 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
838 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
840 /* software abort outstanding PLOGI */
841 lpfc_els_abort(phba, ndlp, 1);
842 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
843 ndlp->nlp_flag |= NLP_DELAY_TMO;
845 if(evt == NLP_EVT_RCV_LOGO) {
846 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
849 lpfc_issue_els_logo(phba, ndlp, 0);
852 /* Put ndlp in npr list set plogi timer for 1 sec */
853 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
854 ndlp->nlp_state = NLP_STE_NPR_NODE;
855 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
857 return (ndlp->nlp_state);
/* PLOGI_ISSUE state: our PLOGI completed.
 * On success: validate service parameters, record FCP class, allocate a
 * REG_LOGIN mailbox (completion routine chosen by the well-known DID:
 * name server / FDMI / regular), issue it, and transition to
 * REG_LOGIN_ISSUE. Any failure along the way falls through to freeing the
 * node. If an ACC/REG_LOGIN sequence from a received PLOGI is already in
 * flight (NLP_ACC_REGLOGIN), the completion is ignored.
 */
861 lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
862 struct lpfc_nodelist * ndlp, void *arg,
865 struct lpfc_iocbq *cmdiocb, *rspiocb;
866 struct lpfc_dmabuf *pcmd, *prsp;
869 struct serv_parm *sp;
872 cmdiocb = (struct lpfc_iocbq *) arg;
873 rspiocb = cmdiocb->context_un.rsp_iocb;
875 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
876 return (ndlp->nlp_state);
879 irsp = &rspiocb->iocb;
881 if (irsp->ulpStatus == 0) {
882 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
/* Response payload is the second buffer on the command's list. */
884 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
885 lp = (uint32_t *) prsp->virt;
887 pci_dma_sync_single_for_cpu(phba->pcidev, prsp->phys,
888 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
890 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
891 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
892 /* PLOGI chkparm OK */
893 lpfc_printf_log(phba,
896 "%d:0121 PLOGI chkparm OK "
897 "Data: x%x x%x x%x x%x\n",
899 ndlp->nlp_DID, ndlp->nlp_state,
900 ndlp->nlp_flag, ndlp->nlp_rpi);
902 if ((phba->cfg_fcp_class == 2) &&
903 (sp->cls2.classValid)) {
904 ndlp->nlp_fcp_info |= CLASS2;
906 ndlp->nlp_fcp_info |= CLASS3;
909 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
/* Drop any stale RPI before registering the new login. */
911 lpfc_unreg_rpi(phba, ndlp);
913 (phba, irsp->un.elsreq64.remoteID,
914 (uint8_t *) sp, mbox, 0) == 0) {
915 /* set_slim mailbox command needs to
916 * execute first, queue this command to
917 * be processed later.
/* Well-known DIDs get dedicated reg_login completion handlers. */
919 switch(ndlp->nlp_DID) {
922 lpfc_mbx_cmpl_ns_reg_login;
926 lpfc_mbx_cmpl_fdmi_reg_login;
930 lpfc_mbx_cmpl_reg_login;
932 mbox->context2 = ndlp;
933 if (lpfc_sli_issue_mbox(phba, mbox,
934 (MBX_NOWAIT | MBX_STOP_IOCB))
935 != MBX_NOT_FINISHED) {
937 NLP_STE_REG_LOGIN_ISSUE;
938 lpfc_nlp_list(phba, ndlp,
940 return (ndlp->nlp_state);
942 mempool_free(mbox, phba->mbox_mem_pool);
944 mempool_free(mbox, phba->mbox_mem_pool);
950 /* Free this node since the driver cannot login or has the wrong
952 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
953 return (NLP_STE_FREED_NODE);
/* PLOGI_ISSUE state: device-remove event — abort the outstanding PLOGI
 * and free the node.
 */
957 lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
958 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
960 /* software abort outstanding PLOGI */
961 lpfc_els_abort(phba, ndlp, 1);
963 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
964 return (NLP_STE_FREED_NODE);
/* PLOGI_ISSUE state: device-recovery (e.g. link event) — abort the
 * outstanding PLOGI and move the node to the NPR list, clearing the
 * to-be-discovered flag.
 */
968 lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
969 struct lpfc_nodelist * ndlp, void *arg,
972 /* software abort outstanding PLOGI */
973 lpfc_els_abort(phba, ndlp, 1);
975 ndlp->nlp_state = NLP_STE_NPR_NODE;
976 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
977 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
979 return (ndlp->nlp_state);
/* ADISC_ISSUE state: PLOGI received. Abort our ADISC and process the
 * PLOGI; if lpfc_rcv_plogi did not fully handle it, fall back to issuing
 * our own PLOGI and move to PLOGI_ISSUE.
 */
983 lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
984 struct lpfc_nodelist * ndlp, void *arg,
987 struct lpfc_iocbq *cmdiocb;
988 struct lpfc_dmabuf *pcmd;
990 /* software abort outstanding ADISC */
991 lpfc_els_abort(phba, ndlp, 1);
993 cmdiocb = (struct lpfc_iocbq *) arg;
994 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
996 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
997 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
999 if(lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
1000 return (ndlp->nlp_state);
1002 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1003 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1004 lpfc_issue_els_plogi(phba, ndlp, 0);
1006 return (ndlp->nlp_state);
/* ADISC_ISSUE state: PRLI received — just ACC it; state is unchanged. */
1010 lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
1011 struct lpfc_nodelist * ndlp, void *arg,
1014 struct lpfc_iocbq *cmdiocb;
1015 struct lpfc_dmabuf *pcmd;
1017 cmdiocb = (struct lpfc_iocbq *) arg;
1018 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1020 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1021 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1023 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1024 return (ndlp->nlp_state);
/* ADISC_ISSUE state: LOGO received — abort the outstanding ADISC (without
 * the extra flag used elsewhere) and run the common LOGO handling.
 */
1028 lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
1029 struct lpfc_nodelist * ndlp, void *arg,
1032 struct lpfc_iocbq *cmdiocb;
1033 struct lpfc_dmabuf *pcmd;
1035 cmdiocb = (struct lpfc_iocbq *) arg;
1036 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1038 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1039 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1041 /* software abort outstanding ADISC */
1042 lpfc_els_abort(phba, ndlp, 0);
1044 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1045 return (ndlp->nlp_state);
/* ADISC_ISSUE state: PDISC/ADISC received — run the common padisc
 * handling; state is unchanged.
 */
1049 lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
1050 struct lpfc_nodelist * ndlp, void *arg,
1053 struct lpfc_iocbq *cmdiocb;
1054 struct lpfc_dmabuf *pcmd;
1056 cmdiocb = (struct lpfc_iocbq *) arg;
1057 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1059 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1060 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1062 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1063 return (ndlp->nlp_state);
/* ADISC_ISSUE state: PRLO received — treated exactly like a LOGO. */
1067 lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
1068 struct lpfc_nodelist * ndlp, void *arg,
1071 struct lpfc_iocbq *cmdiocb;
1072 struct lpfc_dmabuf *pcmd;
1074 cmdiocb = (struct lpfc_iocbq *) arg;
1075 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1077 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1078 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1080 /* Treat like rcv logo */
1081 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1082 return (ndlp->nlp_state);
/* ADISC_ISSUE state: our ADISC completed.
 * Failure or name mismatch: wipe the cached WWNN/WWPN, unregister the
 * RPI, and park on the NPR list with a 1-second delayed PLOGI. Success:
 * assign a persistent binding if one is found (→ MAPPED), otherwise mark
 * the node as having no SCSI id (→ UNMAPPED); in both cases clear the
 * discovery-in-progress/disconnected failure-mask bits.
 */
1086 lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
1087 struct lpfc_nodelist * ndlp, void *arg,
1090 struct lpfc_iocbq *cmdiocb, *rspiocb;
1091 struct lpfc_bindlist *blp;
1095 cmdiocb = (struct lpfc_iocbq *) arg;
1096 rspiocb = cmdiocb->context_un.rsp_iocb;
1098 ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1099 irsp = &rspiocb->iocb;
1101 if ((irsp->ulpStatus) ||
1102 (!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
1103 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
1105 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1106 ndlp->nlp_flag |= NLP_DELAY_TMO;
/* Names are stale — clear them so the retry PLOGI re-learns them. */
1108 memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
1109 memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
1111 ndlp->nlp_state = NLP_STE_NPR_NODE;
1112 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1113 lpfc_unreg_rpi(phba, ndlp);
1114 return (ndlp->nlp_state);
1116 /* move to mapped / unmapped list accordingly */
1117 /* Can we assign a SCSI Id to this NPort */
1118 if ((blp = lpfc_consistent_bind_get(phba, ndlp))) {
1119 /* Next 4 lines MUST be in this order */
1120 if(lpfc_assign_binding(phba, ndlp, blp)) {
1121 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
1122 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
1123 ndlp->nlp_listp_bind = blp;
1125 lpfc_set_failmask(phba, ndlp,
1126 (LPFC_DEV_DISCOVERY_INP|LPFC_DEV_DISCONNECTED),
1129 return (ndlp->nlp_state);
1132 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1133 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1134 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1136 lpfc_set_failmask(phba, ndlp,
1137 (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
1140 return (ndlp->nlp_state);
/* ADISC_ISSUE state: device-remove event — abort the outstanding ADISC
 * and free the node.
 */
1144 lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
1145 struct lpfc_nodelist * ndlp, void *arg,
1148 /* software abort outstanding ADISC */
1149 lpfc_els_abort(phba, ndlp, 1);
1151 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1152 return (NLP_STE_FREED_NODE);
/* ADISC_ISSUE state: device-recovery — abort the outstanding ADISC, move
 * the node to NPR, and re-evaluate whether it should get ADISC next pass.
 */
1156 lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
1157 struct lpfc_nodelist * ndlp, void *arg,
1160 /* software abort outstanding ADISC */
1161 lpfc_els_abort(phba, ndlp, 1);
1163 ndlp->nlp_state = NLP_STE_NPR_NODE;
1164 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1165 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1167 lpfc_disc_set_adisc(phba, ndlp);
1168 return (ndlp->nlp_state);
/* REG_LOGIN_ISSUE state: PLOGI received — run the common PLOGI handling;
 * state is unchanged.
 */
1172 lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
1173 struct lpfc_nodelist * ndlp, void *arg,
1176 struct lpfc_iocbq *cmdiocb;
1177 struct lpfc_dmabuf *pcmd;
1179 cmdiocb = (struct lpfc_iocbq *) arg;
1180 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1182 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1183 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1185 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1186 return (ndlp->nlp_state);
/* REG_LOGIN_ISSUE state: PRLI received — ACC it; state is unchanged. */
1190 lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
1191 struct lpfc_nodelist * ndlp, void *arg,
1194 struct lpfc_iocbq *cmdiocb;
1195 struct lpfc_dmabuf *pcmd;
1197 cmdiocb = (struct lpfc_iocbq *) arg;
1198 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1200 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1201 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1203 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1204 return (ndlp->nlp_state);
/* REG_LOGIN_ISSUE state: LOGO received — run the common LOGO handling. */
1208 lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1209 struct lpfc_nodelist * ndlp, void *arg,
1212 struct lpfc_iocbq *cmdiocb;
1213 struct lpfc_dmabuf *pcmd;
1215 cmdiocb = (struct lpfc_iocbq *) arg;
1216 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1218 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1219 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1221 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1222 return (ndlp->nlp_state);
/* REG_LOGIN_ISSUE state: PDISC/ADISC received — run the common padisc
 * handling; state is unchanged.
 */
1226 lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
1227 struct lpfc_nodelist * ndlp, void *arg,
1230 struct lpfc_iocbq *cmdiocb;
1231 struct lpfc_dmabuf *pcmd;
1233 cmdiocb = (struct lpfc_iocbq *) arg;
1234 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1236 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1237 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1239 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1240 return (ndlp->nlp_state);
/* REG_LOGIN_ISSUE state: PRLO received — just ACC it; state unchanged. */
1244 lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
1245 struct lpfc_nodelist * ndlp, void *arg,
1248 struct lpfc_iocbq *cmdiocb;
1250 cmdiocb = (struct lpfc_iocbq *) arg;
1251 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1252 return (ndlp->nlp_state);
/* REG_LOGIN_ISSUE state: the REG_LOGIN mailbox completed.
 * Failure: log it, send LOGO, and park on NPR with a 1-second delayed
 * PLOGI retry. Success: record the new RPI in the rpi lookup (replacing
 * any previous one), then issue PRLI for non-fabric ports (→ PRLI_ISSUE)
 * or go straight to UNMAPPED for fabric ports.
 */
1256 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1257 struct lpfc_nodelist * ndlp,
1258 void *arg, uint32_t evt)
1264 pmb = (LPFC_MBOXQ_t *) arg;
/* varWords[1] holds the DID this REG_LOGIN was for (log context). */
1266 did = mb->un.varWords[1];
1267 if (mb->mbxStatus) {
1268 /* RegLogin failed */
1269 lpfc_printf_log(phba,
1272 "%d:0246 RegLogin failed Data: x%x x%x x%x\n",
1274 did, mb->mbxStatus, phba->hba_state);
1276 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1277 ndlp->nlp_flag |= NLP_DELAY_TMO;
1279 lpfc_issue_els_logo(phba, ndlp, 0);
1280 /* Put ndlp in npr list set plogi timer for 1 sec */
1281 ndlp->nlp_last_elscmd = (unsigned long)ELS_CMD_PLOGI;
1282 ndlp->nlp_state = NLP_STE_NPR_NODE;
1283 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1284 return (ndlp->nlp_state);
/* Drop any stale rpi-table entry before installing the new rpi. */
1287 if (ndlp->nlp_rpi != 0)
1288 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1290 ndlp->nlp_rpi = mb->un.varWords[0];
1291 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1293 /* Only if we are not a fabric nport do we issue PRLI */
1294 if (!(ndlp->nlp_type & NLP_FABRIC)) {
1295 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1296 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1297 lpfc_issue_els_prli(phba, ndlp, 0);
1299 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1300 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1302 return (ndlp->nlp_state);
/* DEVICE_RM while in REG_LOGIN_ISSUE state: drop the node from all lists
 * and report it as freed so the caller does not touch it again.
 */
1306 lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
1307 struct lpfc_nodelist * ndlp, void *arg,
1310 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1311 return (NLP_STE_FREED_NODE);
/* DEVICE_RECOVERY (e.g. link down) while in REG_LOGIN_ISSUE state:
 * park the node on the NPR list and clear its pending-discovery flag.
 */
1315 lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
1316 struct lpfc_nodelist * ndlp, void *arg,
1319 ndlp->nlp_state = NLP_STE_NPR_NODE;
1320 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1321 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1322 return (ndlp->nlp_state);
/* PLOGI received while our PRLI is outstanding: sync the payload and
 * defer to the common lpfc_rcv_plogi() handler.  State is unchanged.
 */
1326 lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
1327 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1329 struct lpfc_iocbq *cmdiocb;
1330 struct lpfc_dmabuf *pcmd;
1332 cmdiocb = (struct lpfc_iocbq *) arg;
1333 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1335 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1336 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1338 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1339 return (ndlp->nlp_state);
/* PRLI received while our own PRLI is outstanding (crossed PRLIs):
 * accept the remote PRLI; state is unchanged.
 */
1343 lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
1344 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1346 struct lpfc_iocbq *cmdiocb;
1347 struct lpfc_dmabuf *pcmd;
1349 cmdiocb = (struct lpfc_iocbq *) arg;
1350 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1352 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1353 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1355 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1356 return (ndlp->nlp_state);
/* LOGO received while our PRLI is outstanding: software-abort the pending
 * PRLI first, then process the LOGO via the common handler.
 */
1360 lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
1361 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1363 struct lpfc_iocbq *cmdiocb;
1364 struct lpfc_dmabuf *pcmd;
1366 cmdiocb = (struct lpfc_iocbq *) arg;
1367 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1369 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1370 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1372 /* Software abort outstanding PRLI before sending acc */
1373 lpfc_els_abort(phba, ndlp, 1);
1375 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1376 return (ndlp->nlp_state);
/* ADISC / PDISC received while our PRLI is outstanding: sync the payload
 * and defer to the common lpfc_rcv_padisc() handler.  State is unchanged.
 */
1380 lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
1381 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1383 struct lpfc_iocbq *cmdiocb;
1384 struct lpfc_dmabuf *pcmd;
1386 cmdiocb = (struct lpfc_iocbq *) arg;
1387 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1389 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1390 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1392 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1393 return (ndlp->nlp_state);
1396 /* This routine is invoked when we rcv a PRLO request from a nport
1397 * we are logged into. We should send back a PRLO rsp setting the
1399 * NEXT STATE = PRLI_ISSUE
1402 lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
1403 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1405 struct lpfc_iocbq *cmdiocb;
1407 cmdiocb = (struct lpfc_iocbq *) arg;
/* ACC the PRLO; node stays in PRLI_ISSUE state */
1408 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1409 return (ndlp->nlp_state);
/* Our PRLI command completed.
 *
 * Any ELS failure, or a PRLI response that does not accept FCP target
 * function, leaves the node on the UNMAPPED list with the appropriate
 * failmask bits set.  On a good target response we record FCP-2 support
 * (Retry bit), try to assign a persistent SCSI-id binding, and on success
 * move the node to the MAPPED list.  If no binding can be assigned the
 * node stays UNMAPPED with NLP_TGT_NO_SCSIID set.
 */
1413 lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1414 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1416 struct lpfc_iocbq *cmdiocb, *rspiocb;
1419 struct lpfc_bindlist *blp;
1421 cmdiocb = (struct lpfc_iocbq *) arg;
1422 rspiocb = cmdiocb->context_un.rsp_iocb;
1423 npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1425 irsp = &rspiocb->iocb;
/* ELS-level failure: give up on PRLI, leave node unmapped */
1426 if (irsp->ulpStatus) {
1427 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1428 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1429 lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCOVERY_INP,
1431 return (ndlp->nlp_state);
1434 /* Check out PRLI rsp */
1435 if ((npr->acceptRspCode != PRLI_REQ_EXECUTED) ||
1436 (npr->prliType != PRLI_FCP_TYPE) || (npr->targetFunc != 1)) {
1437 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1438 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1439 lpfc_set_failmask(phba, ndlp,
1440 (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
1442 return (ndlp->nlp_state);
/* Retry bit set means the target supports FCP-2 error recovery */
1444 if (npr->Retry == 1) {
1445 ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1448 /* Can we assign a SCSI Id to this NPort */
1449 if ((blp = lpfc_consistent_bind_get(phba, ndlp))) {
1450 /* Next 4 lines MUST be in this order */
1451 if(lpfc_assign_binding(phba, ndlp, blp)) {
1452 ndlp->nlp_state = NLP_STE_MAPPED_NODE;
1453 lpfc_nlp_list(phba, ndlp, NLP_MAPPED_LIST);
1454 ndlp->nlp_listp_bind = blp;
1456 lpfc_set_failmask(phba, ndlp,
1457 (LPFC_DEV_DISCOVERY_INP|LPFC_DEV_DISCONNECTED),
1459 return (ndlp->nlp_state);
/* No binding available: node remains unmapped, flag the missing SCSI id */
1462 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1463 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1464 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1466 lpfc_set_failmask(phba, ndlp,
1467 (LPFC_DEV_DISCOVERY_INP | LPFC_DEV_DISCONNECTED),
1469 return (ndlp->nlp_state);
1472 /*! lpfc_device_rm_prli_issue
1483 * This routine is invoked when we get a request to remove a nport we are
1484 * in the process of PRLIing. We should software abort the outstanding prli,
1485 * unreg login, send a logout. We will change node state to UNUSED_NODE, put
1486 * it on the plogi list so it can be freed when LOGO completes.
1490 lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
1491 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1493 /* software abort outstanding PRLI */
1494 lpfc_els_abort(phba, ndlp, 1);
1496 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1497 return (NLP_STE_FREED_NODE);
1501 /*! lpfc_device_recov_prli_issue
1512 * The routine is invoked when the state of a device is unknown, like
1513 * during a link down. We should remove the nodelist entry from the
1514 * unmapped list, issue a UNREG_LOGIN, do a software abort of the
1515 * outstanding PRLI command, then free the node entry.
1518 lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
1519 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1521 /* software abort outstanding PRLI */
1522 lpfc_els_abort(phba, ndlp, 1);
/* Park the node on the NPR list until the link recovers */
1524 ndlp->nlp_state = NLP_STE_NPR_NODE;
1525 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1526 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1527 return (ndlp->nlp_state);
/* PLOGI received in UNMAPPED_NODE state: sync the payload and defer to
 * the common lpfc_rcv_plogi() handler.  State is unchanged.
 */
1531 lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
1532 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1534 struct lpfc_iocbq *cmdiocb;
1535 struct lpfc_dmabuf *pcmd;
1537 cmdiocb = (struct lpfc_iocbq *) arg;
1538 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1540 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1541 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1543 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1544 return (ndlp->nlp_state);
/* PRLI received in UNMAPPED_NODE state: accept it.  State is unchanged. */
1548 lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
1549 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1551 struct lpfc_iocbq *cmdiocb;
1552 struct lpfc_dmabuf *pcmd;
1554 cmdiocb = (struct lpfc_iocbq *) arg;
1555 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1557 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1558 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1560 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1561 return (ndlp->nlp_state);
/* LOGO received in UNMAPPED_NODE state: defer to the common LOGO handler. */
1565 lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
1566 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1568 struct lpfc_iocbq *cmdiocb;
1569 struct lpfc_dmabuf *pcmd;
1571 cmdiocb = (struct lpfc_iocbq *) arg;
1572 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1574 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1575 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1577 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1578 return (ndlp->nlp_state);
/* ADISC / PDISC received in UNMAPPED_NODE state: defer to the common
 * lpfc_rcv_padisc() handler.  State is unchanged.
 */
1582 lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
1583 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1585 struct lpfc_iocbq *cmdiocb;
1586 struct lpfc_dmabuf *pcmd;
1588 cmdiocb = (struct lpfc_iocbq *) arg;
1589 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1591 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1592 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1594 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1595 return (ndlp->nlp_state);
/* PRLO received in UNMAPPED_NODE state: handle it exactly like a LOGO. */
1599 lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
1600 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1602 struct lpfc_iocbq *cmdiocb;
1603 struct lpfc_dmabuf *pcmd;
1605 cmdiocb = (struct lpfc_iocbq *) arg;
1606 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1608 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1609 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1611 /* Treat like rcv logo */
1612 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1613 return (ndlp->nlp_state);
/* DEVICE_RECOVERY in UNMAPPED_NODE state: move the node to the NPR list,
 * clear the pending-discovery flag, and decide (via lpfc_disc_set_adisc)
 * whether re-discovery should use ADISC.
 */
1617 lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
1618 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1620 ndlp->nlp_state = NLP_STE_NPR_NODE;
1621 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1622 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1623 lpfc_disc_set_adisc(phba, ndlp);
1625 return (ndlp->nlp_state);
/* PLOGI received in MAPPED_NODE state: defer to the common PLOGI handler. */
1629 lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
1630 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1632 struct lpfc_iocbq *cmdiocb;
1633 struct lpfc_dmabuf *pcmd;
1635 cmdiocb = (struct lpfc_iocbq *) arg;
1636 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1638 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1639 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1641 lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1642 return (ndlp->nlp_state);
/* PRLI received in MAPPED_NODE state: accept it.  State is unchanged. */
1646 lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
1647 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1649 struct lpfc_iocbq *cmdiocb;
1650 struct lpfc_dmabuf *pcmd;
1652 cmdiocb = (struct lpfc_iocbq *) arg;
1653 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1655 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1656 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1658 lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1659 return (ndlp->nlp_state);
/* LOGO received in MAPPED_NODE state: defer to the common LOGO handler. */
1663 lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
1664 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1666 struct lpfc_iocbq *cmdiocb;
1667 struct lpfc_dmabuf *pcmd;
1669 cmdiocb = (struct lpfc_iocbq *) arg;
1670 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1672 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1673 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1675 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1676 return (ndlp->nlp_state);
/* ADISC / PDISC received in MAPPED_NODE state: defer to the common
 * lpfc_rcv_padisc() handler.  State is unchanged.
 */
1680 lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
1681 struct lpfc_nodelist * ndlp, void *arg,
1684 struct lpfc_iocbq *cmdiocb;
1685 struct lpfc_dmabuf *pcmd;
1687 cmdiocb = (struct lpfc_iocbq *) arg;
1688 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1690 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1691 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1693 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1694 return (ndlp->nlp_state);
/* PRLO received in MAPPED_NODE state: abort all outstanding FCP I/O to
 * this target, then handle the PRLO exactly like a LOGO.
 */
1698 lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
1699 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1701 struct lpfc_iocbq *cmdiocb;
1702 struct lpfc_dmabuf *pcmd;
1704 cmdiocb = (struct lpfc_iocbq *) arg;
1705 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1707 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1708 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1710 /* flush the target */
1711 lpfc_sli_abort_iocb_tgt(phba,
1712 &phba->sli.ring[phba->sli.fcp_ring],
1713 ndlp->nlp_sid, LPFC_ABORT_ALLQ);
1715 /* Treat like rcv logo */
1716 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1717 return (ndlp->nlp_state);
/* DEVICE_RECOVERY in MAPPED_NODE state: move the node to the NPR list,
 * clear the pending-discovery flag, and let lpfc_disc_set_adisc decide
 * whether re-discovery should use ADISC.
 */
1721 lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
1722 struct lpfc_nodelist * ndlp, void *arg,
1725 ndlp->nlp_state = NLP_STE_NPR_NODE;
1726 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1727 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1728 lpfc_disc_set_adisc(phba, ndlp);
1729 return (ndlp->nlp_state);
/* PLOGI received in NPR state.  Ignore it while our own LOGO is pending.
 * If the common PLOGI handler accepts it, clear the ADISC/discovery flags;
 * otherwise, if no delayed retry timer is armed, immediately start our own
 * PLOGI and move to PLOGI_ISSUE state.
 */
1733 lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
1734 struct lpfc_nodelist * ndlp, void *arg,
1737 struct lpfc_iocbq *cmdiocb;
1738 struct lpfc_dmabuf *pcmd;
1740 cmdiocb = (struct lpfc_iocbq *) arg;
1741 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1743 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1744 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1746 /* Ignore PLOGI if we have an outstanding LOGO */
1747 if (ndlp->nlp_flag & NLP_LOGO_SND) {
1748 return (ndlp->nlp_state);
1751 if(lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
1752 ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1753 return (ndlp->nlp_state);
1756 /* send PLOGI immediately, move to PLOGI issue state */
1757 if(!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1758 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1759 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1760 lpfc_issue_els_plogi(phba, ndlp, 0);
1762 return (ndlp->nlp_state);
/* PRLI received in NPR state: reject it (LS_RJT, unable to perform) since
 * we are not logged in, then — unless a delayed retry timer is armed —
 * kick off re-login using ADISC if allowed, otherwise PLOGI.
 */
1766 lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
1767 struct lpfc_nodelist * ndlp, void *arg,
1770 struct lpfc_iocbq *cmdiocb;
1771 struct lpfc_dmabuf *pcmd;
1774 cmdiocb = (struct lpfc_iocbq *) arg;
1775 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1777 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1778 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
/* Reject: we have no login with this nport yet */
1780 memset(&stat, 0, sizeof (struct ls_rjt));
1781 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1782 stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1783 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
1785 if(!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1786 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1787 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1788 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1789 lpfc_issue_els_adisc(phba, ndlp, 0);
1791 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1792 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1793 lpfc_issue_els_plogi(phba, ndlp, 0);
1796 return (ndlp->nlp_state);
/* LOGO received in NPR state: defer to the common LOGO handler. */
1800 lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
1801 struct lpfc_nodelist * ndlp, void *arg,
1804 struct lpfc_iocbq *cmdiocb;
1805 struct lpfc_dmabuf *pcmd;
1807 cmdiocb = (struct lpfc_iocbq *) arg;
1808 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1810 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1811 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1813 lpfc_rcv_logo(phba, ndlp, cmdiocb);
1814 return (ndlp->nlp_state);
/* ADISC / PDISC received in NPR state: process it via the common handler,
 * then — unless a delayed retry timer is armed — restart login, using
 * ADISC if allowed, otherwise PLOGI.
 */
1818 lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1819 struct lpfc_nodelist * ndlp, void *arg,
1822 struct lpfc_iocbq *cmdiocb;
1823 struct lpfc_dmabuf *pcmd;
1825 cmdiocb = (struct lpfc_iocbq *) arg;
1826 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1828 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1829 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1831 lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1833 if(!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1834 if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1835 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1836 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1837 lpfc_issue_els_adisc(phba, ndlp, 0);
1839 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1840 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1841 lpfc_issue_els_plogi(phba, ndlp, 0);
1844 return (ndlp->nlp_state);
/* PRLO received in NPR state: ACC it, then arrange a fresh PLOGI.  If a
 * delayed PLOGI is already pending, leave it alone; any other pending
 * delayed command is cancelled so PLOGI can be issued immediately.
 */
1848 lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
1849 struct lpfc_nodelist * ndlp, void *arg,
1852 struct lpfc_iocbq *cmdiocb;
1853 struct lpfc_dmabuf *pcmd;
1855 cmdiocb = (struct lpfc_iocbq *) arg;
1856 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1858 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
1859 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
1861 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1863 if(ndlp->nlp_flag & NLP_DELAY_TMO) {
/* A delayed PLOGI is already scheduled; nothing more to do */
1864 if (ndlp->nlp_last_elscmd == (unsigned long)ELS_CMD_PLOGI) {
1865 return (ndlp->nlp_state);
/* Cancel whatever other delayed ELS was pending */
1867 del_timer_sync(&ndlp->nlp_delayfunc);
1868 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1872 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1873 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1874 lpfc_issue_els_plogi(phba, ndlp, 0);
1875 return (ndlp->nlp_state);
/* Our LOGO completed while in NPR state: unregister the RPI and stay put. */
1879 lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
1880 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1882 lpfc_unreg_rpi(phba, ndlp);
1883 /* This routine does nothing, just return the current state */
1884 return (ndlp->nlp_state);
/* REG_LOGIN mailbox completed while the node is in NPR state: record the
 * newly assigned RPI (dropping any stale one first).  State is unchanged.
 */
1888 lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1889 struct lpfc_nodelist * ndlp, void *arg,
1895 pmb = (LPFC_MBOXQ_t *) arg;
1899 if (ndlp->nlp_rpi != 0)
1900 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1902 ndlp->nlp_rpi = mb->un.varWords[0];
1903 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1905 return (ndlp->nlp_state);
/* DEVICE_RM in NPR state: drop the node from all lists and report freed. */
1909 lpfc_device_rm_npr_node(struct lpfc_hba * phba,
1910 struct lpfc_nodelist * ndlp, void *arg,
1913 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1914 return (NLP_STE_FREED_NODE);
/* DEVICE_RECOVERY in NPR state: just clear the pending-discovery flag;
 * the node is already parked on the NPR list.
 */
1918 lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1919 struct lpfc_nodelist * ndlp, void *arg,
1922 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1923 return (ndlp->nlp_state);
1927 /* This next section defines the NPort Discovery State Machine */
1929 /* There are 4 different double linked lists nodelist entries can reside on.
1930 * The plogi list and adisc list are used when Link Up discovery or RSCN
1931 * processing is needed. Each list holds the nodes that we will send PLOGI
1932 * or ADISC on. These lists will keep track of what nodes will be affected
1933 * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
1934 * The unmapped_list will contain all nodes that we have successfully logged
1935 * into at the Fibre Channel level. The mapped_list will contain all nodes
1936 * that are mapped FCP targets.
1939 * The bind list is a list of undiscovered (potentially non-existent) nodes
1940 * that we have saved binding information on. This information is used when
1941 * nodes transition from the unmapped to the mapped list.
1943 /* For UNUSED_NODE state, the node has just been allocated .
1944 * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
1945 * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
1946 * and put on the unmapped list. For ADISC processing, the node is taken off
1947 * the ADISC list and placed on either the mapped or unmapped list (depending
1948 * on its previous state). Once on the unmapped list, a PRLI is issued and the
1949 * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
1950 * changed to UNMAPPED_NODE. If the completion indicates a mapped
1951 * node, the node is taken off the unmapped list. The binding list is checked
1952 * for a valid binding, or a binding is automatically assigned. If binding
1953 * assignment is unsuccessful, the node is left on the unmapped list. If
1954 * binding assignment is successful, the associated binding list entry (if
1955 * any) is removed, and the node is placed on the mapped list.
1958 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
1959 * lists will receive a DEVICE_RECOVERY event. If the linkdown or nodev timers
1960 * expire, all affected nodes will receive a DEVICE_RM event.
1963 * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
1964 * to either the ADISC or PLOGI list. After a Nameserver query or ALPA loopmap
1965 * check, additional nodes may be added or removed (via DEVICE_RM) to / from
1966 * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
1967 * we will first process the ADISC list. 32 entries are processed initially and
1968 * ADISC is initiated for each one. Completions / Events for each node are
1969 * funnelled thru the state machine. As each node finishes ADISC processing, it
1970 * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
1971 * waiting, and the ADISC list count is identically 0, then we are done. For
1972 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
1973 * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
1974 * list. 32 entries are processed initially and PLOGI is initiated for each one.
1975 * Completions / Events for each node are funnelled thru the state machine. As
1976 * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
1977 * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
1978 * identically 0, then we are done. We have now completed discovery / RSCN
1979 * handling. Upon completion, ALL nodes should be on either the mapped or
/* State/event dispatch table, laid out row-major: one row of
 * NLP_EVT_MAX_EVENT entries per node state, indexed as
 * (state * NLP_EVT_MAX_EVENT) + event by lpfc_disc_state_machine().
 */
1983 static void *lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT] = {
1984 /* Action routine Event Current State */
1985 (void *)lpfc_rcv_plogi_unused_node, /* RCV_PLOGI UNUSED_NODE */
1986 (void *)lpfc_rcv_els_unused_node, /* RCV_PRLI */
1987 (void *)lpfc_rcv_logo_unused_node, /* RCV_LOGO */
1988 (void *)lpfc_rcv_els_unused_node, /* RCV_ADISC */
1989 (void *)lpfc_rcv_els_unused_node, /* RCV_PDISC */
1990 (void *)lpfc_rcv_els_unused_node, /* RCV_PRLO */
1991 (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
1992 (void *)lpfc_disc_illegal, /* CMPL_PRLI */
1993 (void *)lpfc_cmpl_logo_unused_node, /* CMPL_LOGO */
1994 (void *)lpfc_disc_illegal, /* CMPL_ADISC */
1995 (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
1996 (void *)lpfc_device_rm_unused_node, /* DEVICE_RM */
1997 (void *)lpfc_disc_illegal, /* DEVICE_RECOVERY */
1999 (void *)lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
2000 (void *)lpfc_rcv_els_plogi_issue, /* RCV_PRLI */
2001 (void *)lpfc_rcv_els_plogi_issue, /* RCV_LOGO */
2002 (void *)lpfc_rcv_els_plogi_issue, /* RCV_ADISC */
2003 (void *)lpfc_rcv_els_plogi_issue, /* RCV_PDISC */
2004 (void *)lpfc_rcv_els_plogi_issue, /* RCV_PRLO */
2005 (void *)lpfc_cmpl_plogi_plogi_issue, /* CMPL_PLOGI */
2006 (void *)lpfc_disc_illegal, /* CMPL_PRLI */
2007 (void *)lpfc_disc_illegal, /* CMPL_LOGO */
2008 (void *)lpfc_disc_illegal, /* CMPL_ADISC */
2009 (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2010 (void *)lpfc_device_rm_plogi_issue, /* DEVICE_RM */
2011 (void *)lpfc_device_recov_plogi_issue, /* DEVICE_RECOVERY */
2013 (void *)lpfc_rcv_plogi_adisc_issue, /* RCV_PLOGI ADISC_ISSUE */
2014 (void *)lpfc_rcv_prli_adisc_issue, /* RCV_PRLI */
2015 (void *)lpfc_rcv_logo_adisc_issue, /* RCV_LOGO */
2016 (void *)lpfc_rcv_padisc_adisc_issue, /* RCV_ADISC */
2017 (void *)lpfc_rcv_padisc_adisc_issue, /* RCV_PDISC */
2018 (void *)lpfc_rcv_prlo_adisc_issue, /* RCV_PRLO */
2019 (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
2020 (void *)lpfc_disc_illegal, /* CMPL_PRLI */
2021 (void *)lpfc_disc_illegal, /* CMPL_LOGO */
2022 (void *)lpfc_cmpl_adisc_adisc_issue, /* CMPL_ADISC */
2023 (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2024 (void *)lpfc_device_rm_adisc_issue, /* DEVICE_RM */
2025 (void *)lpfc_device_recov_adisc_issue, /* DEVICE_RECOVERY */
2027 (void *)lpfc_rcv_plogi_reglogin_issue, /* RCV_PLOGI REG_LOGIN_ISSUE */
2028 (void *)lpfc_rcv_prli_reglogin_issue, /* RCV_PRLI */
2029 (void *)lpfc_rcv_logo_reglogin_issue, /* RCV_LOGO */
2030 (void *)lpfc_rcv_padisc_reglogin_issue, /* RCV_ADISC */
2031 (void *)lpfc_rcv_padisc_reglogin_issue, /* RCV_PDISC */
2032 (void *)lpfc_rcv_prlo_reglogin_issue, /* RCV_PRLO */
2033 (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
2034 (void *)lpfc_disc_illegal, /* CMPL_PRLI */
2035 (void *)lpfc_disc_illegal, /* CMPL_LOGO */
2036 (void *)lpfc_disc_illegal, /* CMPL_ADISC */
2037 (void *)lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN */
2038 (void *)lpfc_device_rm_reglogin_issue, /* DEVICE_RM */
2039 (void *)lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2041 (void *)lpfc_rcv_plogi_prli_issue, /* RCV_PLOGI PRLI_ISSUE */
2042 (void *)lpfc_rcv_prli_prli_issue, /* RCV_PRLI */
2043 (void *)lpfc_rcv_logo_prli_issue, /* RCV_LOGO */
2044 (void *)lpfc_rcv_padisc_prli_issue, /* RCV_ADISC */
2045 (void *)lpfc_rcv_padisc_prli_issue, /* RCV_PDISC */
2046 (void *)lpfc_rcv_prlo_prli_issue, /* RCV_PRLO */
2047 (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
2048 (void *)lpfc_cmpl_prli_prli_issue, /* CMPL_PRLI */
2049 (void *)lpfc_disc_illegal, /* CMPL_LOGO */
2050 (void *)lpfc_disc_illegal, /* CMPL_ADISC */
2051 (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2052 (void *)lpfc_device_rm_prli_issue, /* DEVICE_RM */
2053 (void *)lpfc_device_recov_prli_issue, /* DEVICE_RECOVERY */
2055 (void *)lpfc_rcv_plogi_unmap_node, /* RCV_PLOGI UNMAPPED_NODE */
2056 (void *)lpfc_rcv_prli_unmap_node, /* RCV_PRLI */
2057 (void *)lpfc_rcv_logo_unmap_node, /* RCV_LOGO */
2058 (void *)lpfc_rcv_padisc_unmap_node, /* RCV_ADISC */
2059 (void *)lpfc_rcv_padisc_unmap_node, /* RCV_PDISC */
2060 (void *)lpfc_rcv_prlo_unmap_node, /* RCV_PRLO */
2061 (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
2062 (void *)lpfc_disc_illegal, /* CMPL_PRLI */
2063 (void *)lpfc_disc_illegal, /* CMPL_LOGO */
2064 (void *)lpfc_disc_illegal, /* CMPL_ADISC */
2065 (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2066 (void *)lpfc_disc_illegal, /* DEVICE_RM */
2067 (void *)lpfc_device_recov_unmap_node, /* DEVICE_RECOVERY */
2069 (void *)lpfc_rcv_plogi_mapped_node, /* RCV_PLOGI MAPPED_NODE */
2070 (void *)lpfc_rcv_prli_mapped_node, /* RCV_PRLI */
2071 (void *)lpfc_rcv_logo_mapped_node, /* RCV_LOGO */
2072 (void *)lpfc_rcv_padisc_mapped_node, /* RCV_ADISC */
2073 (void *)lpfc_rcv_padisc_mapped_node, /* RCV_PDISC */
2074 (void *)lpfc_rcv_prlo_mapped_node, /* RCV_PRLO */
2075 (void *)lpfc_disc_illegal, /* CMPL_PLOGI */
2076 (void *)lpfc_disc_illegal, /* CMPL_PRLI */
2077 (void *)lpfc_disc_illegal, /* CMPL_LOGO */
2078 (void *)lpfc_disc_illegal, /* CMPL_ADISC */
2079 (void *)lpfc_disc_illegal, /* CMPL_REG_LOGIN */
2080 (void *)lpfc_disc_illegal, /* DEVICE_RM */
2081 (void *)lpfc_device_recov_mapped_node, /* DEVICE_RECOVERY */
2083 (void *)lpfc_rcv_plogi_npr_node, /* RCV_PLOGI NPR_NODE */
2084 (void *)lpfc_rcv_prli_npr_node, /* RCV_PRLI */
2085 (void *)lpfc_rcv_logo_npr_node, /* RCV_LOGO */
2086 (void *)lpfc_rcv_padisc_npr_node, /* RCV_ADISC */
2087 (void *)lpfc_rcv_padisc_npr_node, /* RCV_PDISC */
2088 (void *)lpfc_rcv_prlo_npr_node, /* RCV_PRLO */
2089 (void *)lpfc_disc_noop, /* CMPL_PLOGI */
2090 (void *)lpfc_disc_noop, /* CMPL_PRLI */
2091 (void *)lpfc_cmpl_logo_npr_node, /* CMPL_LOGO */
2092 (void *)lpfc_disc_noop, /* CMPL_ADISC */
2093 (void *)lpfc_cmpl_reglogin_npr_node, /* CMPL_REG_LOGIN */
2094 (void *)lpfc_device_rm_npr_node, /* DEVICE_RM */
2095 (void *)lpfc_device_recov_npr_node, /* DEVICE_RECOVERY */
2099 lpfc_disc_state_machine(struct lpfc_hba * phba,
2100 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
2102 uint32_t cur_state, rc;
2103 uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
2106 ndlp->nlp_disc_refcnt++;
2107 cur_state = ndlp->nlp_state;
2109 /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2110 lpfc_printf_log(phba,
2113 "%d:0211 DSM in event x%x on NPort x%x in state %d "
2116 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2118 func = (uint32_t(*)(struct lpfc_hba *, struct lpfc_nodelist *, void *,
2120 lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2121 rc = (func) (phba, ndlp, arg, evt);
2123 /* DSM out state <rc> on NPort <nlp_DID> */
2124 lpfc_printf_log(phba,
2127 "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
2129 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2131 ndlp->nlp_disc_refcnt--;
2133 /* Check to see if ndlp removal is deferred */
2134 if ((ndlp->nlp_disc_refcnt == 0)
2135 && (ndlp->nlp_flag & NLP_DELAY_REMOVE)) {
2137 ndlp->nlp_flag &= ~NLP_DELAY_REMOVE;
2138 lpfc_nlp_remove(phba, ndlp);
2139 return (NLP_STE_FREED_NODE);
2141 if (rc == NLP_STE_FREED_NODE)
2142 return (NLP_STE_FREED_NODE);
2143 ndlp->nlp_state = rc;