1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
22 * $Id: lpfc_els.c 1.152 2004/11/18 18:27:53EST sf_support Exp $
24 #include <linux/version.h>
25 #include <linux/blkdev.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pci.h>
28 #include <linux/spinlock.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
32 #include "lpfc_disc.h"
33 #include "lpfc_scsi.h"
35 #include "lpfc_crtn.h"
37 #include "lpfc_logmsg.h"
41 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
43 static int lpfc_max_els_tries = 3;
46 lpfc_els_chk_latt(struct lpfc_hba * phba)
48 struct lpfc_sli *psli;
54 if ((phba->hba_state < LPFC_HBA_READY) &&
55 (phba->hba_state != LPFC_LINK_DOWN)) {
57 /* Read the HBA Host Attention Register */
58 ha_copy = readl(phba->HAregaddr);
60 if (ha_copy & HA_LATT) { /* Link Attention interrupt */
62 /* Pending Link Event during Discovery */
63 lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
64 "%d:0237 Pending Link Event during "
65 "Discovery: State x%x\n",
66 phba->brd_no, phba->hba_state);
68 /* CLEAR_LA should re-enable link attention events and
69 * we should then imediately take a LATT event. The
70 * LATT processing should call lpfc_linkdown() which
71 * will cleanup any left over in-progress discovery
74 phba->fc_flag |= FC_ABORT_DISCOVERY;
76 if (phba->hba_state != LPFC_CLEAR_LA) {
77 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
79 phba->hba_state = LPFC_CLEAR_LA;
80 lpfc_clear_la(phba, mbox);
82 lpfc_mbx_cmpl_clear_la;
83 if (lpfc_sli_issue_mbox
85 (MBX_NOWAIT | MBX_STOP_IOCB))
86 == MBX_NOT_FINISHED) {
102 lpfc_prep_els_iocb(struct lpfc_hba * phba,
105 uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd)
107 struct lpfc_sli *psli;
108 struct lpfc_sli_ring *pring;
109 struct lpfc_iocbq *elsiocb;
110 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
111 struct ulp_bde64 *bpl;
116 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
118 if (phba->hba_state < LPFC_LINK_UP)
122 /* Allocate buffer for command iocb */
123 elsiocb = mempool_alloc(phba->iocb_mem_pool, GFP_ATOMIC);
127 memset(elsiocb, 0, sizeof (struct lpfc_iocbq));
128 icmd = &elsiocb->iocb;
130 /* fill in BDEs for command */
131 /* Allocate buffer for command payload */
132 if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC)) == 0) ||
133 ((pcmd->virt = lpfc_mbuf_alloc(phba,
134 MEM_PRI, &(pcmd->phys))) == 0)) {
137 mempool_free( elsiocb, phba->iocb_mem_pool);
141 INIT_LIST_HEAD(&pcmd->list);
143 /* Allocate buffer for response payload */
145 prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
147 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
149 if (prsp == 0 || prsp->virt == 0) {
152 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
154 mempool_free( elsiocb, phba->iocb_mem_pool);
157 INIT_LIST_HEAD(&prsp->list);
162 /* Allocate buffer for Buffer ptr list */
163 pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_ATOMIC);
165 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
167 if (pbuflist == 0 || pbuflist->virt == 0) {
168 mempool_free( elsiocb, phba->iocb_mem_pool);
169 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
170 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
178 INIT_LIST_HEAD(&pbuflist->list);
180 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
181 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
182 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
184 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
185 icmd->un.elsreq64.remoteID = ndlp->nlp_DID; /* DID */
186 icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
188 icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
189 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
192 /* NOTE: we don't use ulpIoTag0 because it is a t2 structure */
193 tag = lpfc_sli_next_iotag(phba, pring);
194 icmd->ulpIoTag = (uint16_t)(tag & 0xffff);
195 icmd->un.elsreq64.bdl.ulpIoTag32 = tag;
196 icmd->ulpBdeCount = 1;
198 icmd->ulpClass = CLASS3;
200 bpl = (struct ulp_bde64 *) pbuflist->virt;
201 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
202 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
203 bpl->tus.f.bdeSize = cmdSize;
204 bpl->tus.f.bdeFlags = 0;
205 bpl->tus.w = le32_to_cpu(bpl->tus.w);
209 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
210 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
211 bpl->tus.f.bdeSize = FCELSSIZE;
212 bpl->tus.f.bdeFlags = BUFF_USE_RCV;
213 bpl->tus.w = le32_to_cpu(bpl->tus.w);
216 /* Save for completion so we can release these resources */
217 elsiocb->context1 = (uint8_t *) ndlp;
218 elsiocb->context2 = (uint8_t *) pcmd;
219 elsiocb->context3 = (uint8_t *) pbuflist;
220 elsiocb->retry = retry;
221 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
224 list_add(&prsp->list, &pcmd->list);
227 /* The els iocb is fully initialize. Flush it to main store for the
228 * HBA. Note that all els iocb context buffer are from the driver's
229 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
230 * the physical address.
232 pci_dma_sync_single_for_device(phba->pcidev, pbuflist->phys,
233 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
236 /* Xmit ELS command <elsCmd> to remote NPORT <did> */
237 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
238 "%d:0116 Xmit ELS command x%x to remote "
239 "NPORT x%x Data: x%x x%x\n",
240 phba->brd_no, elscmd,
241 ndlp->nlp_DID, icmd->ulpIoTag, phba->hba_state);
243 /* Xmit ELS response <elsCmd> to remote NPORT <did> */
244 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
245 "%d:0117 Xmit ELS response x%x to remote "
246 "NPORT x%x Data: x%x x%x\n",
247 phba->brd_no, elscmd,
248 ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
/*
 * lpfc_cmpl_els_flogi - completion handler for an outstanding FLOGI.
 *
 * On failure: retries via lpfc_els_retry(), clears fabric flags, and
 * (on private loop) falls back to loop-map discovery.
 * On success: syncs the response DMA buffer for the CPU, parses the
 * returned service parameters, distinguishes fabric login (F_Port)
 * from point-to-point (N_Port), and kicks off the next discovery step
 * (CONFIG_LINK / REG_LOGIN mailbox commands, or PT2PT PLOGI setup).
 *
 * Fix: log message 0101 misspelled "successfully" ("sucessfully").
 *
 * NOTE(review): extraction has elided interleaved source lines in this
 * chunk (gotos, else-arms, closing braces); comments describe only the
 * visible code.
 */
255 lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
256 struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
259 struct lpfc_dmabuf *pcmd, *prsp;
260 struct serv_parm *sp;
263 struct lpfc_sli *psli;
264 struct lpfc_nodelist *ndlp;
268 irsp = &(rspiocb->iocb);
269 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
270 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
272 /* Check to see if link went down during discovery */
273 if (lpfc_els_chk_latt(phba)) {
274 lpfc_nlp_remove(phba, ndlp);
278 if (irsp->ulpStatus) {
279 /* Check for retry */
280 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
281 /* ELS command is being retried */
284 /* FLOGI failed, so there is no fabric */
285 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
287 /* If private loop, then allow max outstanding els to be
288 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
289 * alpa map would take too long otherwise.
291 if (phba->alpa_map[0] == 0) {
292 phba->cfg_discovery_threads =
293 LPFC_MAX_DISC_THREADS;
297 lpfc_printf_log(phba,
300 "%d:0100 FLOGI failure Data: x%x x%x\n",
302 irsp->ulpStatus, irsp->un.ulpWord[4]);
304 /* The FLogI succeeded. Sync the data for the CPU before
307 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
308 lp = (uint32_t *) prsp->virt;
310 /* The HBA populated the response buffer. Flush cpu cache to
311 * before the driver touches this memory.
313 pci_dma_sync_single_for_cpu(phba->pcidev, prsp->phys,
314 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
315 sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
317 /* FLOGI completes successfully */
318 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
319 "%d:0101 FLOGI completes successfully "
320 "Data: x%x x%x x%x x%x\n",
322 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
323 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
325 if (phba->hba_state == LPFC_FLOGI) {
326 /* If Common Service Parameters indicate Nport
327 * we are point to point, if Fport we are Fabric.
330 phba->fc_flag |= FC_FABRIC;
331 if (sp->cmn.edtovResolution) {
332 /* E_D_TOV ticks are in nanoseconds */
334 (be32_to_cpu(sp->cmn.e_d_tov) +
337 /* E_D_TOV ticks are in milliseconds */
339 be32_to_cpu(sp->cmn.e_d_tov);
342 (be32_to_cpu(sp->cmn.w2.r_a_tov) +
345 if (phba->fc_topology == TOPOLOGY_LOOP) {
346 phba->fc_flag |= FC_PUBLIC_LOOP;
348 /* If we are a N-port connected to a
349 * Fabric, fixup sparam's so logins to
350 * devices on remote loops work.
352 phba->fc_sparam.cmn.altBbCredit = 1;
/* Fabric assigned our N_Port ID in word 4 of the response */
355 phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
357 memcpy(&ndlp->nlp_portname, &sp->portName,
358 sizeof (struct lpfc_name));
359 memcpy(&ndlp->nlp_nodename, &sp->nodeName,
360 sizeof (struct lpfc_name));
361 memcpy(&phba->fc_fabparam, sp,
362 sizeof (struct serv_parm));
363 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
367 phba->hba_state = LPFC_FABRIC_CFG_LINK;
368 lpfc_config_link(phba, mbox);
369 if (lpfc_sli_issue_mbox
370 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
371 == MBX_NOT_FINISHED) {
372 mempool_free(mbox, phba->mbox_mem_pool);
376 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
380 if (lpfc_reg_login(phba, Fabric_DID,
381 (uint8_t *) sp, mbox,
383 /* set_slim mailbox command needs to
384 * execute first, queue this command to
385 * be processed later.
388 lpfc_mbx_cmpl_fabric_reg_login;
389 mbox->context2 = ndlp;
390 if (lpfc_sli_issue_mbox
392 (MBX_NOWAIT | MBX_STOP_IOCB))
393 == MBX_NOT_FINISHED) {
395 phba->mbox_mem_pool);
399 mempool_free(mbox, phba->mbox_mem_pool);
403 /* We FLOGIed into an NPort, initiate pt2pt
405 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
406 phba->fc_edtov = FF_DEF_EDTOV;
407 phba->fc_ratov = FF_DEF_RATOV;
/* Higher WWPN initiates the point-to-point PLOGI */
408 rc = memcmp(&phba->fc_portname, &sp->portName,
409 sizeof(struct lpfc_name));
411 /* This side will initiate the PLOGI */
412 phba->fc_flag |= FC_PT2PT_PLOGI;
414 /* N_Port ID cannot be 0, set our to
415 * LocalID the other side will be
421 phba->fc_myDID = PT2PT_LocalID;
424 mempool_alloc(phba->mbox_mem_pool,
429 lpfc_config_link(phba, mbox);
430 if (lpfc_sli_issue_mbox
432 (MBX_NOWAIT | MBX_STOP_IOCB))
433 == MBX_NOT_FINISHED) {
435 phba->mbox_mem_pool);
438 mempool_free( ndlp, phba->nlp_mem_pool);
441 lpfc_findnode_did(phba,
445 /* Cannot find existing Fabric
446 ndlp, so allocate a new
454 lpfc_nlp_init(phba, ndlp,
457 memcpy(&ndlp->nlp_portname,
459 sizeof (struct lpfc_name));
460 memcpy(&ndlp->nlp_nodename,
462 sizeof (struct lpfc_name));
463 ndlp->nlp_state = NLP_STE_NPR_NODE;
464 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
465 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
468 /* This side will wait for the PLOGI */
469 mempool_free( ndlp, phba->nlp_mem_pool);
472 phba->fc_flag |= FC_PT2PT;
474 /* Start discovery - this should just do
476 lpfc_disc_start(phba);
483 lpfc_nlp_remove(phba, ndlp);
/* Only fall back to loop-map discovery for real failures, not for
 * driver-initiated aborts or a downed SLI */
485 if((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
486 ((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
487 (irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) {
489 /* FLOGI failed, so just use loop map to make discovery list */
490 lpfc_disc_list_loopmap(phba);
492 /* Start discovery */
493 lpfc_disc_start(phba);
497 lpfc_els_free_iocb(phba, cmdiocb);
502 lpfc_issue_els_flogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
505 struct serv_parm *sp;
507 struct lpfc_iocbq *elsiocb;
508 struct lpfc_sli_ring *pring;
509 struct lpfc_sli *psli;
510 struct lpfc_dmabuf *bmp;
516 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
518 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
519 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
520 ndlp, ELS_CMD_FLOGI)) == 0) {
524 icmd = &elsiocb->iocb;
525 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
527 /* For FLOGI request, remainder of payload is service parameters */
528 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
529 pcmd += sizeof (uint32_t);
530 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
531 sp = (struct serv_parm *) pcmd;
533 /* Setup CSPs accordingly for Fabric */
535 sp->cmn.w2.r_a_tov = 0;
536 sp->cls1.classValid = 0;
537 sp->cls2.seqDelivery = 1;
538 sp->cls3.seqDelivery = 1;
539 if (sp->cmn.fcphLow < FC_PH3)
540 sp->cmn.fcphLow = FC_PH3;
541 if (sp->cmn.fcphHigh < FC_PH3)
542 sp->cmn.fcphHigh = FC_PH3;
544 tmo = phba->fc_ratov;
545 phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
546 lpfc_set_disctmo(phba);
547 phba->fc_ratov = tmo;
549 /* Flush the els buffer to main store for the HBA. This context always
550 * comes from the driver's dma pool and is always LPFC_BPL_SIZE.
552 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
553 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
554 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
556 phba->fc_stat.elsXmitFLOGI++;
557 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
558 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
559 lpfc_els_free_iocb(phba, elsiocb);
566 lpfc_els_abort_flogi(struct lpfc_hba * phba)
568 struct lpfc_sli *psli;
569 struct lpfc_sli_ring *pring;
570 struct lpfc_iocbq *iocb, *next_iocb;
571 struct lpfc_nodelist *ndlp;
573 struct list_head *curr, *next;
575 /* Abort outstanding I/O on NPort <nlp_DID> */
576 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
577 "%d:0201 Abort outstanding I/O on NPort x%x\n",
578 phba->brd_no, Fabric_DID);
581 pring = &psli->ring[LPFC_ELS_RING];
583 /* check the txcmplq */
584 list_for_each_safe(curr, next, &pring->txcmplq) {
585 next_iocb = list_entry(curr, struct lpfc_iocbq, list);
587 /* Check to see if iocb matches the nport we are
590 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
591 ndlp = (struct lpfc_nodelist *)(iocb->context1);
592 if(ndlp && (ndlp->nlp_DID == Fabric_DID)) {
593 /* It matches, so deque and call compl
595 list_del(&iocb->list);
596 pring->txcmplq_cnt--;
598 if ((icmd->un.elsreq64.bdl.ulpIoTag32)) {
599 lpfc_sli_issue_abort_iotag32
602 if (iocb->iocb_cmpl) {
605 icmd->un.ulpWord[4] =
607 (iocb->iocb_cmpl) (phba, iocb, iocb);
609 mempool_free(iocb, phba->iocb_mem_pool);
618 lpfc_initial_flogi(struct lpfc_hba * phba)
620 struct lpfc_nodelist *ndlp;
622 /* First look for Fabric ndlp on the unmapped list */
625 lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
627 /* Cannot find existing Fabric ndlp, so allocate a new one */
628 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
632 lpfc_nlp_init(phba, ndlp, Fabric_DID);
635 phba->fc_unmap_cnt--;
636 list_del(&ndlp->nlp_listp);
637 ndlp->nlp_flag &= ~NLP_LIST_MASK;
639 if (lpfc_issue_els_flogi(phba, ndlp, 0)) {
640 mempool_free( ndlp, phba->nlp_mem_pool);
646 lpfc_more_plogi(struct lpfc_hba * phba)
650 if (phba->num_disc_nodes)
651 phba->num_disc_nodes--;
653 /* Continue discovery with <num_disc_nodes> PLOGIs to go */
654 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
655 "%d:0232 Continue discovery with %d PLOGIs to go "
656 "Data: x%x x%x x%x\n",
657 phba->brd_no, phba->num_disc_nodes, phba->fc_plogi_cnt,
658 phba->fc_flag, phba->hba_state);
660 /* Check to see if there are more PLOGIs to be sent */
661 if (phba->fc_flag & FC_NLP_MORE) {
662 /* go thru NPR list and issue any remaining ELS PLOGIs */
663 sentplogi = lpfc_els_disc_plogi(phba);
669 lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
670 struct lpfc_iocbq * rspiocb)
673 struct lpfc_sli *psli;
674 struct lpfc_nodelist *ndlp;
675 int disc, rc, did, type;
679 /* we pass cmdiocb to state machine which needs rspiocb as well */
680 cmdiocb->context_un.rsp_iocb = rspiocb;
682 irsp = &rspiocb->iocb;
683 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
684 ndlp->nlp_flag &= ~NLP_PLOGI_SND;
686 /* Since ndlp can be freed in the disc state machine, note if this node
687 * is being used during discovery.
689 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
692 /* PLOGI completes to NPort <nlp_DID> */
693 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
694 "%d:0102 PLOGI completes to NPort x%x "
695 "Data: x%x x%x x%x x%x\n",
696 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
697 irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
699 /* Check to see if link went down during discovery */
700 if (lpfc_els_chk_latt(phba)) {
701 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
705 /* ndlp could be freed in DSM, save these values now */
706 type = ndlp->nlp_type;
709 if (irsp->ulpStatus) {
710 /* Check for retry */
711 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
712 /* ELS command is being retried */
714 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
720 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
721 if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
722 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
723 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
724 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
727 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
731 /* Good status, call state machine */
732 rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb,
736 if(type & NLP_FABRIC) {
737 /* If we cannot login to Nameserver, kick off discovery now */
738 if ((did == NameServer_DID) && (rc == NLP_STE_FREED_NODE)) {
739 lpfc_disc_start(phba);
744 if (disc && phba->num_disc_nodes) {
745 /* Check to see if there are more PLOGIs to be sent */
746 lpfc_more_plogi(phba);
749 if (rc != NLP_STE_FREED_NODE)
750 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
752 if (phba->num_disc_nodes == 0) {
753 lpfc_can_disctmo(phba);
754 if (phba->fc_flag & FC_RSCN_MODE) {
755 /* Check to see if more RSCNs came in while we were
756 * processing this one.
758 if ((phba->fc_rscn_id_cnt == 0) &&
759 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
760 lpfc_els_flush_rscn(phba);
762 lpfc_els_handle_rscn(phba);
768 lpfc_els_free_iocb(phba, cmdiocb);
773 lpfc_issue_els_plogi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
776 struct serv_parm *sp;
778 struct lpfc_iocbq *elsiocb;
779 struct lpfc_sli_ring *pring;
780 struct lpfc_sli *psli;
781 struct lpfc_dmabuf *bmp;
786 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
788 cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
789 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
790 ndlp, ELS_CMD_PLOGI)) == 0) {
794 icmd = &elsiocb->iocb;
795 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
797 /* For PLOGI request, remainder of payload is service parameters */
798 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
799 pcmd += sizeof (uint32_t);
800 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
801 sp = (struct serv_parm *) pcmd;
803 if (sp->cmn.fcphLow < FC_PH_4_3)
804 sp->cmn.fcphLow = FC_PH_4_3;
806 if (sp->cmn.fcphHigh < FC_PH3)
807 sp->cmn.fcphHigh = FC_PH3;
809 /* The lpfc iocb is fully initialize. Flush it to main store for the
810 * HBA. Note that all els iocb context buffer are from the driver's
811 * dma pool and have length LPFC_BPL_SIZE.
813 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
814 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
815 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
817 phba->fc_stat.elsXmitPLOGI++;
818 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
819 ndlp->nlp_flag |= NLP_PLOGI_SND;
820 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
821 ndlp->nlp_flag &= ~NLP_PLOGI_SND;
822 lpfc_els_free_iocb(phba, elsiocb);
829 lpfc_cmpl_els_prli(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
830 struct lpfc_iocbq * rspiocb)
833 struct lpfc_sli *psli;
834 struct lpfc_nodelist *ndlp;
837 /* we pass cmdiocb to state machine which needs rspiocb as well */
838 cmdiocb->context_un.rsp_iocb = rspiocb;
840 irsp = &(rspiocb->iocb);
841 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
842 ndlp->nlp_flag &= ~NLP_PRLI_SND;
844 /* PRLI completes to NPort <nlp_DID> */
845 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
846 "%d:0103 PRLI completes to NPort x%x "
847 "Data: x%x x%x x%x\n",
848 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
849 irsp->un.ulpWord[4], phba->num_disc_nodes);
851 phba->fc_prli_sent--;
852 /* Check to see if link went down during discovery */
853 if (lpfc_els_chk_latt(phba))
856 if (irsp->ulpStatus) {
857 /* Check for retry */
858 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
859 /* ELS command is being retried */
863 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
864 if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
865 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
866 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
870 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
874 /* Good status, call state machine */
875 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
879 lpfc_els_free_iocb(phba, cmdiocb);
884 lpfc_issue_els_prli(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
889 struct lpfc_iocbq *elsiocb;
890 struct lpfc_sli_ring *pring;
891 struct lpfc_sli *psli;
892 struct lpfc_dmabuf *bmp;
897 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
899 cmdsize = (sizeof (uint32_t) + sizeof (PRLI));
900 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
901 ndlp, ELS_CMD_PRLI)) == 0) {
905 icmd = &elsiocb->iocb;
906 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
908 /* For PRLI request, remainder of payload is service parameters */
909 memset(pcmd, 0, (sizeof (PRLI) + sizeof (uint32_t)));
910 *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
911 pcmd += sizeof (uint32_t);
913 /* For PRLI, remainder of payload is PRLI parameter page */
916 * If our firmware version is 3.20 or later,
917 * set the following bits for FC-TAPE support.
919 if (phba->vpd.rev.feaLevelHigh >= 0x02) {
920 npr->ConfmComplAllowed = 1;
922 npr->TaskRetryIdReq = 1;
924 npr->estabImagePair = 1;
925 npr->readXferRdyDis = 1;
927 /* For FCP support */
928 npr->prliType = PRLI_FCP_TYPE;
929 npr->initiatorFunc = 1;
931 /* The lpfc iocb is fully initialize. Flush it to main store for the
932 * HBA. Note that all els iocb context buffer are from the driver's
933 * dma pool and have length LPFC_BPL_SIZE.
935 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
936 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
937 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
939 phba->fc_stat.elsXmitPRLI++;
940 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
941 ndlp->nlp_flag |= NLP_PRLI_SND;
942 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
943 ndlp->nlp_flag &= ~NLP_PRLI_SND;
944 lpfc_els_free_iocb(phba, elsiocb);
947 phba->fc_prli_sent++;
952 lpfc_more_adisc(struct lpfc_hba * phba)
956 if (phba->num_disc_nodes)
957 phba->num_disc_nodes--;
959 /* Continue discovery with <num_disc_nodes> ADISCs to go */
960 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
961 "%d:0210 Continue discovery with %d ADISCs to go "
962 "Data: x%x x%x x%x\n",
963 phba->brd_no, phba->num_disc_nodes, phba->fc_adisc_cnt,
964 phba->fc_flag, phba->hba_state);
966 /* Check to see if there are more ADISCs to be sent */
967 if (phba->fc_flag & FC_NLP_MORE) {
968 lpfc_set_disctmo(phba);
970 /* go thru NPR list and issue any remaining ELS ADISCs */
971 sentadisc = lpfc_els_disc_adisc(phba);
977 lpfc_rscn_disc(struct lpfc_hba * phba)
980 /* go thru NPR list and issue ELS PLOGIs */
981 if (phba->fc_npr_cnt) {
982 lpfc_els_disc_plogi(phba);
985 if (phba->fc_flag & FC_RSCN_MODE) {
986 /* Check to see if more RSCNs came in while we were
987 * processing this one.
989 if ((phba->fc_rscn_id_cnt == 0) &&
990 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
991 lpfc_els_flush_rscn(phba);
993 lpfc_els_handle_rscn(phba);
999 lpfc_cmpl_els_adisc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1000 struct lpfc_iocbq * rspiocb)
1003 struct lpfc_sli *psli;
1004 struct lpfc_nodelist *ndlp;
1010 /* we pass cmdiocb to state machine which needs rspiocb as well */
1011 cmdiocb->context_un.rsp_iocb = rspiocb;
1013 irsp = &(rspiocb->iocb);
1014 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1015 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1017 /* Since ndlp can be freed in the disc state machine, note if this node
1018 * is being used during discovery.
1020 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1022 /* ADISC completes to NPort <nlp_DID> */
1023 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1024 "%d:0104 ADISC completes to NPort x%x "
1025 "Data: x%x x%x x%x x%x\n",
1026 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
1027 irsp->un.ulpWord[4], disc, phba->num_disc_nodes);
1029 /* Check to see if link went down during discovery */
1030 if (lpfc_els_chk_latt(phba)) {
1031 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1035 if (irsp->ulpStatus) {
1036 /* Check for retry */
1037 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1038 /* ELS command is being retried */
1040 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1041 lpfc_set_disctmo(phba);
1046 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1047 if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1048 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1049 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
1050 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1053 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1054 NLP_EVT_CMPL_ADISC);
1057 /* Good status, call state machine */
1058 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1059 NLP_EVT_CMPL_ADISC);
1062 if (disc && phba->num_disc_nodes) {
1063 /* Check to see if there are more ADISCs to be sent */
1064 lpfc_more_adisc(phba);
1066 /* Check to see if we are done with ADISC authentication */
1067 if (phba->num_disc_nodes == 0) {
1068 /* If we get here, there is nothing left to wait for */
1069 if ((phba->hba_state < LPFC_HBA_READY) &&
1070 (phba->hba_state != LPFC_CLEAR_LA)) {
1071 /* Link up discovery */
1072 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
1074 phba->hba_state = LPFC_CLEAR_LA;
1075 lpfc_clear_la(phba, mbox);
1077 lpfc_mbx_cmpl_clear_la;
1078 if (lpfc_sli_issue_mbox
1080 (MBX_NOWAIT | MBX_STOP_IOCB))
1081 == MBX_NOT_FINISHED) {
1083 phba->mbox_mem_pool);
1084 lpfc_disc_flush_list(phba);
1085 psli->ring[(psli->ip_ring)].
1087 ~LPFC_STOP_IOCB_EVENT;
1088 psli->ring[(psli->fcp_ring)].
1090 ~LPFC_STOP_IOCB_EVENT;
1091 psli->ring[(psli->next_ring)].
1093 ~LPFC_STOP_IOCB_EVENT;
1099 lpfc_rscn_disc(phba);
1103 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1105 lpfc_els_free_iocb(phba, cmdiocb);
1110 lpfc_issue_els_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1115 struct lpfc_iocbq *elsiocb;
1116 struct lpfc_sli_ring *pring;
1117 struct lpfc_sli *psli;
1118 struct lpfc_dmabuf *bmp;
1123 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1125 cmdsize = (sizeof (uint32_t) + sizeof (ADISC));
1126 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
1127 ndlp, ELS_CMD_ADISC)) == 0) {
1131 icmd = &elsiocb->iocb;
1132 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1134 /* For ADISC request, remainder of payload is service parameters */
1135 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
1136 pcmd += sizeof (uint32_t);
1138 /* Fill in ADISC payload */
1139 ap = (ADISC *) pcmd;
1140 ap->hardAL_PA = phba->fc_pref_ALPA;
1141 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
1142 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1143 ap->DID = be32_to_cpu(phba->fc_myDID);
1145 /* The lpfc iocb is fully initialize. Flush it to main store for the
1146 * HBA. Note that all els iocb context buffer are from the driver's
1147 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
1148 * the physical address.
1150 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
1151 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
1152 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1154 phba->fc_stat.elsXmitADISC++;
1155 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
1156 ndlp->nlp_flag |= NLP_ADISC_SND;
1157 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1158 ndlp->nlp_flag &= ~NLP_ADISC_SND;
1159 lpfc_els_free_iocb(phba, elsiocb);
1166 lpfc_cmpl_els_logo(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1167 struct lpfc_iocbq * rspiocb)
1170 struct lpfc_sli *psli;
1171 struct lpfc_nodelist *ndlp;
1174 /* we pass cmdiocb to state machine which needs rspiocb as well */
1175 cmdiocb->context_un.rsp_iocb = rspiocb;
1177 irsp = &(rspiocb->iocb);
1178 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1179 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1181 /* LOGO completes to NPort <nlp_DID> */
1182 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1183 "%d:0105 LOGO completes to NPort x%x "
1184 "Data: x%x x%x x%x\n",
1185 phba->brd_no, ndlp->nlp_DID, irsp->ulpStatus,
1186 irsp->un.ulpWord[4], phba->num_disc_nodes);
1188 /* Check to see if link went down during discovery */
1189 if (lpfc_els_chk_latt(phba))
1192 if (irsp->ulpStatus) {
1193 /* Check for retry */
1194 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1195 /* ELS command is being retried */
1199 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1200 if((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1201 ((irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) ||
1202 (irsp->un.ulpWord[4] == IOERR_SLI_DOWN))) {
1206 lpfc_disc_state_machine(phba, ndlp, cmdiocb,
1210 /* Good status, call state machine */
1211 lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
1213 if(ndlp->nlp_flag & NLP_DELAY_TMO) {
1214 lpfc_unreg_rpi(phba, ndlp);
1219 lpfc_els_free_iocb(phba, cmdiocb);
1224 lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
1228 struct lpfc_iocbq *elsiocb;
1229 struct lpfc_sli_ring *pring;
1230 struct lpfc_sli *psli;
1231 struct lpfc_dmabuf *bmp;
1236 pring = &psli->ring[LPFC_ELS_RING];
1238 cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name));
1239 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
1240 ndlp, ELS_CMD_LOGO)) == 0) {
1244 icmd = &elsiocb->iocb;
1245 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1246 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
1247 pcmd += sizeof (uint32_t);
1249 /* Fill in LOGO payload */
1250 *((uint32_t *) (pcmd)) = be32_to_cpu(phba->fc_myDID);
1251 pcmd += sizeof (uint32_t);
1252 memcpy(pcmd, &phba->fc_portname, sizeof (struct lpfc_name));
1254 /* The els iocb is fully initialize. Flush it to main store for the
1255 * HBA. Note that all els iocb context buffer are from the driver's
1256 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
1257 * the physical address.
1259 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
1260 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
1261 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1263 phba->fc_stat.elsXmitLOGO++;
1264 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
1265 ndlp->nlp_flag |= NLP_LOGO_SND;
1266 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1267 ndlp->nlp_flag &= ~NLP_LOGO_SND;
1268 lpfc_els_free_iocb(phba, elsiocb);
1275 lpfc_cmpl_els_cmd(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1276 struct lpfc_iocbq * rspiocb)
1280 irsp = &rspiocb->iocb;
1282 /* ELS cmd tag <ulpIoTag> completes */
1283 lpfc_printf_log(phba,
1286 "%d:0106 ELS cmd tag x%x completes Data: x%x x%x\n",
1288 irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4]);
1290 /* Check to see if link went down during discovery */
1291 lpfc_els_chk_latt(phba);
1292 lpfc_els_free_iocb(phba, cmdiocb);
/* Issue a State Change Registration (SCR) ELS to the fabric controller so
 * the HBA receives RSCN notifications.  A temporary ndlp is allocated just
 * to address the iocb and is freed again on every path — the SCR exchange
 * does not need a persistent node entry.
 */
1297 lpfc_issue_els_scr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1300 struct lpfc_iocbq *elsiocb;
1301 struct lpfc_sli_ring *pring;
1302 struct lpfc_sli *psli;
1303 struct lpfc_dmabuf *bmp;
1306 struct lpfc_nodelist *ndlp;
1309 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
/* Payload = 1 word of ELS command code + the SCR parameter page. */
1310 cmdsize = (sizeof (uint32_t) + sizeof (SCR));
1311 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC)) == 0) {
1315 lpfc_nlp_init(phba, ndlp, nportid);
1317 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
1318 ndlp, ELS_CMD_SCR)) == 0) {
/* prep failed: release the temporary node before bailing out. */
1319 mempool_free( ndlp, phba->nlp_mem_pool);
1323 icmd = &elsiocb->iocb;
1324 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1326 *((uint32_t *) (pcmd)) = ELS_CMD_SCR;
1327 pcmd += sizeof (uint32_t);
1329 /* For SCR, remainder of payload is SCR parameter page */
1330 memset(pcmd, 0, sizeof (SCR));
/* Register for full notification (fabric + N_Port detected events). */
1331 ((SCR *) pcmd)->Function = SCR_FUNC_FULL;
1333 /* The els iocb is fully initialize. Flush it to main store for the
1334 * HBA. Note that all els iocb context buffer are from the driver's
1335 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
1336 * the physical address.
1338 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
1339 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
1340 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1342 phba->fc_stat.elsXmitSCR++;
1343 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1344 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1345 mempool_free( ndlp, phba->nlp_mem_pool);
1346 lpfc_els_free_iocb(phba, elsiocb);
/* Success path also frees the temporary ndlp — it was only needed to
 * build the iocb addressing; completion is handled by lpfc_cmpl_els_cmd. */
1349 mempool_free( ndlp, phba->nlp_mem_pool);
/* Issue a FARP Response (FARPR) ELS to <nportid>.  As with SCR, a temporary
 * ndlp is allocated purely to address the exchange and freed on all paths.
 */
1354 lpfc_issue_els_farpr(struct lpfc_hba * phba, uint32_t nportid, uint8_t retry)
1357 struct lpfc_iocbq *elsiocb;
1358 struct lpfc_sli_ring *pring;
1359 struct lpfc_sli *psli;
1360 struct lpfc_dmabuf *bmp;
1365 struct lpfc_nodelist *ondlp;
1366 struct lpfc_nodelist *ndlp;
1369 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1370 cmdsize = (sizeof (uint32_t) + sizeof (FARP));
1371 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC)) == 0) {
1374 lpfc_nlp_init(phba, ndlp, nportid);
/* NOTE(review): iocb is prepped with ELS_CMD_RNID although the payload
 * built below is ELS_CMD_FARPR — confirm this is intentional (the prep
 * argument may only select timeout/size class) rather than a copy-paste. */
1376 if ((elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
1377 ndlp, ELS_CMD_RNID)) == 0) {
1378 mempool_free( ndlp, phba->nlp_mem_pool);
1382 icmd = &elsiocb->iocb;
1383 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1385 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
1386 pcmd += sizeof (uint32_t);
1388 /* Fill in FARPR payload */
1389 fp = (FARP *) (pcmd);
1390 memset(fp, 0, sizeof (FARP));
/* First two payload words: responder DID and our own DID (big-endian). */
1391 lp = (uint32_t *) pcmd;
1392 *lp++ = be32_to_cpu(nportid);
1393 *lp++ = be32_to_cpu(phba->fc_myDID);
1395 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
1397 memcpy(&fp->RportName, &phba->fc_portname, sizeof (struct lpfc_name));
1398 memcpy(&fp->RnodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
/* If we already know the originator, echo its WWPN/WWNN back. */
1399 if ((ondlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, nportid))) {
1400 memcpy(&fp->OportName, &ondlp->nlp_portname,
1401 sizeof (struct lpfc_name));
1402 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
1403 sizeof (struct lpfc_name));
1406 /* The els iocb is fully initialize. Flush it to main store for the
1407 * HBA. Note that all els iocb context buffer are from the driver's
1408 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
1409 * the physical address.
1411 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
1412 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
1413 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1415 phba->fc_stat.elsXmitFARPR++;
1416 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
1417 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1418 mempool_free( ndlp, phba->nlp_mem_pool);
1419 lpfc_els_free_iocb(phba, elsiocb);
/* Temporary ndlp is not kept after the command is queued. */
1422 mempool_free( ndlp, phba->nlp_mem_pool);
/* Timer callback for delayed ELS retries (armed via ndlp->nlp_delayfunc).
 * Runs under host_lock, clears NLP_DELAY_TMO, and re-issues the last ELS
 * command recorded in ndlp->nlp_last_elscmd (the dispatch switch labels are
 * not visible in this truncated listing).
 */
1427 lpfc_els_retry_delay(unsigned long ptr)
1429 struct lpfc_hba *phba;
1430 struct lpfc_nodelist *ndlp;
1434 unsigned long iflag;
/* Timer data is the node itself; the owning HBA hangs off the node. */
1436 ndlp = (struct lpfc_nodelist *)ptr;
1437 phba = ndlp->nlp_phba;
1438 spin_lock_irqsave(phba->host->host_lock, iflag);
1439 did = (uint32_t) (ndlp->nlp_DID);
1440 cmd = (uint32_t) (ndlp->nlp_last_elscmd);
1442 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1443 retry = ndlp->nlp_retry;
/* Re-issue the saved command, moving the node to the matching state/list. */
1447 lpfc_issue_els_flogi(phba, ndlp, retry);
1450 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1451 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1452 lpfc_issue_els_plogi(phba, ndlp, retry);
1455 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1456 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1457 lpfc_issue_els_adisc(phba, ndlp, retry);
1460 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1461 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1462 lpfc_issue_els_prli(phba, ndlp, retry);
1465 ndlp->nlp_state = NLP_STE_NPR_NODE;
1466 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1467 lpfc_issue_els_logo(phba, ndlp, retry);
1470 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/* Decide whether a failed ELS command should be retried, and if so either
 * re-issue it immediately or arm a 1-second delayed retry on the node.
 * Returns (in the full source) nonzero when a retry was scheduled so the
 * caller skips its error handling.  Case bodies and returns are partially
 * elided in this truncated listing.
 */
1475 lpfc_els_retry(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1476 struct lpfc_iocbq * rspiocb)
1479 struct lpfc_dmabuf *pcmd;
1480 struct lpfc_nodelist *ndlp;
1483 int retry, maxretry;
1489 maxretry = lpfc_max_els_tries;
1490 irsp = &rspiocb->iocb;
1491 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1492 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1494 /* Note: context2 may be 0 for internal driver abort
1495 * of delays ELS command.
/* Recover the original ELS command code from the payload, if present. */
1498 if (pcmd && pcmd->virt) {
1499 elscmd = (uint32_t *) (pcmd->virt);
/* Triage by completion status: each case decides retry / no-retry / delay. */
1503 switch (irsp->ulpStatus) {
1504 case IOSTAT_FCP_RSP_ERROR:
1505 case IOSTAT_REMOTE_STOP:
1508 case IOSTAT_LOCAL_REJECT:
1509 switch ((irsp->un.ulpWord[4] & 0xff)) {
1510 case IOERR_LOOP_OPEN_FAILURE:
1511 if (cmd == ELS_CMD_PLOGI) {
1512 if (cmdiocb->retry == 0) {
1519 case IOERR_SEQUENCE_TIMEOUT:
1521 if ((cmd == ELS_CMD_FLOGI)
1522 && (phba->fc_topology != TOPOLOGY_LOOP)) {
1528 case IOERR_NO_RESOURCES:
1529 if (cmd == ELS_CMD_PLOGI) {
1535 case IOERR_INVALID_RPI:
1541 case IOSTAT_NPORT_RJT:
1542 case IOSTAT_FABRIC_RJT:
/* Temporarily-unavailable rejects are worth retrying. */
1543 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
1549 case IOSTAT_NPORT_BSY:
1550 case IOSTAT_FABRIC_BSY:
1555 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
1556 /* Added for Vendor specifc support
1557 * Just keep retrying for these Rsn / Exp codes
1559 switch (stat.un.b.lsRjtRsnCode) {
1560 case LSRJT_UNABLE_TPC:
1561 if (stat.un.b.lsRjtRsnCodeExp ==
1562 LSEXP_CMD_IN_PROGRESS) {
1563 if (cmd == ELS_CMD_PLOGI) {
1570 if (cmd == ELS_CMD_PLOGI) {
/* PLOGI against a busy target gets one extra attempt. */
1572 maxretry = lpfc_max_els_tries + 1;
1578 case LSRJT_LOGICAL_BSY:
1579 if (cmd == ELS_CMD_PLOGI) {
1588 case IOSTAT_INTERMED_RSP:
/* FDMI (management server) traffic is handled specially here. */
1596 if (ndlp->nlp_DID == FDMI_DID) {
1600 if ((++cmdiocb->retry) >= maxretry) {
1601 phba->fc_stat.elsRetryExceeded++;
1607 /* Retry ELS command <elsCmd> to remote NPORT <did> */
1608 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1609 "%d:0107 Retry ELS command x%x to remote "
1610 "NPORT x%x Data: x%x x%x\n",
1612 cmd, ndlp->nlp_DID, cmdiocb->retry, delay);
1614 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) {
1615 /* If discovery / RSCN timer is running, reset it */
1616 if (timer_pending(&phba->fc_disctmo) ||
1617 (phba->fc_flag & FC_RSCN_MODE)) {
1618 lpfc_set_disctmo(phba);
1622 phba->fc_stat.elsXmitRetry++;
/* Delayed-retry path: park the node on the NPR list and let
 * lpfc_els_retry_delay() re-issue nlp_last_elscmd in ~1s. */
1624 phba->fc_stat.elsDelayRetry++;
1625 ndlp->nlp_retry = cmdiocb->retry;
1627 mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1628 ndlp->nlp_flag |= NLP_DELAY_TMO;
1630 ndlp->nlp_state = NLP_STE_NPR_NODE;
1631 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1632 ndlp->nlp_last_elscmd = cmd;
/* Immediate-retry path: re-issue right away per command type. */
1638 lpfc_issue_els_flogi(phba, ndlp, cmdiocb->retry);
1641 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
1642 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
1643 lpfc_issue_els_plogi(phba, ndlp, cmdiocb->retry);
1646 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
1647 lpfc_nlp_list(phba, ndlp, NLP_ADISC_LIST);
1648 lpfc_issue_els_adisc(phba, ndlp, cmdiocb->retry);
1651 ndlp->nlp_state = NLP_STE_PRLI_ISSUE;
1652 lpfc_nlp_list(phba, ndlp, NLP_PRLI_LIST);
1653 lpfc_issue_els_prli(phba, ndlp, cmdiocb->retry);
1656 ndlp->nlp_state = NLP_STE_NPR_NODE;
1657 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1658 lpfc_issue_els_logo(phba, ndlp, cmdiocb->retry);
1663 /* No retry ELS command <elsCmd> to remote NPORT <did> */
1664 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1665 "%d:0108 No retry ELS command x%x to remote NPORT x%x "
1668 cmd, ndlp->nlp_DID, cmdiocb->retry, ndlp->nlp_flag);
/* Release all DMA buffers chained off an ELS iocb and return the iocb to
 * its mempool.  Layout: context2 = command buffer (its list links the
 * response buffer), context3 = buffer pointer list (BPL).
 */
1674 lpfc_els_free_iocb(struct lpfc_hba * phba, struct lpfc_iocbq * elsiocb)
1676 struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
1678 /* context2 = cmd, context2->next = rsp, context3 = bpl */
1679 if (elsiocb->context2) {
1680 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
1681 /* Free the response before processing the command. */
1682 if (!list_empty(&buf_ptr1->list)) {
1683 buf_ptr = list_entry(buf_ptr1->list.next,
1684 struct lpfc_dmabuf, list);
1685 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1688 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
1692 if (elsiocb->context3) {
1693 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
1694 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
/* Finally return the iocb wrapper itself. */
1698 mempool_free( elsiocb, phba->iocb_mem_pool);
/* Completion handler for an ACC sent in response to a received LOGO.
 * Clears NLP_LOGO_ACC and finishes tearing down the login: an unused node
 * is dropped entirely, a node in NPR recovery has its RPI unregistered.
 */
1703 lpfc_cmpl_els_logo_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1704 struct lpfc_iocbq * rspiocb)
1706 struct lpfc_nodelist *ndlp;
1708 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1710 /* ACC to LOGO completes to NPort <nlp_DID> */
1711 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1712 "%d:0109 ACC to LOGO completes to NPort x%x "
1713 "Data: x%x x%x x%x\n",
1714 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1715 ndlp->nlp_state, ndlp->nlp_rpi);
1717 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
1719 switch (ndlp->nlp_state) {
1720 case NLP_STE_UNUSED_NODE: /* node is just allocated */
1721 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1723 case NLP_STE_NPR_NODE: /* NPort Recovery mode */
1724 lpfc_unreg_rpi(phba, ndlp);
1729 lpfc_els_free_iocb(phba, cmdiocb);
/* Generic completion handler for transmitted ELS ACC/RJT responses.  If the
 * response carried a deferred REG_LOGIN mailbox (context_un.mbox) and the
 * ACC succeeded, issue that mailbox now; otherwise free it.  All paths end
 * by freeing the command iocb.
 */
1734 lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
1735 struct lpfc_iocbq * rspiocb)
1737 struct lpfc_nodelist *ndlp;
1738 LPFC_MBOXQ_t *mbox = NULL;
1740 ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1741 if (cmdiocb->context_un.mbox)
1742 mbox = cmdiocb->context_un.mbox;
1745 /* Check to see if link went down during discovery */
1746 if ((lpfc_els_chk_latt(phba)) || !ndlp) {
/* Link dropped or node gone — the queued mailbox is now moot. */
1748 mempool_free( mbox, phba->mbox_mem_pool);
1753 /* ELS response tag <ulpIoTag> completes */
1754 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1755 "%d:0110 ELS response tag x%x completes "
1756 "Data: x%x x%x x%x x%x x%x x%x\n",
1758 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
1759 rspiocb->iocb.un.ulpWord[4], ndlp->nlp_DID,
1760 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1763 if ((rspiocb->iocb.ulpStatus == 0)
1764 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
1765 /* set_slim mailbox command needs to execute first,
1766 * queue this command to be processed later.
/* Drop any stale RPI, then register the new login for this node. */
1768 lpfc_unreg_rpi(phba, ndlp);
1769 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1770 mbox->context2 = ndlp;
1771 ndlp->nlp_state = NLP_STE_REG_LOGIN_ISSUE;
1772 lpfc_nlp_list(phba, ndlp, NLP_REGLOGIN_LIST);
1773 if (lpfc_sli_issue_mbox(phba, mbox,
1774 (MBX_NOWAIT | MBX_STOP_IOCB))
1775 != MBX_NOT_FINISHED) {
1778 /* NOTE: we should have messages for unsuccessful
1780 mempool_free( mbox, phba->mbox_mem_pool);
1782 mempool_free( mbox, phba->mbox_mem_pool);
/* ACC failed while a REG_LOGIN was pending: discard the node. */
1783 if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1784 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
1790 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
1791 lpfc_els_free_iocb(phba, cmdiocb);
/* Transmit an ELS ACC response on the exchange of <oldiocb>.  <flag> selects
 * the payload: a bare ACC word, or ACC + our service parameters (PLOGI-style
 * accept).  An optional REG_LOGIN mailbox can ride along in context_un.mbox
 * to be issued from the completion handler.  <newnode> marks a node created
 * just for this exchange.
 */
1796 lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag,
1797 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp,
1798 LPFC_MBOXQ_t * mbox, uint8_t newnode)
1802 struct lpfc_iocbq *elsiocb;
1803 struct lpfc_sli_ring *pring;
1804 struct lpfc_sli *psli;
1805 struct lpfc_dmabuf *bmp;
1810 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1811 oldcmd = &oldiocb->iocb;
/* Bare ACC: payload is the single ACC command word. */
1815 cmdsize = sizeof (uint32_t);
1817 lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1818 ndlp, ELS_CMD_ACC)) == 0) {
1821 icmd = &elsiocb->iocb;
1822 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1823 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1824 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1825 pcmd += sizeof (uint32_t);
/* ACC + service parameters (used to accept PLOGI). */
1828 cmdsize = (sizeof (struct serv_parm) + sizeof (uint32_t));
1830 lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1831 ndlp, ELS_CMD_ACC)) == 0) {
1834 icmd = &elsiocb->iocb;
1835 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1836 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* Stash the deferred REG_LOGIN mailbox for lpfc_cmpl_els_acc(). */
1839 elsiocb->context_un.mbox = mbox;
1841 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1842 pcmd += sizeof (uint32_t);
1843 memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm));
1850 elsiocb->context1 = NULL;
1852 /* Xmit ELS ACC response tag <ulpIoTag> */
1853 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1854 "%d:0128 Xmit ELS ACC response tag x%x "
1855 "Data: x%x x%x x%x x%x x%x\n",
1857 elsiocb->iocb.ulpIoTag,
1858 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1859 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1861 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
1862 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
1863 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
/* LOGO accepts need the dedicated teardown completion handler. */
1865 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
1866 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
1868 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
1871 phba->fc_stat.elsXmitACC++;
1872 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1873 lpfc_els_free_iocb(phba, elsiocb);
/* Transmit an LS_RJT on the exchange of <oldiocb> with the packed
 * reason/explanation word <rejectError> as the second payload word.
 */
1880 lpfc_els_rsp_reject(struct lpfc_hba * phba, uint32_t rejectError,
1881 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
1885 struct lpfc_iocbq *elsiocb;
1886 struct lpfc_sli_ring *pring;
1887 struct lpfc_sli *psli;
1888 struct lpfc_dmabuf *bmp;
1893 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
/* Payload: LS_RJT command word + reject reason word. */
1895 cmdsize = 2 * sizeof (uint32_t);
1896 if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1897 ndlp, ELS_CMD_LS_RJT)) == 0) {
1901 icmd = &elsiocb->iocb;
1902 oldcmd = &oldiocb->iocb;
1903 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1904 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1906 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
1907 pcmd += sizeof (uint32_t);
1908 *((uint32_t *) (pcmd)) = rejectError;
1910 /* The els iocb is fully initialize. Flush it to main store for the
1911 * HBA. Note that all els iocb context buffer are from the driver's
1912 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
1913 * the physical address.
1915 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
1916 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
1917 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1919 /* Xmit ELS RJT <err> response tag <ulpIoTag> */
1920 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1921 "%d:0129 Xmit ELS RJT x%x response tag x%x "
1922 "Data: x%x x%x x%x x%x x%x\n",
1924 rejectError, elsiocb->iocb.ulpIoTag,
1925 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1926 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1928 phba->fc_stat.elsXmitLSRJT++;
1929 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
1931 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1932 lpfc_els_free_iocb(phba, elsiocb);
/* Transmit an ACC in response to a received ADISC, echoing our hard ALPA,
 * WWPN, WWNN, and DID in the ADISC parameter page.
 */
1939 lpfc_els_rsp_adisc_acc(struct lpfc_hba * phba,
1940 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
1945 struct lpfc_iocbq *elsiocb;
1946 struct lpfc_sli_ring *pring;
1947 struct lpfc_sli *psli;
1948 struct lpfc_dmabuf *bmp;
1953 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
1955 cmdsize = sizeof (uint32_t) + sizeof (ADISC);
1956 if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
1957 ndlp, ELS_CMD_ACC)) == 0) {
1961 /* Xmit ADISC ACC response tag <ulpIoTag> */
1962 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1963 "%d:0130 Xmit ADISC ACC response tag x%x "
1964 "Data: x%x x%x x%x x%x x%x\n",
1966 elsiocb->iocb.ulpIoTag,
1967 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
1968 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
1970 icmd = &elsiocb->iocb;
1971 oldcmd = &oldiocb->iocb;
1972 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
1973 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1975 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
1976 pcmd += sizeof (uint32_t);
/* ADISC accept payload: our identity so the peer can revalidate us. */
1978 ap = (ADISC *) (pcmd);
1979 ap->hardAL_PA = phba->fc_pref_ALPA;
1980 memcpy(&ap->portName, &phba->fc_portname, sizeof (struct lpfc_name));
1981 memcpy(&ap->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
1982 ap->DID = be32_to_cpu(phba->fc_myDID);
1984 /* The els iocb is fully initialize. Flush it to main store for the
1985 * HBA. Note that all els iocb context buffer are from the driver's
1986 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
1987 * the physical address.
1989 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
1990 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
1991 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
1993 phba->fc_stat.elsXmitACC++;
1994 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
1996 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
1997 lpfc_els_free_iocb(phba, elsiocb);
/* Transmit an ACC in response to a received PRLI, advertising our FCP
 * initiator capabilities in the PRLI service parameter page (with FC-TAPE
 * bits when firmware feature level >= 3.20).
 */
2004 lpfc_els_rsp_prli_acc(struct lpfc_hba * phba,
2005 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2011 struct lpfc_iocbq *elsiocb;
2012 struct lpfc_sli_ring *pring;
2013 struct lpfc_sli *psli;
2014 struct lpfc_dmabuf *bmp;
2019 pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */
2021 cmdsize = sizeof (uint32_t) + sizeof (PRLI);
/* Response word combines ACC with the PRLI code (response bit cleared). */
2022 if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2025 (ELS_CMD_PRLI & ~ELS_RSP_MASK)))) ==
2030 /* Xmit PRLI ACC response tag <ulpIoTag> */
2031 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2032 "%d:0131 Xmit PRLI ACC response tag x%x "
2033 "Data: x%x x%x x%x x%x x%x\n",
2035 elsiocb->iocb.ulpIoTag,
2036 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
2037 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
2039 icmd = &elsiocb->iocb;
2040 oldcmd = &oldiocb->iocb;
2041 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2042 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2044 *((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
2045 pcmd += sizeof (uint32_t);
2047 /* For PRLI, remainder of payload is PRLI parameter page */
2048 memset(pcmd, 0, sizeof (PRLI));
2050 npr = (PRLI *) pcmd;
2053 * If our firmware version is 3.20 or later,
2054 * set the following bits for FC-TAPE support.
2056 if (vpd->rev.feaLevelHigh >= 0x02) {
2057 npr->ConfmComplAllowed = 1;
2059 npr->TaskRetryIdReq = 1;
/* Standard FCP-initiator accept bits. */
2062 npr->acceptRspCode = PRLI_REQ_EXECUTED;
2063 npr->estabImagePair = 1;
2064 npr->readXferRdyDis = 1;
2065 npr->ConfmComplAllowed = 1;
2067 npr->prliType = PRLI_FCP_TYPE;
2068 npr->initiatorFunc = 1;
2070 /* The els iocb is fully initialize. Flush it to main store for the
2071 * HBA. Note that all els iocb context buffer are from the driver's
2072 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
2073 * the physical address.
2075 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
2076 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
2077 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2079 phba->fc_stat.elsXmitACC++;
2080 elsiocb->iocb_cmpl = lpfc_cmpl_els_acc;
2082 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
2083 lpfc_els_free_iocb(phba, elsiocb);
/* Transmit an ACC in response to a received RNID.  The common block always
 * carries our WWPN/WWNN; when the requested format is RNID_TOPOLOGY_DISC,
 * a topology-discovery page is appended as well.
 */
2090 lpfc_els_rsp_rnid_acc(struct lpfc_hba * phba,
2092 struct lpfc_iocbq * oldiocb, struct lpfc_nodelist * ndlp)
2097 struct lpfc_iocbq *elsiocb;
2098 struct lpfc_sli_ring *pring;
2099 struct lpfc_sli *psli;
2100 struct lpfc_dmabuf *bmp;
2105 pring = &psli->ring[LPFC_ELS_RING];
/* ACC word + RNID header word + common block (two WWNs)... */
2107 cmdsize = sizeof (uint32_t) + sizeof (uint32_t)
2108 + (2 * sizeof (struct lpfc_name));
/* ...plus the optional topology-discovery page. */
2110 cmdsize += sizeof (RNID_TOP_DISC);
2112 if ((elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry,
2113 ndlp, ELS_CMD_ACC)) == 0) {
2117 /* Xmit RNID ACC response tag <ulpIoTag> */
2118 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
2119 "%d:0132 Xmit RNID ACC response tag x%x "
2122 elsiocb->iocb.ulpIoTag,
2123 elsiocb->iocb.ulpContext);
2125 icmd = &elsiocb->iocb;
2126 oldcmd = &oldiocb->iocb;
2127 icmd->ulpContext = oldcmd->ulpContext; /* Xri */
2128 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2130 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
2131 pcmd += sizeof (uint32_t);
2133 memset(pcmd, 0, sizeof (RNID));
2134 rn = (RNID *) (pcmd);
2135 rn->Format = format;
2136 rn->CommonLen = (2 * sizeof (struct lpfc_name));
2137 memcpy(&rn->portName, &phba->fc_portname, sizeof (struct lpfc_name));
2138 memcpy(&rn->nodeName, &phba->fc_nodename, sizeof (struct lpfc_name));
2141 rn->SpecificLen = 0;
2143 case RNID_TOPOLOGY_DISC:
2144 rn->SpecificLen = sizeof (RNID_TOP_DISC);
2145 memcpy(&rn->un.topologyDisc.portName,
2146 &phba->fc_portname, sizeof (struct lpfc_name));
2147 rn->un.topologyDisc.unitType = RNID_HBA;
2148 rn->un.topologyDisc.physPort = 0;
2149 rn->un.topologyDisc.attachedNodes = 0;
/* Unknown formats: no format-specific data. */
2153 rn->SpecificLen = 0;
2157 /* The els iocb is fully initialize. Flush it to main store for the
2158 * HBA. Note that all els iocb context buffer are from the driver's
2159 * dma pool and have length LPFC_BPL_SIZE. Get a short-hand pointer to
2160 * the physical address.
2162 bmp = (struct lpfc_dmabuf *) (elsiocb->context2);
2163 pci_dma_sync_single_for_device(phba->pcidev, bmp->phys,
2164 LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2166 phba->fc_stat.elsXmitACC++;
2167 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2168 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
2169 * it could be freed */
2171 if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
2172 lpfc_els_free_iocb(phba, elsiocb);
/* Walk the NPR list and issue ADISC to every node still marked for
 * discovery, capped by cfg_discovery_threads; sets FC_NLP_MORE when more
 * nodes remain than the cap allows.
 */
2179 lpfc_els_disc_adisc(struct lpfc_hba * phba)
2182 struct lpfc_nodelist *ndlp, *next_ndlp;
2185 /* go thru NPR list and issue any remaining ELS ADISCs */
2186 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2188 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2189 if(ndlp->nlp_flag & NLP_NPR_ADISC) {
2190 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2191 ndlp->nlp_state = NLP_STE_ADISC_ISSUE;
2192 lpfc_nlp_list(phba, ndlp,
2194 lpfc_issue_els_adisc(phba, ndlp, 0);
2196 phba->num_disc_nodes++;
/* Stop once the configured concurrency cap is reached. */
2197 if (phba->num_disc_nodes >=
2198 phba->cfg_discovery_threads) {
2199 phba->fc_flag |= FC_NLP_MORE;
2205 if (sentadisc == 0) {
2206 phba->fc_flag &= ~FC_NLP_MORE;
/* Companion to lpfc_els_disc_adisc(): walk the NPR list and issue PLOGI to
 * nodes that need discovery but are not ADISC candidates and have no delayed
 * retry pending, capped by cfg_discovery_threads.
 */
2212 lpfc_els_disc_plogi(struct lpfc_hba * phba)
2215 struct lpfc_nodelist *ndlp, *next_ndlp;
2218 /* go thru NPR list and issue any remaining ELS PLOGIs */
2219 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2221 if((ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
2222 (!(ndlp->nlp_flag & NLP_DELAY_TMO))) {
2223 if(!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
2224 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2225 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
2226 lpfc_issue_els_plogi(phba, ndlp, 0);
2228 phba->num_disc_nodes++;
/* Respect the configured parallel-discovery cap. */
2229 if (phba->num_disc_nodes >=
2230 phba->cfg_discovery_threads) {
2231 phba->fc_flag |= FC_NLP_MORE;
2237 if (sentplogi == 0) {
2238 phba->fc_flag &= ~FC_NLP_MORE;
/* Discard every queued RSCN payload buffer, reset the RSCN bookkeeping,
 * clear RSCN mode/discovery flags, and cancel the discovery timer.
 */
2244 lpfc_els_flush_rscn(struct lpfc_hba * phba)
2246 struct lpfc_dmabuf *mp;
2249 for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
2250 mp = phba->fc_rscn_id_list[i];
2251 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2253 phba->fc_rscn_id_list[i] = NULL;
2255 phba->fc_rscn_id_cnt = 0;
2256 phba->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
2257 lpfc_can_disctmo(phba);
/* Test whether <did> is affected by any of the queued RSCN payloads.  The
 * address-format qualifier (resv bits) of each RSCN entry selects port /
 * area / domain / fabric granularity for the match.  Under full RSCN
 * rediscovery (FC_RSCN_DISCOVERY) everything matches.
 */
2262 lpfc_rscn_payload_check(struct lpfc_hba * phba, uint32_t did)
2266 struct lpfc_dmabuf *mp;
2268 uint32_t payload_len, cmd, i, match;
2270 ns_did.un.word = did;
2273 /* If we are doing a FULL RSCN rediscovery, match everything */
2274 if (phba->fc_flag & FC_RSCN_DISCOVERY) {
2278 for (i = 0; i < phba->fc_rscn_id_cnt; i++) {
2279 mp = phba->fc_rscn_id_list[i];
2280 lp = (uint32_t *) mp->virt;
/* Word 0: command code in the top byte, payload length below. */
2282 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2283 payload_len -= sizeof (uint32_t); /* take off word 0 */
2284 while (payload_len) {
2285 rscn_did.un.word = *lp++;
2286 rscn_did.un.word = be32_to_cpu(rscn_did.un.word);
2287 payload_len -= sizeof (uint32_t);
2288 switch (rscn_did.un.b.resv) {
2289 case 0: /* Single N_Port ID effected */
2290 if (ns_did.un.word == rscn_did.un.word) {
2294 case 1: /* Whole N_Port Area effected */
2295 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
2296 && (ns_did.un.b.area == rscn_did.un.b.area))
2301 case 2: /* Whole N_Port Domain effected */
2302 if (ns_did.un.b.domain == rscn_did.un.b.domain)
2307 case 3: /* Whole Fabric effected */
2311 /* Unknown Identifier in RSCN list */
2312 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2313 "%d:0217 Unknown Identifier in "
2314 "RSCN payload Data: x%x\n",
2315 phba->brd_no, rscn_did.un.word);
/* Sweep every node list and, for each node whose DID matches a pending
 * RSCN payload, fire DEVICE_RECOVERY through the discovery state machine
 * and cancel any pending delayed-retry timer on that node.
 */
2327 lpfc_rscn_recovery_check(struct lpfc_hba * phba)
2329 struct lpfc_nodelist *ndlp = NULL, *next_ndlp;
2330 struct list_head *listp;
2331 struct list_head *node_list[7];
2334 /* Look at all nodes effected by pending RSCNs and move
2337 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
2338 node_list[1] = &phba->fc_nlpmap_list;
2339 node_list[2] = &phba->fc_nlpunmap_list;
2340 node_list[3] = &phba->fc_prli_list;
2341 node_list[4] = &phba->fc_reglogin_list;
2342 node_list[5] = &phba->fc_adisc_list;
2343 node_list[6] = &phba->fc_plogi_list;
2344 for (i = 0; i < 7; i++) {
2345 listp = node_list[i];
2346 if (list_empty(listp))
2349 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
2350 if((lpfc_rscn_payload_check(phba, ndlp->nlp_DID))) {
2351 /* part of RSCN, process this entry */
2352 lpfc_set_failmask(phba, ndlp,
2353 LPFC_DEV_DISCOVERY_INP,
2356 lpfc_disc_state_machine(phba, ndlp, NULL,
2357 NLP_EVT_DEVICE_RECOVERY);
/* A delayed ELS retry is now stale — cancel it. */
2358 if(ndlp->nlp_flag & NLP_DELAY_TMO) {
2359 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2360 del_timer_sync(&ndlp->nlp_delayfunc);
/* Handle a received RSCN: if one is already being processed, defer the new
 * payload (or escalate to full rediscovery when the queue is full); else
 * enter RSCN mode, queue the payload, ACC the sender, kick recovery on
 * matching nodes, and start RSCN processing.  Queued payload buffers take
 * ownership of cmdiocb->context2 (zeroed so the caller won't free it).
 */
2369 lpfc_els_rcv_rscn(struct lpfc_hba * phba,
2370 struct lpfc_iocbq * cmdiocb,
2371 struct lpfc_nodelist * ndlp, uint8_t newnode)
2373 struct lpfc_dmabuf *pcmd;
2376 uint32_t payload_len, cmd;
2378 icmd = &cmdiocb->iocb;
2379 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2380 lp = (uint32_t *) pcmd->virt;
2382 /* The response iocb was populated by the HBA. Flush it to main store
2383 * for the driver. Note that all iocb context buffers are from the
2384 * driver's dma pool and have length LPFC_BPL_SIZE.
/* NOTE(review): this is a device->CPU transfer, yet it calls
 * pci_dma_sync_single_for_device; the sibling receive handlers
 * (lpfc_els_rcv_rrq/farp) use pci_dma_sync_single_for_cpu here —
 * confirm whether this should be ..._for_cpu. */
2386 pci_dma_sync_single_for_device(phba->pcidev, pcmd->phys,
2387 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
2390 payload_len = be32_to_cpu(cmd) & 0xffff; /* payload length */
2391 payload_len -= sizeof (uint32_t); /* take off word 0 */
2392 cmd &= ELS_CMD_MASK;
2395 lpfc_printf_log(phba,
2398 "%d:0214 RSCN received Data: x%x x%x x%x x%x\n",
2400 phba->fc_flag, payload_len, *lp, phba->fc_rscn_id_cnt);
2402 /* if we are already processing an RSCN, save the received
2403 * RSCN payload buffer, cmdiocb->context2 to process later.
2404 * If we zero, cmdiocb->context2, the calling routine will
2405 * not try to free it.
2407 if (phba->fc_flag & FC_RSCN_MODE) {
2408 if ((phba->fc_rscn_id_cnt < FC_MAX_HOLD_RSCN) &&
2409 !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
2410 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
2411 cmdiocb->context2 = NULL;
2413 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2414 "%d:0235 Deferred RSCN "
2415 "Data: x%x x%x x%x\n",
2416 phba->brd_no, phba->fc_rscn_id_cnt,
2417 phba->fc_flag, phba->hba_state);
/* Hold queue exhausted: fall back to a full rediscovery. */
2419 phba->fc_flag |= FC_RSCN_DISCOVERY;
2420 /* ReDiscovery RSCN */
2421 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2422 "%d:0234 ReDiscovery RSCN "
2423 "Data: x%x x%x x%x\n",
2424 phba->brd_no, phba->fc_rscn_id_cnt,
2425 phba->fc_flag, phba->hba_state);
2428 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL,
2431 /* send RECOVERY event for ALL nodes that match RSCN payload */
2432 lpfc_rscn_recovery_check(phba);
/* Fresh RSCN: enter RSCN mode and queue this first payload. */
2436 phba->fc_flag |= FC_RSCN_MODE;
2437 phba->fc_rscn_id_list[phba->fc_rscn_id_cnt++] = pcmd;
2439 * If we zero, cmdiocb->context2, the calling routine will
2440 * not try to free it.
2442 cmdiocb->context2 = NULL;
2444 lpfc_set_disctmo(phba);
2447 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode);
2449 /* send RECOVERY event for ALL nodes that match RSCN payload */
2450 lpfc_rscn_recovery_check(phba);
2452 return (lpfc_els_handle_rscn(phba));
/* Begin processing queued RSCNs: post an HBA_EVENT_RSCN, restart the
 * discovery timer, then query the NameServer (GID_FT) — logging into it
 * first if we have no session.  On any failure the queued RSCNs are
 * flushed.
 */
2456 lpfc_els_handle_rscn(struct lpfc_hba * phba)
2458 struct lpfc_nodelist *ndlp;
2460 lpfc_put_event(phba, HBA_EVENT_RSCN, phba->fc_myDID,
2461 (void *)(unsigned long)(phba->fc_myDID), 0, 0);
2463 /* Start timer for RSCN processing */
2464 lpfc_set_disctmo(phba);
2466 /* RSCN processed */
2467 lpfc_printf_log(phba,
2470 "%d:0215 RSCN processed Data: x%x x%x x%x x%x\n",
2472 phba->fc_flag, 0, phba->fc_rscn_id_cnt,
2475 /* To process RSCN, first compare RSCN data with NameServer */
2476 phba->fc_ns_retry = 0;
2477 if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2479 /* Good ndlp, issue CT Request to NameServer */
2480 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) == 0) {
2481 /* Wait for NameServer query cmpl before we can
2486 /* If login to NameServer does not exist, issue one */
2487 /* Good status, issue PLOGI to NameServer */
2489 lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID))) {
2490 /* Wait for NameServer login cmpl before we can
2494 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
2496 lpfc_els_flush_rscn(phba);
/* No NameServer node at all: create one and log in. */
2499 lpfc_nlp_init(phba, ndlp, NameServer_DID);
2500 ndlp->nlp_type |= NLP_FABRIC;
2501 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2502 lpfc_issue_els_plogi(phba, ndlp, 0);
2503 /* Wait for NameServer login cmpl before we can
2509 lpfc_els_flush_rscn(phba);
/* Handle a received FLOGI (point-to-point bringup).  Rejected in loop
 * topology.  Otherwise compare WWPNs: equal → re-init the link; ours
 * greater → we will PLOGI the peer (FC_PT2PT_PLOGI); bad service params →
 * LS_RJT.  Accepted FLOGIs get an ACC carrying our service parameters.
 */
2514 lpfc_els_rcv_flogi(struct lpfc_hba * phba,
2515 struct lpfc_iocbq * cmdiocb,
2516 struct lpfc_nodelist * ndlp, uint8_t newnode)
2518 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2519 uint32_t *lp = (uint32_t *) pcmd->virt;
2520 IOCB_t *icmd = &cmdiocb->iocb;
2521 struct serv_parm *sp;
2527 /* The response iocb was populated by the HBA. Flush it to main store
2528 * for the driver. Note that all iocb context buffers are from the
2529 * driver's dma pool and have length LPFC_BPL_SIZE.
/* NOTE(review): device->CPU transfer synced with ..._for_device; the
 * rcv_rrq/rcv_farp handlers use pci_dma_sync_single_for_cpu for the
 * same pattern — confirm the intended direction. */
2531 pci_dma_sync_single_for_device(phba->pcidev, pcmd->phys,
2532 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
2535 sp = (struct serv_parm *) lp;
2537 /* FLOGI received */
2539 lpfc_set_disctmo(phba);
2541 if (phba->fc_topology == TOPOLOGY_LOOP) {
2542 /* We should never receive a FLOGI in loop mode, ignore it */
2543 did = icmd->un.elsreq64.remoteID;
2545 /* An FLOGI ELS command <elsCmd> was received from DID <did> in
2547 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
2548 "%d:0113 An FLOGI ELS command x%x was received "
2549 "from DID x%x in Loop Mode\n",
2550 phba->brd_no, cmd, did);
2556 if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3))) {
2557 /* For a FLOGI we accept, then if our portname is greater
2558 * then the remote portname we initiate Nport login.
2562 rc = memcmp(&phba->fc_portname, &sp->portName,
2563 sizeof (struct lpfc_name));
/* WWPNs identical (rc == 0): restart link initialization. */
2566 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
2567 GFP_ATOMIC)) == 0) {
2570 lpfc_linkdown(phba);
2571 lpfc_init_link(phba, mbox,
2573 phba->cfg_link_speed);
2574 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2575 if (lpfc_sli_issue_mbox
2576 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
2577 == MBX_NOT_FINISHED) {
2578 mempool_free( mbox, phba->mbox_mem_pool);
2583 else if (rc > 0) { /* greater than */
/* Our WWPN wins: we will originate the point-to-point PLOGI. */
2584 phba->fc_flag |= FC_PT2PT_PLOGI;
2586 phba->fc_flag |= FC_PT2PT;
2587 phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2589 /* Reject this request because invalid parameters */
2590 stat.un.b.lsRjtRsvd0 = 0;
2591 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2592 stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
2593 stat.un.b.vendorUnique = 0;
2594 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
/* Accept: ACC with our service parameters (PLOGI-style payload). */
2599 lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, newnode);
/* Handle a received RNID: accept the default and topology-discovery
 * formats via lpfc_els_rsp_rnid_acc(); reject any other format with
 * LS_RJT (can't give data).
 */
2605 lpfc_els_rcv_rnid(struct lpfc_hba * phba,
2606 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
2608 struct lpfc_dmabuf *pcmd;
2615 icmd = &cmdiocb->iocb;
2616 did = icmd->un.elsreq64.remoteID;
2617 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2618 lp = (uint32_t *) pcmd->virt;
2620 /* The response iocb was populated by the HBA. Flush it to main store
2621 * for the driver. Note that all iocb context buffers are from the
2622 * driver's dma pool and have length LPFC_BPL_SIZE.
/* NOTE(review): device->CPU transfer synced with ..._for_device;
 * rcv_rrq/rcv_farp use ..._for_cpu — confirm intended direction. */
2624 pci_dma_sync_single_for_device(phba->pcidev, pcmd->phys,
2625 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
2632 switch (rn->Format) {
2634 case RNID_TOPOLOGY_DISC:
2636 lpfc_els_rsp_rnid_acc(phba, rn->Format, cmdiocb, ndlp);
2639 /* Reject this request because format not supported */
2640 stat.un.b.lsRjtRsvd0 = 0;
2641 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2642 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
2643 stat.un.b.vendorUnique = 0;
2644 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
/* Handle a received RRQ (Reinstate Recovery Qualifier): abort the exchange
 * named by the payload on the FCP ring — by OX_ID if the sender originated
 * it, else by RX_ID — then ACC the request.
 */
2650 lpfc_els_rcv_rrq(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2651 struct lpfc_nodelist * ndlp)
2653 struct lpfc_dmabuf *pcmd;
2656 struct lpfc_sli_ring *pring;
2657 struct lpfc_sli *psli;
2662 pring = &psli->ring[LPFC_FCP_RING];
2663 icmd = &cmdiocb->iocb;
2664 did = icmd->un.elsreq64.remoteID;
2665 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2666 lp = (uint32_t *) pcmd->virt;
2668 /* The response iocb was populated by the HBA. Flush it to main store
2669 * for the driver. Note that all iocb context buffers are from the
2670 * driver's dma pool and have length LPFC_BPL_SIZE.
/* Sync the received payload for CPU access (correct direction here). */
2672 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
2673 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
2679 /* Get oxid / rxid from payload and abort it */
2680 if ((rrq->SID == be32_to_cpu(phba->fc_myDID))) {
2681 lpfc_sli_abort_iocb_ctx(phba, pring, rrq->Oxid);
2683 lpfc_sli_abort_iocb_ctx(phba, pring, rrq->Rxid);
2685 /* ACCEPT the rrq request */
2686 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
/*
 * lpfc_els_rcv_farp - Handle a received FARP-REQ (Fibre Channel Address
 * Resolution Protocol request) ELS command.
 *
 * Checks whether the request is searching for this port by WWPN and/or
 * WWNN (the only match types supported).  On a match against a node in
 * UNMAPPED or MAPPED state, optionally re-PLOGIs to the requester
 * (FARP_REQUEST_PLOGI) and/or answers with a FARP response
 * (FARP_REQUEST_FARPR), as directed by the request flags.
 */
2692 lpfc_els_rcv_farp(struct lpfc_hba * phba,
2693 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
2695 struct lpfc_dmabuf *pcmd;
2699 uint32_t cmd, cnt, did;
2701 icmd = &cmdiocb->iocb;
2702 did = icmd->un.elsreq64.remoteID;
2703 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2704 lp = (uint32_t *) pcmd->virt;
2706 /* The response iocb was populated by the HBA. Flush it to main store
2707 * for the driver. Note that all iocb context buffers are from the
2708 * driver's dma pool and have length LPFC_BPL_SIZE.
2710 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
2711 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
2716 /* FARP-REQ received from DID <did> */
2717 lpfc_printf_log(phba,
2720 "%d:0601 FARP-REQ received from DID x%x\n",
2723 /* We will only support match on WWPN or WWNN */
/* Any match-flag bit outside NODE|PORT means an unsupported match
 * type; such requests are not serviced.
 */
2724 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
2729 /* If this FARP command is searching for my portname */
2730 if (fp->Mflags & FARP_MATCH_PORT) {
2731 if (memcmp(&fp->RportName, &phba->fc_portname,
2732 sizeof (struct lpfc_name)) == 0)
2736 /* If this FARP command is searching for my nodename */
2737 if (fp->Mflags & FARP_MATCH_NODE) {
2738 if (memcmp(&fp->RnodeName, &phba->fc_nodename,
2739 sizeof (struct lpfc_name)) == 0)
/* Only act on the request if the requester's node is in a logged-in
 * (unmapped or mapped) state.
 */
2744 if((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
2745 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
2746 /* Log back into the node before sending the FARP. */
2747 if (fp->Rflags & FARP_REQUEST_PLOGI) {
2748 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
2749 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
2750 lpfc_issue_els_plogi(phba, ndlp, 0);
2753 /* Send a FARP response to that node */
2754 if (fp->Rflags & FARP_REQUEST_FARPR) {
2755 lpfc_issue_els_farpr(phba, did, 0);
/*
 * lpfc_els_rcv_farpr - Handle a received FARP-RSP (FARP response) ELS
 * command.
 *
 * Logs receipt of the response and acknowledges it with an ACC; no
 * further processing of the payload is done here.
 */
2763 lpfc_els_rcv_farpr(struct lpfc_hba * phba,
2764 struct lpfc_iocbq * cmdiocb, struct lpfc_nodelist * ndlp)
2766 struct lpfc_dmabuf *pcmd;
2771 icmd = &cmdiocb->iocb;
2772 did = icmd->un.elsreq64.remoteID;
2773 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2774 lp = (uint32_t *) pcmd->virt;
2776 /* The response iocb was populated by the HBA. Flush it to main store
2777 * for the driver. Note that all iocb context buffers are from the
2778 * driver's dma pool and have length LPFC_BPL_SIZE.
2780 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
2781 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
2784 /* FARP-RSP received from DID <did> */
2785 lpfc_printf_log(phba,
2788 "%d:0600 FARP-RSP received from DID x%x\n",
2791 /* ACCEPT the Farp resp request */
2792 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
/*
 * lpfc_els_rcv_fan - Handle a received FAN (Fabric Address Notification)
 * ELS command.
 *
 * ACCepts the FAN, then, if the HBA is still in LPFC_LOCAL_CFG_LINK
 * state, compares the fabric port/node names carried in the payload
 * against the cached fabric parameters: a mismatch means this node has
 * switched fabrics and a new FLOGI is required (handling not visible in
 * this extract); a match allows discovery to begin directly via
 * lpfc_disc_start().
 */
2798 lpfc_els_rcv_fan(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2799 struct lpfc_nodelist * ndlp)
2801 struct lpfc_dmabuf *pcmd;
2807 icmd = &cmdiocb->iocb;
2808 did = icmd->un.elsreq64.remoteID;
2809 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2810 lp = (uint32_t *) pcmd->virt;
2812 /* The response iocb was populated by the HBA. Flush it to main store
2813 * for the driver. Note that all iocb context buffers are from the
2814 * driver's dma pool and have length LPFC_BPL_SIZE.
2816 pci_dma_sync_single_for_cpu(phba->pcidev, pcmd->phys,
2817 LPFC_BPL_SIZE, PCI_DMA_FROMDEVICE);
2824 /* ACCEPT the FAN request */
2825 lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
2827 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
2828 /* The discovery state machine needs to take a different
2829 * action if this node has switched fabrics
/* Mismatch on either the fabric port name or node name means we are
 * now attached to a different fabric than the one we logged into.
 */
2831 if ((memcmp(&fp->FportName, &phba->fc_fabparam.portName,
2832 sizeof (struct lpfc_name)) != 0)
2834 (memcmp(&fp->FnodeName, &phba->fc_fabparam.nodeName,
2835 sizeof (struct lpfc_name)) != 0)) {
2836 /* This node has switched fabrics. An FLOGI is required
2842 /* Start discovery */
2843 lpfc_disc_start(phba);
/*
 * lpfc_els_timeout_handler - ELS ring timeout timer callback.
 * @ptr: the struct lpfc_hba, cast to the timer's unsigned long cookie.
 *
 * Walks the ELS ring's completion-pending queue (txcmplq) under the
 * host lock, ages each outstanding ELS iocb by 2 * fc_ratov, and any
 * iocb whose driver timeout has expired is removed and completed with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED (or freed if it has no
 * completion handler).  libdfc-issued iocbs and FARP/FARPR commands
 * get special handling (those action lines are elided in this extract).
 * Rearms the els_tmofunc timer before returning.
 */
2850 lpfc_els_timeout_handler(unsigned long ptr)
2852 struct lpfc_hba *phba;
2853 struct lpfc_sli *psli;
2854 struct lpfc_sli_ring *pring;
2855 struct lpfc_iocbq *tmp_iocb, *piocb;
2857 struct lpfc_dmabuf *pcmd;
2858 struct list_head *dlp;
2860 uint32_t els_command;
2863 unsigned long iflag;
2865 phba = (struct lpfc_hba *)ptr;
2868 spin_lock_irqsave(phba->host->host_lock, iflag);
/* Timeout window is twice the resource-allocation timeout value. */
2869 timeout = (uint32_t)(phba->fc_ratov << 1);
2872 pring = &psli->ring[LPFC_ELS_RING];
2873 dlp = &pring->txcmplq;
/* _safe iteration: entries may be deleted while walking. */
2875 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
2878 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
2881 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2882 elscmd = (uint32_t *) (pcmd->virt);
2883 els_command = *elscmd;
2885 if ((els_command == ELS_CMD_FARP)
2886 || (els_command == ELS_CMD_FARPR)) {
/* Age the per-iocb driver timeout; only iocbs that reach zero
 * fall through to the abort path below.
 */
2890 if (piocb->drvrTimeout > 0) {
2891 if (piocb->drvrTimeout >= timeout) {
2892 piocb->drvrTimeout -= timeout;
2894 piocb->drvrTimeout = 0;
2899 list_del(&piocb->list);
2900 pring->txcmplq_cnt--;
2902 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
2903 struct lpfc_nodelist *ndlp;
2905 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
/* FIX: lpfc_findnode_rpi() returns NULL when no node matches this
 * RPI (e.g. the node was removed while the request was pending);
 * the original dereferenced it unconditionally and would oops.
 */
if (ndlp)
2906 remote_ID = ndlp->nlp_DID;
2907 if (cmd->un.elsreq64.bdl.ulpIoTag32) {
2908 lpfc_sli_issue_abort_iotag32(phba,
2912 remote_ID = cmd->un.elsreq64.remoteID;
2915 lpfc_printf_log(phba,
2918 "%d:0127 ELS timeout Data: x%x x%x x%x x%x\n",
2919 phba->brd_no, els_command,
2920 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
2923 * The iocb has timed out; abort it.
/* Complete the iocb as locally aborted so the originator's
 * completion handler performs its normal cleanup.
 */
2925 if (piocb->iocb_cmpl) {
2926 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2927 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2928 (piocb->iocb_cmpl) (phba, piocb, piocb);
2930 mempool_free(piocb, phba->iocb_mem_pool);
/* Rearm ourselves for the next aging pass. */
2934 phba->els_tmofunc.expires = jiffies + HZ * timeout;
2935 add_timer(&phba->els_tmofunc);
2936 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/*
 * lpfc_els_flush_cmd - Flush all pending and outstanding ELS commands.
 *
 * Walks both the ELS ring's transmit queue (txq) and its
 * completion-pending queue (txcmplq), removing every flushable iocb and
 * completing it with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED (or
 * freeing it if it has no completion handler).  QUE_RING and
 * ABORT/CLOSE iocbs, and libdfc-issued iocbs, are left in place.
 */
2940 lpfc_els_flush_cmd(struct lpfc_hba * phba)
2942 struct lpfc_sli *psli;
2943 struct lpfc_sli_ring *pring;
2944 struct lpfc_iocbq *tmp_iocb, *piocb;
2946 struct lpfc_dmabuf *pcmd;
2948 uint32_t els_command;
2952 pring = &psli->ring[LPFC_ELS_RING];
/* First pass: iocbs queued but not yet issued to the HBA (txq). */
2954 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
2957 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
2961 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
2962 if ((cmd->ulpCommand == CMD_QUE_RING_BUF_CN) ||
2963 (cmd->ulpCommand == CMD_QUE_RING_BUF64_CN) ||
2964 (cmd->ulpCommand == CMD_CLOSE_XRI_CN) ||
2965 (cmd->ulpCommand == CMD_ABORT_XRI_CN)) {
2969 pcmd = (struct lpfc_dmabuf *) piocb->context2;
2970 elscmd = (uint32_t *) (pcmd->virt);
2971 els_command = *elscmd;
2973 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
2974 struct lpfc_nodelist *ndlp;
2976 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
/* FIX: lpfc_findnode_rpi() may return NULL; guard the deref. */
if (ndlp)
2977 remote_ID = ndlp->nlp_DID;
2978 if (phba->hba_state == LPFC_HBA_READY) {
2982 remote_ID = cmd->un.elsreq64.remoteID;
2985 list_del(&piocb->list);
/* FIX: this loop walks pring->txq, so the txq counter must be the
 * one decremented.  The original decremented txcmplq_cnt here,
 * leaving txq_cnt too high and driving txcmplq_cnt negative.
 */
2986 pring->txq_cnt--;
2988 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2989 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2991 if (piocb->iocb_cmpl) {
2992 (piocb->iocb_cmpl) (phba, piocb, piocb);
2994 mempool_free( piocb, phba->iocb_mem_pool);
/* Second pass: iocbs already issued and awaiting completion
 * (txcmplq).
 */
2998 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
3001 if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
3004 pcmd = (struct lpfc_dmabuf *) piocb->context2;
3005 elscmd = (uint32_t *) (pcmd->virt);
3006 els_command = *elscmd;
3008 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) {
3009 struct lpfc_nodelist *ndlp;
3011 ndlp = lpfc_findnode_rpi(phba, cmd->ulpContext);
/* FIX: same NULL guard as above. */
if (ndlp)
3012 remote_ID = ndlp->nlp_DID;
3013 if (phba->hba_state == LPFC_HBA_READY) {
3017 remote_ID = cmd->un.elsreq64.remoteID;
3020 list_del(&piocb->list);
3021 pring->txcmplq_cnt--;
3023 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3024 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
3026 if (piocb->iocb_cmpl) {
3027 (piocb->iocb_cmpl) (phba, piocb, piocb);
3029 mempool_free( piocb, phba->iocb_mem_pool);
3036 lpfc_els_unsol_event(struct lpfc_hba * phba,
3037 struct lpfc_sli_ring * pring, struct lpfc_iocbq * elsiocb)
3039 struct lpfc_sli *psli;
3040 struct lpfc_nodelist *ndlp;
3041 struct lpfc_dmabuf *mp;
3048 uint32_t drop_cmd = 0; /* by default do NOT drop received cmd */
3049 uint32_t rjt_err = 0;
3052 icmd = &elsiocb->iocb;
3054 /* type of ELS cmd is first 32bit word in packet */
3055 mp = lpfc_sli_ringpostbuf_get(phba, pring, getPaddr(icmd->un.
3059 cont64[0].addrLow));
3066 lp = (uint32_t *) mp->virt;
3068 lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], 1, 1);
3070 if (icmd->ulpStatus) {
3071 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3077 /* Check to see if link went down during discovery */
3078 if (lpfc_els_chk_latt(phba)) {
3079 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3085 did = icmd->un.rcvels.remoteID;
3086 if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
3087 /* Cannot find existing Fabric ndlp, so allocate a new one */
3088 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
3090 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3096 lpfc_nlp_init(phba, ndlp, did);
3098 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) {
3099 ndlp->nlp_type |= NLP_FABRIC;
3103 phba->fc_stat.elsRcvFrame++;
3104 elsiocb->context1 = ndlp;
3105 elsiocb->context2 = mp;
3107 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
3108 cmd &= ELS_CMD_MASK;
3110 /* ELS command <elsCmd> received from NPORT <did> */
3111 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3112 "%d:0112 ELS command x%x received from NPORT x%x "
3113 "Data: x%x\n", phba->brd_no, cmd, did, phba->hba_state);
3117 phba->fc_stat.elsRcvPLOGI++;
3118 if(phba->hba_state < LPFC_DISC_AUTH) {
3119 rjt_err = LSEXP_NOTHING_MORE;
3122 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
3125 phba->fc_stat.elsRcvFLOGI++;
3126 lpfc_els_rcv_flogi(phba, elsiocb, ndlp, newnode);
3128 mempool_free( ndlp, phba->nlp_mem_pool);
3132 phba->fc_stat.elsRcvLOGO++;
3133 if(phba->hba_state < LPFC_DISC_AUTH) {
3134 rjt_err = LSEXP_NOTHING_MORE;
3137 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
3140 phba->fc_stat.elsRcvPRLO++;
3141 if(phba->hba_state < LPFC_DISC_AUTH) {
3142 rjt_err = LSEXP_NOTHING_MORE;
3145 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
3148 phba->fc_stat.elsRcvRSCN++;
3149 lpfc_els_rcv_rscn(phba, elsiocb, ndlp, newnode);
3151 mempool_free( ndlp, phba->nlp_mem_pool);
3155 phba->fc_stat.elsRcvADISC++;
3156 if(phba->hba_state < LPFC_DISC_AUTH) {
3157 rjt_err = LSEXP_NOTHING_MORE;
3160 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_ADISC);
3163 phba->fc_stat.elsRcvPDISC++;
3164 if(phba->hba_state < LPFC_DISC_AUTH) {
3165 rjt_err = LSEXP_NOTHING_MORE;
3168 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PDISC);
3171 phba->fc_stat.elsRcvFARPR++;
3172 lpfc_els_rcv_farpr(phba, elsiocb, ndlp);
3175 phba->fc_stat.elsRcvFARP++;
3176 lpfc_els_rcv_farp(phba, elsiocb, ndlp);
3179 phba->fc_stat.elsRcvFAN++;
3180 lpfc_els_rcv_fan(phba, elsiocb, ndlp);
3183 phba->fc_stat.elsRcvRRQ++;
3184 lpfc_els_rcv_rrq(phba, elsiocb, ndlp);
3187 phba->fc_stat.elsRcvPRLI++;
3188 if(phba->hba_state < LPFC_DISC_AUTH) {
3189 rjt_err = LSEXP_NOTHING_MORE;
3192 lpfc_disc_state_machine(phba, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
3195 phba->fc_stat.elsRcvRNID++;
3196 lpfc_els_rcv_rnid(phba, elsiocb, ndlp);
3199 /* Unsupported ELS command, reject */
3200 rjt_err = LSEXP_NOTHING_MORE;
3202 /* Unknown ELS command <elsCmd> received from NPORT <did> */
3203 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3204 "%d:0115 Unknown ELS command x%x received from "
3205 "NPORT x%x\n", phba->brd_no, cmd, did);
3207 mempool_free( ndlp, phba->nlp_mem_pool);
3212 /* check if need to LS_RJT received ELS cmd */
3214 stat.un.b.lsRjtRsvd0 = 0;
3215 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
3216 stat.un.b.lsRjtRsnCodeExp = rjt_err;
3217 stat.un.b.vendorUnique = 0;
3218 lpfc_els_rsp_reject(phba, stat.un.lsRjtError, elsiocb, ndlp);
3221 if (elsiocb->context2) {
3222 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3226 /* check if need to drop received ELS cmd */
3227 if (drop_cmd == 1) {
3228 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
3229 "%d:0111 Dropping received ELS cmd "
3230 "Data: x%x x%x\n", phba->brd_no,
3231 icmd->ulpStatus, icmd->un.ulpWord[4]);
3232 phba->fc_stat.elsRcvDrop++;