1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Enterprise Fibre Channel Host Bus Adapters. *
4 * Refer to the README file included with this package for *
5 * driver version and adapter support. *
6 * Copyright (C) 2004 Emulex Corporation. *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of the GNU General Public License *
11 * as published by the Free Software Foundation; either version 2 *
12 * of the License, or (at your option) any later version. *
14 * This program is distributed in the hope that it will be useful, *
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
17 * GNU General Public License for more details, a copy of which *
18 * can be found in the file COPYING included with this package. *
19 *******************************************************************/
22 /* $Id: lpfc_hbadisc.c 1.199 2004/11/18 20:19:30EST sf_support Exp $ */
25 #include <linux/version.h>
26 #include <linux/blkdev.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/kernel.h>
31 #include <linux/smp_lock.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_transport_fc.h>
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
43 #include "lpfc_crtn.h"
46 #include "lpfc_logmsg.h"
49 /* AlpaArray for assignment of scsid for scan-down and bind_method */
/*
 * Table of the valid arbitrated-loop physical addresses (AL_PA values),
 * listed from highest to lowest loop priority.  The index into this table
 * is used to derive a SCSI target id for scan-down binding (see the
 * comment above).  NOTE(review): the closing "};" of this initializer was
 * elided from this chunk.
 */
50 uint8_t lpfcAlpaArray[] = {
51 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
52 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
53 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
54 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
55 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
56 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
57 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
58 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
59 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
60 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
61 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
62 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
63 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
/*
 * lpfc_evt_iocb_free - free a deferred IOCB and its chained responses.
 *
 * Walks the response iocbs chained to @saveq, unlinking each and
 * returning it to the HBA's iocb mempool, then frees @saveq itself.
 * NOTE(review): this chunk elides the function's return type, braces,
 * and the list-head argument of list_for_each_entry_safe.
 */
67 lpfc_evt_iocb_free(struct lpfc_hba * phba, struct lpfc_iocbq * saveq)
69 struct lpfc_iocbq *rspiocbp, *tmpiocbp;
71 /* Free up iocb buffer chain for cmd just processed */
72 list_for_each_entry_safe(rspiocbp, tmpiocbp,
/* Unlink before handing the entry back to the mempool */
74 list_del(&rspiocbp->list);
75 mempool_free( rspiocbp, phba->iocb_mem_pool);
/* Finally release the head iocb itself */
77 mempool_free( saveq, phba->iocb_mem_pool);
/*
 * lpfc_process_nodev_timeout - handle expiry of a node's "nodev" timer.
 *
 * Called (from lpfc_disc_done) when a remote port has stayed unavailable
 * past the nodev timeout.  Logs the event for non-fabric nodes, clears
 * NLP_NODEV_TMO, aborts all queued FCP iocbs for every SCSI id whose
 * target structure points at @ndlp, and finally drives the node through
 * the discovery state machine with NLP_EVT_DEVICE_RM to remove it.
 * NOTE(review): the declaration of `scsid` and some braces are elided
 * from this chunk.
 */
81 lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
83 struct lpfc_target *targetp;
86 if (!(ndlp->nlp_type & NLP_FABRIC)) {
87 /* Nodev timeout on NPort <nlp_DID> */
88 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
89 "%d:0203 Nodev timeout on NPort x%x "
90 "Data: x%x x%x x%x\n",
91 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
92 ndlp->nlp_state, ndlp->nlp_rpi);
/* Timer has fired; clear the pending-timeout flag */
95 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
97 for(scsid=0;scsid<MAX_FCP_TARGET;scsid++) {
98 targetp = phba->device_queue_hash[scsid];
99 /* First see if the SCSI ID has an allocated struct
102 if (targetp->pnode == ndlp) {
103 /* flush the target */
104 lpfc_sli_abort_iocb_tgt(phba,
105 &phba->sli.ring[phba->sli.fcp_ring],
106 scsid, LPFC_ABORT_ALLQ);
/* Node never came back: remove it via the discovery state machine */
111 lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
/*
 * lpfc_disc_done - drain and dispatch queued discovery events.
 *
 * Splices phba->dpc_disc onto a local list, then processes each
 * LPFC_DISC_EVT_t by type: deferred mailbox completions, solicited and
 * unsolicited IOCB events, nodev timeouts, and (optionally) SCSI target
 * or host rescans.  Runs in the DPC thread context, under host_lock
 * (the rescan paths drop and re-take it around the midlayer calls).
 * NOTE(review): the switch(...) head, several case labels, break
 * statements, and the `pmb` declaration are elided from this chunk.
 */
116 lpfc_disc_done(struct lpfc_hba * phba)
118 struct lpfc_sli *psli = &phba->sli;
119 LPFC_DISC_EVT_t *evtp, *next_evtp;
121 struct lpfc_iocbq *cmdiocbp, *saveq;
122 struct lpfc_nodelist *ndlp;
123 LPFC_RING_MASK_t *func;
124 struct Scsi_Host *shost;
125 LIST_HEAD(local_dpc_disc);
/* Take the whole pending-event list in one shot */
127 list_splice_init(&phba->dpc_disc, &local_dpc_disc);
129 /* check discovery event list */
130 list_for_each_entry_safe(evtp, next_evtp, &local_dpc_disc, evt_listp) {
131 list_del(&evtp->evt_listp);
/* Deferred mailbox command: invoke its completion handler */
135 pmb = (LPFC_MBOXQ_t *)(evtp->evt_arg1);
136 (pmb->mbox_cmpl) (phba, pmb);
138 case LPFC_EVT_SOL_IOCB:
139 cmdiocbp = (struct lpfc_iocbq *)(evtp->evt_arg1);
140 saveq = (struct lpfc_iocbq *)(evtp->evt_arg2);
141 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
142 lpfc_evt_iocb_free(phba, saveq);
144 case LPFC_EVT_UNSOL_IOCB:
145 func = (LPFC_RING_MASK_t *)(evtp->evt_arg1);
146 saveq = (struct lpfc_iocbq *)(evtp->evt_arg2);
147 (func->lpfc_sli_rcv_unsol_event) (phba,
148 &psli->ring[LPFC_ELS_RING], saveq);
149 lpfc_evt_iocb_free(phba, saveq);
151 case LPFC_EVT_NODEV_TMO:
152 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
153 lpfc_process_nodev_timeout(phba, ndlp);
156 /* SCSI HOTPLUG supported */
158 #ifdef USE_SCAN_TARGET
160 struct lpfc_target *targetp;
162 targetp = (struct lpfc_target *)(evtp->evt_arg1);
163 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
164 "%d:0251 Rescanning scsi target %d\n",
165 phba->brd_no, targetp->scsi_id);
168 phba->device_queue_hash[targetp->scsi_id]) {
/* Midlayer scan may sleep - must not hold host_lock */
169 spin_unlock_irq(phba->host->host_lock);
170 scsi_scan_single_target(shost, 0,
172 spin_lock_irq(phba->host->host_lock);
176 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY | LOG_FCP,
177 "%d:0251 Rescanning scsi host\n", phba->brd_no);
/* Same locking rule for a full host rescan */
178 spin_unlock_irq(shost->host_lock);
179 scsi_scan_host(shost);
180 spin_lock_irq(shost->host_lock);
/*
 * Body of the driver's DPC (deferred procedure call) kernel thread -
 * presumably lpfc_do_dpc; the function header was elided from this chunk.
 *
 * Daemonizes, signals startup via dpc_startup, then loops: sleep on the
 * private semaphore until kicked, exit on SIGHUP/interruption, otherwise
 * run lpfc_disc_done() under host_lock.  On exit it clears dpc_wait and
 * completes dpc_exiting so the unload path can synchronize.
 * NOTE(review): the loop construct and `flags` declaration are elided.
 */
192 DECLARE_MUTEX_LOCKED(sem);
193 struct lpfc_hba *phba = (struct lpfc_hba *)p;
197 daemonize("lpfc_dpc_%d", phba->brd_no);
198 allow_signal(SIGHUP);
/* Publish the semaphore others down() us awake with */
200 phba->dpc_wait = &sem;
201 set_user_nice(current, -20);
/* Tell the creator we are up and running */
205 complete(&phba->dpc_startup);
208 if (down_interruptible(&sem))
211 if (signal_pending(current))
/* Process all queued discovery events under the host lock */
217 spin_lock_irqsave(phba->host->host_lock, flags);
218 lpfc_disc_done(phba);
219 spin_unlock_irqrestore(phba->host->host_lock, flags);
222 /* Zero out semaphore we were waiting on. */
223 phba->dpc_wait = NULL;
224 complete_and_exit(&phba->dpc_exiting, 0);
229 * This is only called to handle FC discovery events. Since this a rare
230 * occurance, we allocate an LPFC_DISC_EVT_t structure here instead of
231 * embedding it in the IOCB.
/*
 * lpfc_discq_post_event - queue a discovery event for the DPC thread.
 *
 * Allocates an event node (GFP_ATOMIC - may be called from completion
 * context), records the two opaque arguments, and appends it to
 * phba->dpc_disc for lpfc_disc_done() to dispatch later.
 * NOTE(review): the allocation-failure check, the event-type argument,
 * and the return statement are elided from this chunk.
 */
234 lpfc_discq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
237 LPFC_DISC_EVT_t *evtp;
239 /* All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events
240 * will be queued to DPC for processing
242 evtp = (LPFC_DISC_EVT_t *) kmalloc(sizeof(LPFC_DISC_EVT_t), GFP_ATOMIC);
246 evtp->evt_arg1 = arg1;
247 evtp->evt_arg2 = arg2;
/* Not on any list yet */
249 evtp->evt_listp.next = NULL;
250 evtp->evt_listp.prev = NULL;
252 /* Queue the event to the DPC to be processed later */
253 list_add_tail(&evtp->evt_listp, &phba->dpc_disc);
/*
 * lpfc_linkdown - handle a Fibre Channel link-down transition.
 *
 * Marks the HBA LPFC_LINK_DOWN, blocks midlayer requests (older
 * transport variants only), posts an HBA_EVENT_LINK_DOWN event, unregs
 * any default RPIs, flushes outstanding RSCN and ELS activity, then
 * walks every node list issuing DEVICE_RECOVERY events (removing
 * non-Fabric_DID fabric nodes outright), frees unused nodes, resets
 * pt2pt addressing state, and cancels the discovery timer.
 * NOTE(review): the `mb`, `rc`, `i` declarations, several braces, and
 * the tail of the routine are elided from this chunk.
 */
261 lpfc_linkdown(struct lpfc_hba * phba)
263 struct lpfc_sli *psli;
264 struct lpfc_nodelist *ndlp, *next_ndlp;
265 struct list_head *listp;
266 struct list_head *node_list[7];
271 phba->hba_state = LPFC_LINK_DOWN;
273 #if !defined(FC_TRANS_VER1) && !defined(FC_TRANS_265_BLKPATCH)
274 /* Stop all requests to the driver from the midlayer. */
275 scsi_block_requests(phba->host);
/* Notify listeners that the link went away */
278 lpfc_put_event(phba, HBA_EVENT_LINK_DOWN, phba->fc_myDID, NULL, 0, 0);
280 /* Clean up any firmware default rpi's */
281 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
282 lpfc_unreg_did(phba, 0xffffffff, mb);
283 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
284 == MBX_NOT_FINISHED) {
/* Mailbox never reached the HBA - give the buffer back */
285 mempool_free( mb, phba->mbox_mem_pool);
289 /* Cleanup any outstanding RSCN activity */
290 lpfc_els_flush_rscn(phba);
292 /* Cleanup any outstanding ELS commands */
293 lpfc_els_flush_cmd(phba);
296 * If this function was called by the lpfc_do_dpc, don't recurse into
297 * the routine again. If not, just process any outstanding
300 if (!list_empty(&phba->dpc_disc)) {
301 lpfc_disc_done(phba);
304 /* Issue a LINK DOWN event to all nodes */
305 node_list[0] = &phba->fc_npr_list; /* MUST do this list first */
306 node_list[1] = &phba->fc_nlpmap_list;
307 node_list[2] = &phba->fc_nlpunmap_list;
308 node_list[3] = &phba->fc_prli_list;
309 node_list[4] = &phba->fc_reglogin_list;
310 node_list[5] = &phba->fc_adisc_list;
311 node_list[6] = &phba->fc_plogi_list;
312 for (i = 0; i < 7; i++) {
313 listp = node_list[i];
314 if (list_empty(listp))
317 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
318 /* Fabric nodes are not handled thru state machine for
320 if (ndlp->nlp_type & NLP_FABRIC) {
321 /* Remove ALL Fabric nodes except Fabric_DID */
322 if (ndlp->nlp_DID != Fabric_DID) {
323 /* Take it off current list and free */
324 lpfc_nlp_list(phba, ndlp,
329 lpfc_set_failmask(phba, ndlp,
/* Non-fabric nodes go through normal link-down recovery */
333 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
334 NLP_EVT_DEVICE_RECOVERY);
339 /* free any ndlp's on unused list */
340 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
342 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
345 /* Setup myDID for link up if we are in pt2pt mode */
346 if (phba->fc_flag & FC_PT2PT) {
348 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
349 lpfc_config_link(phba, mb);
350 if (lpfc_sli_issue_mbox
351 (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
352 == MBX_NOT_FINISHED) {
353 mempool_free( mb, phba->mbox_mem_pool);
/* Point-to-point state is void once the link drops */
356 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
358 phba->fc_flag &= ~FC_LBIT;
360 /* Turn off discovery timer if its running */
361 lpfc_can_disctmo(phba);
363 /* Must process IOCBs on all rings to handle ABORTed I/Os */
/*
 * lpfc_linkup - handle a Fibre Channel link-up transition.
 *
 * Marks the HBA LPFC_LINK_UP, clears stale pt2pt/RSCN/discovery flags,
 * posts an HBA_EVENT_LINK_UP event, discards any old Fabric_DID login,
 * frees unused nodes, clears the DISCOVERY_INP / LINK_DOWN fail-mask
 * bits on every known node, and (older transport variants) unblocks
 * midlayer requests.
 * NOTE(review): the `i` declaration, some braces, and the fail-mask
 * set/clear arguments are elided from this chunk.
 */
368 lpfc_linkup(struct lpfc_hba * phba)
370 struct lpfc_nodelist *ndlp, *next_ndlp;
371 struct list_head *listp;
372 struct list_head *node_list[7];
375 phba->hba_state = LPFC_LINK_UP;
/* A fresh link nullifies any in-progress RSCN / pt2pt state */
376 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
377 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
378 phba->fc_ns_retry = 0;
381 lpfc_put_event(phba, HBA_EVENT_LINK_UP, phba->fc_myDID,
382 (void *)(unsigned long)(phba->fc_topology),
383 0, phba->fc_linkspeed);
386 * Clean up old Fabric NLP_FABRIC logins.
388 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
390 if (ndlp->nlp_DID == Fabric_DID) {
391 /* Take it off current list and free */
392 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
396 /* free any ndlp's on unused list */
397 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
399 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
402 /* Mark all nodes for LINK UP */
403 node_list[0] = &phba->fc_plogi_list;
404 node_list[1] = &phba->fc_adisc_list;
405 node_list[2] = &phba->fc_reglogin_list;
406 node_list[3] = &phba->fc_prli_list;
407 node_list[4] = &phba->fc_nlpunmap_list;
408 node_list[5] = &phba->fc_nlpmap_list;
409 node_list[6] = &phba->fc_npr_list;
410 for (i = 0; i < 7; i++) {
411 listp = node_list[i];
412 if (list_empty(listp))
415 list_for_each_entry(ndlp, listp, nlp_listp) {
416 lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCOVERY_INP,
418 lpfc_set_failmask(phba, ndlp, LPFC_DEV_LINK_DOWN,
/* scsi_unblock_requests may sleep; drop host_lock around it */
423 #if !defined(FC_TRANS_VER1) && !defined(FC_TRANS_265_BLKPATCH)
424 spin_unlock_irq(phba->host->host_lock);
425 scsi_unblock_requests(phba->host);
426 spin_lock_irq(phba->host->host_lock);
432 * This routine handles processing a CLEAR_LA mailbox
433 * command upon completion. It is setup in the LPFC_MBOXQ
434 * as the completion routine when the command is
435 * handed off to the SLI layer.
/*
 * On success: re-enables iocb processing on the non-ELS rings, kicks
 * off PLOGIs to any NPR nodes, marks the HBA ready, frees the mailbox,
 * clears FC_ESTABLISH_LINK bookkeeping, and re-enables Link Attention
 * interrupts.  On mailbox error (other than 0x1601) the HBA is put in
 * LPFC_HBA_ERROR.
 * NOTE(review): the `mb`/`control` declarations, the `psli` assignment,
 * and several braces/gotos are elided from this chunk.
 */
438 lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
440 struct lpfc_sli *psli;
446 /* Since we don't do discovery right now, turn these off here */
447 psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
448 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
449 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
450 /* Check for error */
451 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
452 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
453 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
454 "%d:0320 CLEAR_LA mbxStatus error x%x hba "
456 phba->brd_no, mb->mbxStatus, phba->hba_state);
458 phba->hba_state = LPFC_HBA_ERROR;
462 if(phba->fc_flag & FC_ABORT_DISCOVERY)
465 phba->num_disc_nodes = 0;
466 /* go thru NPR list and issue ELS PLOGIs */
467 if (phba->fc_npr_cnt) {
468 lpfc_els_disc_plogi(phba);
471 phba->hba_state = LPFC_HBA_READY;
474 phba->fc_flag &= ~FC_ABORT_DISCOVERY;
475 /* Device Discovery completes */
476 lpfc_printf_log(phba,
479 "%d:0225 Device Discovery completes\n",
482 mempool_free( pmb, phba->mbox_mem_pool);
483 if (phba->fc_flag & FC_ESTABLISH_LINK) {
484 phba->fc_flag &= ~FC_ESTABLISH_LINK;
/* Link is settled - stop the establish and discovery timers */
486 del_timer_sync(&phba->fc_estabtmo);
487 lpfc_can_disctmo(phba);
489 /* turn on Link Attention interrupts */
490 psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
491 control = readl(phba->HCregaddr);
492 control |= HC_LAINT_ENA;
493 writel(control, phba->HCregaddr);
494 readl(phba->HCregaddr); /* flush */
/*
 * lpfc_mbx_cmpl_config_link - CONFIG_LINK mailbox completion handler.
 *
 * On error: log and set LPFC_HBA_ERROR.  In LPFC_LOCAL_CFG_LINK state:
 * public loop without the L bit waits for FAN via the discovery timer;
 * otherwise start discovery with an initial FLOGI.  In
 * LPFC_FABRIC_CFG_LINK state just free the mailbox.  Any other state is
 * logged as bad and a CLEAR_LA is issued (reusing @pmb), with full ring
 * re-enable/flush fallback if the mailbox cannot be issued.
 * NOTE(review): the `mb` declaration/assignment, the error test, and
 * several return/brace lines are elided from this chunk.
 */
500 lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
502 struct lpfc_sli *psli;
507 /* Check for error */
509 /* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
510 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
511 "%d:0306 CONFIG_LINK mbxStatus error x%x "
513 phba->brd_no, mb->mbxStatus, phba->hba_state);
516 phba->hba_state = LPFC_HBA_ERROR;
520 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
521 if (phba->fc_topology == TOPOLOGY_LOOP) {
522 /* If we are public loop and L bit was set */
523 if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
524 !(phba->fc_flag & FC_LBIT)) {
525 /* Need to wait for FAN - use discovery timer
526 * for timeout. hba_state is identically
527 * LPFC_LOCAL_CFG_LINK while waiting for FAN
529 lpfc_set_disctmo(phba);
530 mempool_free( pmb, phba->mbox_mem_pool);
535 /* Start discovery by sending a FLOGI hba_state is identically
536 * LPFC_FLOGI while waiting for FLOGI cmpl
538 phba->hba_state = LPFC_FLOGI;
539 lpfc_set_disctmo(phba);
540 lpfc_initial_flogi(phba);
541 mempool_free( pmb, phba->mbox_mem_pool);
544 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
545 mempool_free( pmb, phba->mbox_mem_pool);
550 /* CONFIG_LINK bad hba state <hba_state> */
551 lpfc_printf_log(phba,
554 "%d:0200 CONFIG_LINK bad hba state x%x\n",
555 phba->brd_no, phba->hba_state);
557 if (phba->hba_state != LPFC_CLEAR_LA) {
/* Reuse this mailbox buffer to issue CLEAR_LA */
558 lpfc_clear_la(phba, pmb);
559 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
560 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
561 == MBX_NOT_FINISHED) {
/* CLEAR_LA could not be sent: clean up as if discovery finished */
562 mempool_free( pmb, phba->mbox_mem_pool);
563 lpfc_disc_flush_list(phba);
564 psli->ring[(psli->ip_ring)].flag &=
565 ~LPFC_STOP_IOCB_EVENT;
566 psli->ring[(psli->fcp_ring)].flag &=
567 ~LPFC_STOP_IOCB_EVENT;
568 psli->ring[(psli->next_ring)].flag &=
569 ~LPFC_STOP_IOCB_EVENT;
570 phba->hba_state = LPFC_HBA_READY;
573 mempool_free( pmb, phba->mbox_mem_pool);
/*
 * lpfc_mbx_cmpl_read_sparam - READ_SPARAM mailbox completion handler.
 *
 * On success: syncs the DMA buffer for CPU access, copies the returned
 * service parameters into phba->fc_sparam and caches the node/port
 * WWNs, then frees the DMA buffer and mailbox.  On error: sets
 * LPFC_HBA_ERROR, frees the DMA buffer, and issues CLEAR_LA (reusing
 * @pmb) with ring re-enable fallback if the mailbox cannot be issued.
 * NOTE(review): the error test, mp->phys free companion lines, and
 * return/brace lines are elided from this chunk.
 */
579 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
581 struct lpfc_sli *psli = &phba->sli;
582 MAILBOX_t *mb = &pmb->mb;
583 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
586 /* Check for error */
588 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
589 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
590 "%d:0319 READ_SPARAM mbxStatus error x%x "
592 phba->brd_no, mb->mbxStatus, phba->hba_state);
595 phba->hba_state = LPFC_HBA_ERROR;
599 /* The mailbox was populated by the HBA. Flush it to main store for the
600 * driver. Note that all context buffers are from the driver's
601 * dma pool and have length LPFC_BPL_SIZE.
603 pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
/* Cache service params and derived WWNN/WWPN for later logins */
606 memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
607 sizeof (struct serv_parm));
608 memcpy((uint8_t *) & phba->fc_nodename,
609 (uint8_t *) & phba->fc_sparam.nodeName,
610 sizeof (struct lpfc_name));
611 memcpy((uint8_t *) & phba->fc_portname,
612 (uint8_t *) & phba->fc_sparam.portName,
613 sizeof (struct lpfc_name));
614 lpfc_mbuf_free(phba, mp->virt, mp->phys);
616 mempool_free( pmb, phba->mbox_mem_pool);
/* Error path: release the DMA buffer before reusing pmb */
620 pmb->context1 = NULL;
621 lpfc_mbuf_free(phba, mp->virt, mp->phys);
623 if (phba->hba_state != LPFC_CLEAR_LA) {
624 lpfc_clear_la(phba, pmb);
625 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
626 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
627 == MBX_NOT_FINISHED) {
/* CLEAR_LA could not be sent: clean up as if discovery finished */
628 mempool_free( pmb, phba->mbox_mem_pool);
629 lpfc_disc_flush_list(phba);
630 psli->ring[(psli->ip_ring)].flag &=
631 ~LPFC_STOP_IOCB_EVENT;
632 psli->ring[(psli->fcp_ring)].flag &=
633 ~LPFC_STOP_IOCB_EVENT;
634 psli->ring[(psli->next_ring)].flag &=
635 ~LPFC_STOP_IOCB_EVENT;
636 phba->hba_state = LPFC_HBA_READY;
639 mempool_free( pmb, phba->mbox_mem_pool);
645 * This routine handles processing a READ_LA mailbox
646 * command upon completion. It is setup in the LPFC_MBOXQ
647 * as the completion routine when the command is
648 * handed off to the SLI layer.
/*
 * Decodes the returned link-attention data: on link up it records link
 * speed/topology, copies the loop map (loop topology), assigns myDID,
 * optionally dumps the ALPA map to the log, then queues READ_SPARAM and
 * CONFIG_LINK mailboxes to continue bring-up.  On link down it bumps
 * stats and re-enables Link Attention interrupts.  Errors mark the HBA
 * LPFC_HBA_ERROR.  The DMA buffer and mailbox are freed at the end.
 * NOTE(review): many declarations (la, mbox, control, i, j, k, un,
 * numalpa), switch/case labels, and brace/else lines are elided from
 * this chunk.
 */
651 lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
653 struct lpfc_sli *psli = &phba->sli;
656 MAILBOX_t *mb = &pmb->mb;
657 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
661 /* Check for error */
663 /* READ_LA mbox error <mbxStatus> state <hba_state> */
664 lpfc_printf_log(phba,
667 "%d:1307 READ_LA mbox error x%x state x%x\n",
669 mb->mbxStatus, phba->hba_state);
672 phba->hba_state = LPFC_HBA_ERROR;
674 /* turn on Link Attention interrupts */
675 psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
676 control = readl(phba->HCregaddr);
677 control |= HC_LAINT_ENA;
678 writel(control, phba->HCregaddr);
679 readl(phba->HCregaddr); /* flush */
/* Link attention payload lives in the mailbox itself */
682 la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
685 /* The mailbox was populated by the HBA. Flush it to main store for the
686 * driver. Note that all context buffers are from the driver's
687 * dma pool and have length LPFC_BPL_SIZE.
689 pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
692 /* Get Loop Map information */
694 memcpy(&phba->alpa_map[0], mp->virt, 128);
696 memset(&phba->alpa_map[0], 0, 128);
/* Stale or duplicate event tags indicate multiple link events */
699 if (((phba->fc_eventTag + 1) < la->eventTag) ||
700 (phba->fc_eventTag == la->eventTag)) {
701 phba->fc_stat.LinkMultiEvent++;
702 if (la->attType == AT_LINK_UP) {
703 if (phba->fc_eventTag != 0) {
710 phba->fc_eventTag = la->eventTag;
712 if (la->attType == AT_LINK_UP) {
713 phba->fc_stat.LinkUp++;
714 /* Link Up Event <eventTag> received */
715 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
716 "%d:1303 Link Up Event x%x received "
717 "Data: x%x x%x x%x x%x\n",
718 phba->brd_no, la->eventTag, phba->fc_eventTag,
719 la->granted_AL_PA, la->UlnkSpeed,
/* Record negotiated link speed; unknown values fall through */
722 switch(la->UlnkSpeed) {
724 phba->fc_linkspeed = LA_1GHZ_LINK;
727 phba->fc_linkspeed = LA_2GHZ_LINK;
730 phba->fc_linkspeed = LA_4GHZ_LINK;
733 phba->fc_linkspeed = LA_UNKNW_LINK;
737 if ((phba->fc_topology = la->topology) == TOPOLOGY_LOOP) {
740 phba->fc_flag |= FC_LBIT;
/* In loop topology our address is the granted AL_PA */
743 phba->fc_myDID = la->granted_AL_PA;
745 i = la->un.lilpBde64.tus.f.bdeSize;
747 phba->alpa_map[0] = 0;
749 if (phba->cfg_log_verbose
/* Verbose logging: dump the ALPA map 16 bytes at a time */
762 numalpa = phba->alpa_map[0];
764 while (j < numalpa) {
765 memset(un.pamap, 0, 16);
766 for (k = 1; j < numalpa; k++) {
774 /* Link Up Event ALPA map */
775 lpfc_printf_log(phba,
778 "%d:1304 Link Up Event "
779 "ALPA map Data: x%x "
782 un.pa.wd1, un.pa.wd2,
783 un.pa.wd3, un.pa.wd4);
/* Non-loop / preferred DID path */
788 phba->fc_myDID = phba->fc_pref_DID;
789 phba->fc_flag |= FC_LBIT;
/* Continue link bring-up: read service params, then configure link */
793 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
794 lpfc_read_sparam(phba, mbox);
795 mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
797 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
800 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
801 phba->hba_state = LPFC_LOCAL_CFG_LINK;
802 lpfc_config_link(phba, mbox);
803 mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
805 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
/* AT_LINK_DOWN handling (branch header elided) */
808 phba->fc_stat.LinkDown++;
809 /* Link Down Event <eventTag> received */
810 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
811 "%d:1305 Link Down Event x%x received "
812 "Data: x%x x%x x%x\n",
813 phba->brd_no, la->eventTag, phba->fc_eventTag,
814 phba->hba_state, phba->fc_flag);
818 /* turn on Link Attention interrupts - no CLEAR_LA needed */
819 psli->sliinit.sli_flag |= LPFC_PROCESS_LA;
820 control = readl(phba->HCregaddr);
821 control |= HC_LAINT_ENA;
822 writel(control, phba->HCregaddr);
823 readl(phba->HCregaddr); /* flush */
/* Release the loop-map DMA buffer and the mailbox */
826 pmb->context1 = NULL;
827 lpfc_mbuf_free(phba, mp->virt, mp->phys);
829 mempool_free( pmb, phba->mbox_mem_pool);
834 * This routine handles processing a REG_LOGIN mailbox
835 * command upon completion. It is setup in the LPFC_MBOXQ
836 * as the completion routine when the command is
837 * handed off to the SLI layer.
/*
 * Syncs the DMA context buffer, then feeds NLP_EVT_CMPL_REG_LOGIN into
 * the node's discovery state machine and releases the DMA buffer and
 * mailbox.  NOTE(review): the psli assignment, braces, and return are
 * elided from this chunk.
 */
840 lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
842 struct lpfc_sli *psli;
844 struct lpfc_dmabuf *mp;
845 struct lpfc_nodelist *ndlp;
850 ndlp = (struct lpfc_nodelist *) pmb->context2;
851 mp = (struct lpfc_dmabuf *) (pmb->context1);
853 /* The mailbox was populated by the HBA. Flush it to main store for the
854 * driver. Note that all context buffers are from the driver's
855 * dma pool and have length LPFC_BPL_SIZE.
857 pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
860 pmb->context1 = NULL;
862 /* Good status, call state machine */
863 lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
864 lpfc_mbuf_free(phba, mp->virt, mp->phys);
866 mempool_free( pmb, phba->mbox_mem_pool);
872 * This routine handles processing a Fabric REG_LOGIN mailbox
873 * command upon completion. It is setup in the LPFC_MBOXQ
874 * as the completion routine when the command is
875 * handed off to the SLI layer.
/*
 * Failure path: frees all resources and falls back to loop-map
 * discovery.  Success path: records the new RPI, promotes the node to
 * the unmapped list as a fabric node, issues an SCR to the fabric
 * controller, then allocates a fresh node for a NameServer PLOGI (and
 * optionally an FDMI PLOGI), falling back to plain discovery when the
 * node pool is empty.
 * NOTE(review): the mb declaration/status test, several braces, and
 * some argument lines are elided from this chunk.
 */
878 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
880 struct lpfc_sli *psli;
882 struct lpfc_dmabuf *mp;
883 struct lpfc_nodelist *ndlp;
884 struct lpfc_nodelist *ndlp_fdmi;
890 ndlp = (struct lpfc_nodelist *) pmb->context2;
891 mp = (struct lpfc_dmabuf *) (pmb->context1);
/* Error path: release buffer, mailbox and the node itself */
894 lpfc_mbuf_free(phba, mp->virt, mp->phys);
896 mempool_free( pmb, phba->mbox_mem_pool);
897 mempool_free( ndlp, phba->nlp_mem_pool);
899 /* FLOGI failed, so just use loop map to make discovery list */
900 lpfc_disc_list_loopmap(phba);
902 /* Start discovery */
903 lpfc_disc_start(phba);
907 /* The mailbox was populated by the HBA. Flush it to main store for the
908 * driver. Note that all context buffers are from the driver's
909 * dma pool and have length LPFC_BPL_SIZE.
911 pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
914 pmb->context1 = NULL;
/* Replace any stale RPI mapping with the newly granted one */
916 if (ndlp->nlp_rpi != 0)
917 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
918 ndlp->nlp_rpi = mb->un.varWords[0];
919 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
920 ndlp->nlp_type |= NLP_FABRIC;
921 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
922 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
924 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
925 /* This NPort has been assigned an NPort_ID by the fabric as a
926 * result of the completed fabric login. Issue a State Change
927 * Registration (SCR) ELS request to the fabric controller
928 * (SCR_DID) so that this NPort gets RSCN events from the
931 lpfc_issue_els_scr(phba, SCR_DID, 0);
933 /* Allocate a new node instance. If the pool is empty, just
934 * start the discovery process and skip the Nameserver login
935 * process. This is attempted again later on. Otherwise, issue
936 * a Port Login (PLOGI) to the NameServer
938 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC))
940 lpfc_disc_start(phba);
942 lpfc_nlp_init(phba, ndlp, NameServer_DID);
943 ndlp->nlp_type |= NLP_FABRIC;
944 ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
945 lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
946 lpfc_issue_els_plogi(phba, ndlp, 0);
/* Optional FDMI (management server) login alongside the NS login */
947 if (phba->cfg_fdmi_on) {
948 if ((ndlp_fdmi = mempool_alloc(
951 lpfc_nlp_init(phba, ndlp_fdmi,
953 ndlp_fdmi->nlp_type |= NLP_FABRIC;
954 ndlp_fdmi->nlp_state =
956 lpfc_issue_els_plogi(phba, ndlp_fdmi,
/* Common exit: release DMA buffer and mailbox */
963 lpfc_mbuf_free(phba, mp->virt, mp->phys);
965 mempool_free( pmb, phba->mbox_mem_pool);
971 * This routine handles processing a NameServer REG_LOGIN mailbox
972 * command upon completion. It is setup in the LPFC_MBOXQ
973 * as the completion routine when the command is
974 * handed off to the SLI layer.
/*
 * Failure path: frees resources, drops the node, and falls back to
 * loop-map discovery.  Success path: records the granted RPI, marks
 * the node fabric/unmapped, registers with the NameServer (RNN_ID,
 * RSNN_NN, RFT_ID) during link-up discovery, then issues GID_FT; if
 * that cannot be issued, discovery is finished without the NameServer.
 * NOTE(review): the mb declaration/status test and several braces are
 * elided from this chunk.
 */
977 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
979 struct lpfc_sli *psli;
981 struct lpfc_dmabuf *mp;
982 struct lpfc_nodelist *ndlp;
987 ndlp = (struct lpfc_nodelist *) pmb->context2;
988 mp = (struct lpfc_dmabuf *) (pmb->context1);
/* Error path: release resources and discard the NameServer node */
991 lpfc_mbuf_free(phba, mp->virt, mp->phys);
993 mempool_free( pmb, phba->mbox_mem_pool);
994 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
996 /* RegLogin failed, so just use loop map to make discovery
998 lpfc_disc_list_loopmap(phba);
1000 /* Start discovery */
1001 lpfc_disc_start(phba);
1005 /* The mailbox was populated by the HBA. Flush it to main store for the
1006 * driver. Note that all context buffers are from the driver's
1007 * dma pool and have length LPFC_BPL_SIZE.
1009 pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
1010 PCI_DMA_FROMDEVICE);
1012 pmb->context1 = NULL;
/* Replace any stale RPI mapping with the newly granted one */
1014 if (ndlp->nlp_rpi != 0)
1015 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1016 ndlp->nlp_rpi = mb->un.varWords[0];
1017 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
1018 ndlp->nlp_type |= NLP_FABRIC;
1019 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1020 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
1022 if (phba->hba_state < LPFC_HBA_READY) {
1023 /* Link up discovery requires Fabrib registration. */
1024 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
1025 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
1026 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
1029 phba->fc_ns_retry = 0;
1030 /* Good status, issue CT Request to NameServer */
1031 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
1032 /* Cannot issue NameServer Query, so finish up discovery */
1033 lpfc_disc_start(phba);
/* Common exit: release DMA buffer and mailbox */
1036 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1038 mempool_free( pmb, phba->mbox_mem_pool);
1043 /* Put blp on the bind list */
/*
 * lpfc_consistent_bind_save - save a persistent binding entry.
 *
 * Appends @blp to phba->fc_nlpbind_list, bumps the bind count, and logs
 * the addition.  NOTE(review): return type/statement and braces are
 * elided from this chunk.
 */
1045 lpfc_consistent_bind_save(struct lpfc_hba * phba, struct lpfc_bindlist * blp)
1047 /* Put it at the end of the bind list */
1048 list_add_tail(&blp->nlp_listp, &phba->fc_nlpbind_list);
1049 phba->fc_bind_cnt++;
1051 /* Add scsiid <sid> to BIND list */
1052 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1053 "%d:0903 Add scsiid %d to BIND list "
1054 "Data: x%x x%x x%x x%p\n",
1055 phba->brd_no, blp->nlp_sid, phba->fc_bind_cnt,
1056 blp->nlp_DID, blp->nlp_bind_type, blp);
/*
 * lpfc_nlp_list - move a node between the driver's discovery lists.
 *
 * Two-phase operation: the first switch removes @nlp from whichever
 * list it currently sits on (decrementing that list's counter and doing
 * list-specific teardown: fail-mask updates, target blocking, delay
 * timer cancel); the second switch adds it to @list (incrementing the
 * counter and doing list-specific setup: nodev timer management, target
 * unblocking, FC/FCP type bits).  NLP_NO_LIST removes the node
 * entirely via lpfc_nlp_remove().  The saved bind entry (nlp_listp_bind)
 * is preserved across the move and re-saved at the end.
 * NOTE(review): numerous break statements, case labels (PRLI, NPR),
 * braces, and the return are elided from this chunk.
 */
1062 lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1064 struct lpfc_bindlist *blp;
1065 struct lpfc_target *targetp;
1066 struct lpfc_sli *psli;
1069 /* Sanity check to ensure we are not moving to / from the same list */
1070 if((nlp->nlp_flag & NLP_LIST_MASK) == list) {
1071 if(list != NLP_NO_LIST)
/* Remember the bind entry before the flags get cleared below */
1075 blp = nlp->nlp_listp_bind;
/* Phase 1: unlink from the current list */
1077 switch(nlp->nlp_flag & NLP_LIST_MASK) {
1078 case NLP_NO_LIST: /* Not on any list */
1080 case NLP_UNUSED_LIST:
1081 phba->fc_unused_cnt--;
1082 list_del(&nlp->nlp_listp);
1083 nlp->nlp_flag &= ~NLP_LIST_MASK;
1085 case NLP_PLOGI_LIST:
1086 phba->fc_plogi_cnt--;
1087 list_del(&nlp->nlp_listp);
1088 nlp->nlp_flag &= ~NLP_LIST_MASK;
1090 case NLP_ADISC_LIST:
1091 phba->fc_adisc_cnt--;
1092 list_del(&nlp->nlp_listp);
1093 nlp->nlp_flag &= ~NLP_LIST_MASK;
1095 case NLP_REGLOGIN_LIST:
1096 phba->fc_reglogin_cnt--;
1097 list_del(&nlp->nlp_listp);
1098 nlp->nlp_flag &= ~NLP_LIST_MASK;
1101 phba->fc_prli_cnt--;
1102 list_del(&nlp->nlp_listp);
1103 nlp->nlp_flag &= ~NLP_LIST_MASK;
1105 case NLP_UNMAPPED_LIST:
1106 phba->fc_unmap_cnt--;
1107 list_del(&nlp->nlp_listp);
1108 nlp->nlp_flag &= ~NLP_LIST_MASK;
1109 nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1110 nlp->nlp_type &= ~NLP_FC_NODE;
1111 phba->nport_event_cnt++;
1113 case NLP_MAPPED_LIST:
1115 list_del(&nlp->nlp_listp);
1116 nlp->nlp_flag &= ~NLP_LIST_MASK;
1117 phba->nport_event_cnt++;
1118 lpfc_set_failmask(phba, nlp, LPFC_DEV_DISAPPEARED,
1120 nlp->nlp_type &= ~NLP_FCP_TARGET;
1121 targetp = nlp->nlp_Target;
/* Leaving the mapped list: detach from the SCSI target */
1122 if (targetp && (list != NLP_MAPPED_LIST)) {
1123 nlp->nlp_Target = NULL;
1124 #if defined(FC_TRANS_VER1) || defined(FC_TRANS_265_BLKPATCH)
1126 * Do not block the target if the driver has just reset
1127 * its interface to the hardware.
1129 if (phba->hba_state != LPFC_INIT_START)
1130 lpfc_target_block(phba, targetp);
/* NPR-list removal (case label elided) */
1137 list_del(&nlp->nlp_listp);
1138 nlp->nlp_flag &= ~NLP_LIST_MASK;
1139 /* Stop delay tmo if taking node off NPR list */
1140 if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
1141 (list != NLP_NPR_LIST)) {
1142 nlp->nlp_flag &= ~NLP_DELAY_TMO;
1143 del_timer_sync(&nlp->nlp_delayfunc);
1148 /* Add NPort <did> to <num> list */
1149 lpfc_printf_log(phba,
1152 "%d:0904 Add NPort x%x to %d list Data: x%x x%p\n",
1154 nlp->nlp_DID, list, nlp->nlp_flag, blp);
1156 nlp->nlp_listp_bind = NULL;
/* Phase 2: link onto the destination list */
1159 case NLP_NO_LIST: /* No list, just remove it */
1160 lpfc_nlp_remove(phba, nlp);
1162 case NLP_UNUSED_LIST:
1163 nlp->nlp_flag |= list;
1164 /* Put it at the end of the unused list */
1165 list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
1166 phba->fc_unused_cnt++;
1168 case NLP_PLOGI_LIST:
1169 nlp->nlp_flag |= list;
1170 /* Put it at the end of the plogi list */
1171 list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
1172 phba->fc_plogi_cnt++;
1174 case NLP_ADISC_LIST:
1175 nlp->nlp_flag |= list;
1176 /* Put it at the end of the adisc list */
1177 list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
1178 phba->fc_adisc_cnt++;
1180 case NLP_REGLOGIN_LIST:
1181 nlp->nlp_flag |= list;
1182 /* Put it at the end of the reglogin list */
1183 list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
1184 phba->fc_reglogin_cnt++;
1187 nlp->nlp_flag |= list;
1188 /* Put it at the end of the prli list */
1189 list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
1190 phba->fc_prli_cnt++;
1192 case NLP_UNMAPPED_LIST:
1193 nlp->nlp_flag |= list;
1194 /* Put it at the end of the unmap list */
1195 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1196 phba->fc_unmap_cnt++;
1197 phba->nport_event_cnt++;
1198 /* stop nodev tmo if running */
1199 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1200 del_timer_sync(&nlp->nlp_tmofunc);
1202 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1203 nlp->nlp_type |= NLP_FC_NODE;
1204 lpfc_set_failmask(phba, nlp, LPFC_DEV_DISCOVERY_INP,
1207 case NLP_MAPPED_LIST:
1208 nlp->nlp_flag |= list;
1209 /* Put it at the end of the map list */
1210 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1212 phba->nport_event_cnt++;
1213 /* stop nodev tmo if running */
1214 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1215 del_timer_sync(&nlp->nlp_tmofunc);
1217 nlp->nlp_flag &= ~NLP_NODEV_TMO;
1218 nlp->nlp_type |= NLP_FCP_TARGET;
1219 lpfc_set_failmask(phba, nlp, LPFC_DEV_DISAPPEARED,
1221 lpfc_set_failmask(phba, nlp, LPFC_DEV_DISCOVERY_INP,
/* Reattach to the SCSI target for this node's SCSI id */
1224 targetp = phba->device_queue_hash[nlp->nlp_sid];
1225 if (targetp && targetp->pnode) {
1226 nlp->nlp_Target = targetp;
1227 #if defined(FC_TRANS_VER1) || defined(FC_TRANS_265_BLKPATCH)
1228 /* Unblock I/Os on target */
1229 if(targetp->blocked)
1230 lpfc_target_unblock(phba, targetp);
/* NPR-list insertion (case label elided) */
1235 nlp->nlp_flag |= list;
1236 /* Put it at the end of the npr list */
1237 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1241 * Sanity check for Fabric entity.
1242 * Set nodev_tmo for NPR state, for Fabric use 1 sec.
1244 if (nlp->nlp_type & NLP_FABRIC) {
1245 mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
1248 mod_timer(&nlp->nlp_tmofunc,
1249 jiffies + HZ * phba->cfg_nodev_tmo);
1251 nlp->nlp_flag |= NLP_NODEV_TMO;
1252 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
/* Re-save the bind entry captured in phase 1, if any */
1260 nlp->nlp_flag &= ~NLP_SEED_MASK;
1261 nlp->nlp_Target = NULL;
1262 lpfc_consistent_bind_save(phba, blp);
1268 * Start / ReStart rescue timer for Discovery / RSCN handling
/*
 * Arms (or re-arms) fc_disctmo for (2 * fc_ratov + 1) seconds and sets
 * FC_DISC_TMO so lpfc_can_disctmo() knows it is running.
 * NOTE(review): the `tmo` declaration, braces, and return are elided
 * from this chunk.
 */
1271 lpfc_set_disctmo(struct lpfc_hba * phba)
/* Per FC-FS: allow 2 * R_A_TOV (+1s slack) for discovery to complete */
1275 tmo = ((phba->fc_ratov * 2) + 1);
1277 mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
1278 phba->fc_flag |= FC_DISC_TMO;
1280 /* Start Discovery Timer state <hba_state> */
1281 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1282 "%d:0247 Start Discovery Timer state x%x "
1283 "Data: x%x x%lx x%x x%x\n",
1285 phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
1286 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1292 * Cancel rescue timer for Discovery / RSCN handling
/*
 * If FC_DISC_TMO is set, synchronously cancels fc_disctmo and clears
 * the flag, then logs the cancellation.
 * NOTE(review): braces and the return are elided from this chunk.
 */
1295 lpfc_can_disctmo(struct lpfc_hba * phba)
1297 /* Turn off discovery timer if its running */
1298 if(phba->fc_flag & FC_DISC_TMO) {
1299 del_timer_sync(&phba->fc_disctmo);
1301 phba->fc_flag &= ~FC_DISC_TMO;
1303 /* Cancel Discovery Timer state <hba_state> */
1304 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1305 "%d:0248 Cancel Discovery Timer state x%x "
1306 "Data: x%x x%x x%x\n",
1307 phba->brd_no, phba->hba_state, phba->fc_flag,
1308 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1314 * Check specified ring for outstanding IOCB on the SLI queue
1315 * Return true if iocb matches the specified nport
1318 lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1319 struct lpfc_sli_ring * pring,
1320 struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
1322 struct lpfc_sli *psli;
/* Match criteria differ per ring: ELS ring matches by RPI (GEN_REQUEST) or
 * by context1 pointer (ELS request/response); FCP ring matches by RPI unless
 * a relogin to the FCP target is pending (NLP_DELAY_TMO).
 * NOTE(review): the return statements and break lines of this switch are
 * missing from this extract -- only the match conditions are visible.
 */
1327 if (pring->ringno == LPFC_ELS_RING) {
1328 switch (icmd->ulpCommand) {
1329 case CMD_GEN_REQUEST64_CR:
1330 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1332 case CMD_ELS_REQUEST64_CR:
1333 case CMD_XMIT_ELS_RSP64_CX:
1334 if (iocb->context1 == (uint8_t *) ndlp)
1337 } else if (pring->ringno == psli->ip_ring) {
1339 } else if (pring->ringno == psli->fcp_ring) {
1340 /* Skip match check if waiting to relogin to FCP target */
1341 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1342 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1345 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1348 } else if (pring->ringno == psli->next_ring) {
1355 * Free resources / clean up outstanding I/Os
1356 * associated with nlp_rpi in the LPFC_NODELIST entry.
1359 lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1361 struct lpfc_sli *psli;
1362 struct lpfc_sli_ring *pring;
1363 struct lpfc_iocbq *iocb, *next_iocb;
1368 rpi = ndlp->nlp_rpi;
1370 /* Now process each ring */
1371 for (i = 0; i < psli->sliinit.num_rings; i++) {
1372 pring = &psli->ring[i];
/* Walk the pending-transmit queue; _safe variant is required because
 * matching entries are deleted while iterating. */
1374 list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1377 * Check to see if iocb matches the nport we are
1380 if ((lpfc_check_sli_ndlp
1381 (phba, pring, iocb, ndlp))) {
1382 /* It matches, so deque and call compl
1384 list_del(&iocb->list);
/* Complete the iocb locally with LOCAL_REJECT so the submitter's
 * completion handler runs and its resources are released. */
1386 if (iocb->iocb_cmpl) {
1389 IOSTAT_LOCAL_REJECT;
1390 icmd->un.ulpWord[4] =
1392 (iocb->iocb_cmpl) (phba,
1396 phba->iocb_mem_pool);
1400 /* Everything that matches on txcmplq will be returned
1401 * by firmware with a no rpi error.
1409 * Free rpi associated with LPFC_NODELIST entry.
1410 * This routine is called from lpfc_freenode(), when we are removing
1411 * a LPFC_NODELIST entry. It is also called if the driver initiates a
1412 * LOGO that completes successfully, and we are waiting to PLOGI back
1413 * to the remote NPort. In addition, it is called after we receive
1414 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1415 * we are waiting to PLOGI back to the remote NPort.
1418 lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
/* Only act if an RPI is actually registered (nlp_rpi != 0). */
1422 if (ndlp->nlp_rpi) {
1423 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
1424 lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
/* Mailbox is freed here only on submit failure; on success the
 * completion path owns it. */
1425 if (lpfc_sli_issue_mbox
1426 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
1427 == MBX_NOT_FINISHED) {
1428 mempool_free( mbox, phba->mbox_mem_pool);
/* Drop the node from the RPI hash and flush I/O queued against the RPI. */
1431 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1432 lpfc_no_rpi(phba, ndlp);
1434 lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
1442 * Free resources associated with LPFC_NODELIST entry
1443 * so it can be freed.
1446 lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1448 struct lpfc_target *targetp;
1449 struct lpfc_sli *psli;
1452 /* The psli variable gets rid of the long pointer deference. */
1455 /* Cleanup node for NPort <nlp_DID> */
1456 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1457 "%d:0900 Cleanup node for NPort x%x "
1458 "Data: x%x x%x x%x\n",
1459 phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1460 ndlp->nlp_state, ndlp->nlp_rpi);
/* Dequeue from whatever node list it is on, stop both per-node timers,
 * then unregister the RPI (which also flushes queued I/O). */
1462 lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
1464 if(ndlp->nlp_flag & NLP_NODEV_TMO) {
1465 del_timer_sync(&ndlp->nlp_tmofunc);
1467 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1469 if(ndlp->nlp_flag & NLP_DELAY_TMO) {
1470 del_timer_sync(&ndlp->nlp_delayfunc);
1472 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1474 lpfc_unreg_rpi(phba, ndlp);
/* Break any target <-> node cross-links so no stale pointer survives. */
1476 for(scsid=0;scsid<MAX_FCP_TARGET;scsid++) {
1477 targetp = phba->device_queue_hash[scsid];
1478 /* First see if the SCSI ID has an allocated struct
1481 if (targetp->pnode == ndlp) {
1482 targetp->pnode = NULL;
1483 ndlp->nlp_Target = NULL;
1484 #ifdef FC_TRANS_VER1
1486 * This code does not apply to SLES9 since there
1487 * is no starget defined in the midlayer.
1488 * Additionally, dynamic target discovery to the
1489 * midlayer is not supported yet.
1491 if (targetp->starget) {
1492 /* Remove SCSI target / SCSI Hotplug */
1493 lpfc_target_remove(phba, targetp);
1504 * Check to see if we can free the nlp back to the freelist.
1505 * If we are in the middle of using the nlp in the discovery state
1506 * machine, defer the free till we reach the end of the state machine.
1509 lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
/* Stop both per-node timers before considering the free. */
1511 if(ndlp->nlp_flag & NLP_NODEV_TMO) {
1512 del_timer_sync(&ndlp->nlp_tmofunc);
1514 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1516 if(ndlp->nlp_flag & NLP_DELAY_TMO) {
1517 del_timer_sync(&ndlp->nlp_delayfunc);
1519 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
/* Non-zero disc_refcnt means the discovery state machine still holds a
 * reference: mark for deferred removal instead of freeing now. */
1521 if (ndlp->nlp_disc_refcnt) {
1522 ndlp->nlp_flag |= NLP_DELAY_REMOVE;
1525 lpfc_freenode(phba, ndlp);
1526 mempool_free( ndlp, phba->nlp_mem_pool);
/* Match a node against a DID, tolerating zeroed domain/area fields.
 * Broadcast always matches; a node with DID 0 is handled specially
 * (original lines 1542-1544 missing from this extract); then an exact
 * match; finally the private-loop cases where either side reports
 * domain == 0 and area == 0 but the AL_PA (id byte) agrees.
 */
1532 lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1538 if (did == Bcast_DID)
1541 if (ndlp->nlp_DID == 0) {
1545 /* First check for Direct match */
1546 if (ndlp->nlp_DID == did)
1549 /* Next check for area/domain identically equals 0 match */
1550 mydid.un.word = phba->fc_myDID;
1551 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
1555 matchdid.un.word = did;
1556 ndlpdid.un.word = ndlp->nlp_DID;
1557 if (matchdid.un.b.id == ndlpdid.un.b.id) {
1558 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
1559 (mydid.un.b.area == matchdid.un.b.area)) {
1560 if ((ndlpdid.un.b.domain == 0) &&
1561 (ndlpdid.un.b.area == 0)) {
1562 if (ndlpdid.un.b.id)
/* Symmetric case: the stored node DID has real domain/area and the
 * probe DID has them zeroed. */
1568 matchdid.un.word = ndlp->nlp_DID;
1569 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
1570 (mydid.un.b.area == ndlpdid.un.b.area)) {
1571 if ((matchdid.un.b.domain == 0) &&
1572 (matchdid.un.b.area == 0)) {
1573 if (matchdid.un.b.id)
1581 /* Search for a nodelist entry on a specific list */
1582 struct lpfc_nodelist *
1583 lpfc_findnode_wwpn(struct lpfc_hba * phba, uint32_t order,
1584 struct lpfc_name * wwpn)
/* Look up a node by port WWN on the lists selected by 'order'
 * (NLP_SEARCH_UNMAPPED and/or NLP_SEARCH_MAPPED); NULL if not found. */
1586 struct lpfc_nodelist *ndlp, *next_ndlp;
1589 if (order & NLP_SEARCH_UNMAPPED) {
1590 list_for_each_entry_safe(ndlp, next_ndlp,
1591 &phba->fc_nlpunmap_list, nlp_listp) {
1592 if (memcmp(&ndlp->nlp_portname, wwpn,
1593 sizeof(struct lpfc_name)) == 0) {
/* data1 packs state/xri/type/rpi into one word for the trace log. */
1595 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1596 ((uint32_t) ndlp->nlp_xri << 16) |
1597 ((uint32_t) ndlp->nlp_type << 8) |
1598 ((uint32_t) ndlp->nlp_rpi & 0xff));
1599 /* FIND node DID unmapped */
1600 lpfc_printf_log(phba,
1603 "%d:0911 FIND node DID unmapped"
1604 " Data: x%p x%x x%x x%x\n",
1606 ndlp, ndlp->nlp_DID,
1607 ndlp->nlp_flag, data1);
1613 if (order & NLP_SEARCH_MAPPED) {
1614 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1616 if (memcmp(&ndlp->nlp_portname, wwpn,
1617 sizeof(struct lpfc_name)) == 0) {
1619 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1620 ((uint32_t) ndlp->nlp_xri << 16) |
1621 ((uint32_t) ndlp->nlp_type << 8) |
1622 ((uint32_t) ndlp->nlp_rpi & 0xff));
1623 /* FIND node DID mapped */
1624 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1625 "%d:0901 FIND node DID mapped "
1626 "Data: x%p x%x x%x x%x\n",
1628 ndlp, ndlp->nlp_DID,
1629 ndlp->nlp_flag, data1);
1635 /* no match found */
1636 return ((struct lpfc_nodelist *) 0);
1638 /* Search for a nodelist entry on a specific list */
1639 struct lpfc_nodelist *
1640 lpfc_findnode_wwnn(struct lpfc_hba * phba, uint32_t order,
1641 struct lpfc_name * wwnn)
/* Same as lpfc_findnode_wwpn() but keyed on the node WWN (nlp_nodename);
 * NULL if not found on the selected lists. */
1643 struct lpfc_nodelist *ndlp, *next_ndlp;
1646 if (order & NLP_SEARCH_UNMAPPED) {
1647 list_for_each_entry_safe(ndlp, next_ndlp,
1648 &phba->fc_nlpunmap_list, nlp_listp) {
1649 if (memcmp(&ndlp->nlp_nodename, wwnn,
1650 sizeof(struct lpfc_name)) == 0) {
/* data1 packs state/xri/type/rpi into one word for the trace log. */
1652 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1653 ((uint32_t) ndlp->nlp_xri << 16) |
1654 ((uint32_t) ndlp->nlp_type << 8) |
1655 ((uint32_t) ndlp->nlp_rpi & 0xff));
1656 /* FIND node DID unmapped */
1657 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1658 "%d:0910 FIND node DID unmapped"
1659 "Data: x%p x%x x%x x%x\n",
1661 ndlp, ndlp->nlp_DID,
1662 ndlp->nlp_flag, data1);
1668 if (order & NLP_SEARCH_MAPPED) {
1669 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1671 if (memcmp(&ndlp->nlp_nodename, wwnn,
1672 sizeof(struct lpfc_name)) == 0) {
1674 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1675 ((uint32_t) ndlp->nlp_xri << 16) |
1676 ((uint32_t) ndlp->nlp_type << 8) |
1677 ((uint32_t) ndlp->nlp_rpi & 0xff));
1678 /* FIND node did mapped */
1679 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1680 "%d:0902 FIND node DID mapped "
1681 "Data: x%p x%x x%x x%x\n",
1683 ndlp, ndlp->nlp_DID,
1684 ndlp->nlp_flag, data1);
1690 /* no match found */
1691 return ((struct lpfc_nodelist *) 0);
1693 /* Search for a nodelist entry on a specific list */
1694 struct lpfc_nodelist *
1695 lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
/* Search for a node matching 'did' (via lpfc_matchdid) on each node list
 * whose NLP_SEARCH_* bit is set in 'order'; returns NULL if no match.
 * NOTE(review): log message id 0931 is reused for the adisc, reglogin,
 * prli, npr and unused branches below -- message ids are meant to be
 * unique per log site; confirm and renumber in the pristine source.
 */
1697 struct lpfc_nodelist *ndlp, *next_ndlp;
1700 if (order & NLP_SEARCH_UNMAPPED) {
1701 list_for_each_entry_safe(ndlp, next_ndlp,
1702 &phba->fc_nlpunmap_list, nlp_listp) {
1703 if (lpfc_matchdid(phba, ndlp, did)) {
/* data1 packs state/xri/type/rpi into one word for the trace log. */
1704 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1705 ((uint32_t) ndlp->nlp_xri << 16) |
1706 ((uint32_t) ndlp->nlp_type << 8) |
1707 ((uint32_t) ndlp->nlp_rpi & 0xff));
1708 /* FIND node DID unmapped */
1709 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1710 "%d:0929 FIND node DID unmapped"
1711 " Data: x%p x%x x%x x%x\n",
1713 ndlp, ndlp->nlp_DID,
1714 ndlp->nlp_flag, data1);
1720 if (order & NLP_SEARCH_MAPPED) {
1721 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1723 if (lpfc_matchdid(phba, ndlp, did)) {
1725 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1726 ((uint32_t) ndlp->nlp_xri << 16) |
1727 ((uint32_t) ndlp->nlp_type << 8) |
1728 ((uint32_t) ndlp->nlp_rpi & 0xff));
1729 /* FIND node DID mapped */
1730 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1731 "%d:0930 FIND node DID mapped "
1732 "Data: x%p x%x x%x x%x\n",
1734 ndlp, ndlp->nlp_DID,
1735 ndlp->nlp_flag, data1);
1741 if (order & NLP_SEARCH_PLOGI) {
1742 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1744 if (lpfc_matchdid(phba, ndlp, did)) {
1746 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1747 ((uint32_t) ndlp->nlp_xri << 16) |
1748 ((uint32_t) ndlp->nlp_type << 8) |
1749 ((uint32_t) ndlp->nlp_rpi & 0xff));
1750 /* LOG change to PLOGI */
1751 /* FIND node DID plogi */
1752 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1753 "%d:0908 FIND node DID plogi "
1754 "Data: x%p x%x x%x x%x\n",
1756 ndlp, ndlp->nlp_DID,
1757 ndlp->nlp_flag, data1);
1763 if (order & NLP_SEARCH_ADISC) {
1764 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1766 if (lpfc_matchdid(phba, ndlp, did)) {
1768 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1769 ((uint32_t) ndlp->nlp_xri << 16) |
1770 ((uint32_t) ndlp->nlp_type << 8) |
1771 ((uint32_t) ndlp->nlp_rpi & 0xff));
1772 /* LOG change to ADISC */
1773 /* FIND node DID adisc */
1774 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1775 "%d:0931 FIND node DID adisc "
1776 "Data: x%p x%x x%x x%x\n",
1778 ndlp, ndlp->nlp_DID,
1779 ndlp->nlp_flag, data1);
1785 if (order & NLP_SEARCH_REGLOGIN) {
1786 list_for_each_entry_safe(ndlp, next_ndlp,
1787 &phba->fc_reglogin_list, nlp_listp) {
1788 if (lpfc_matchdid(phba, ndlp, did)) {
1790 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1791 ((uint32_t) ndlp->nlp_xri << 16) |
1792 ((uint32_t) ndlp->nlp_type << 8) |
1793 ((uint32_t) ndlp->nlp_rpi & 0xff));
1794 /* LOG change to REGLOGIN */
1795 /* FIND node DID reglogin */
1796 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1797 "%d:0931 FIND node DID reglogin"
1798 " Data: x%p x%x x%x x%x\n",
1800 ndlp, ndlp->nlp_DID,
1801 ndlp->nlp_flag, data1);
1807 if (order & NLP_SEARCH_PRLI) {
1808 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1810 if (lpfc_matchdid(phba, ndlp, did)) {
1812 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1813 ((uint32_t) ndlp->nlp_xri << 16) |
1814 ((uint32_t) ndlp->nlp_type << 8) |
1815 ((uint32_t) ndlp->nlp_rpi & 0xff));
1816 /* LOG change to PRLI */
1817 /* FIND node DID prli */
1818 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1819 "%d:0931 FIND node DID prli "
1820 "Data: x%p x%x x%x x%x\n",
1822 ndlp, ndlp->nlp_DID,
1823 ndlp->nlp_flag, data1);
1829 if (order & NLP_SEARCH_NPR) {
1830 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1832 if (lpfc_matchdid(phba, ndlp, did)) {
1834 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1835 ((uint32_t) ndlp->nlp_xri << 16) |
1836 ((uint32_t) ndlp->nlp_type << 8) |
1837 ((uint32_t) ndlp->nlp_rpi & 0xff));
1838 /* LOG change to NPR */
1839 /* FIND node DID npr */
1840 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1841 "%d:0931 FIND node DID npr "
1842 "Data: x%p x%x x%x x%x\n",
1844 ndlp, ndlp->nlp_DID,
1845 ndlp->nlp_flag, data1);
1851 if (order & NLP_SEARCH_UNUSED) {
/* NOTE(review): this branch iterates fc_adisc_list even though the
 * caller asked for the UNUSED list -- looks like a copy/paste bug;
 * verify against the pristine source before changing. */
1852 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1854 if (lpfc_matchdid(phba, ndlp, did)) {
1856 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1857 ((uint32_t) ndlp->nlp_xri << 16) |
1858 ((uint32_t) ndlp->nlp_type << 8) |
1859 ((uint32_t) ndlp->nlp_rpi & 0xff));
1860 /* LOG change to UNUSED */
1861 /* FIND node DID unused */
1862 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1863 "%d:0931 FIND node DID unused "
1864 "Data: x%p x%x x%x x%x\n",
1866 ndlp, ndlp->nlp_DID,
1867 ndlp->nlp_flag, data1);
1873 /* FIND node did <did> NOT FOUND */
1874 lpfc_printf_log(phba,
1877 "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1878 phba->brd_no, did, order);
1880 /* no match found */
1881 return ((struct lpfc_nodelist *) 0);
1884 struct lpfc_nodelist *
1885 lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
/* Find-or-create a node for 'did' and mark whether it needs discovery.
 * Unknown DID: allocate and init a new NPR-state node flagged
 * NLP_NPR_2B_DISC (unless an RSCN filter excludes it while HBA is READY).
 * Known DID: adjust NLP_NPR_2B_DISC per the RSCN payload, and reset
 * nodes currently on the ADISC/PLOGI lists back to NPR.
 */
1887 struct lpfc_nodelist *ndlp;
1890 if((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
/* While HBA is READY, only DIDs named in the active RSCN payload are
 * worth creating a node for. */
1891 if ((phba->hba_state == LPFC_HBA_READY) &&
1892 ((lpfc_rscn_payload_check(phba, did) == 0)))
1894 ndlp = (struct lpfc_nodelist *)
1895 mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1898 lpfc_nlp_init(phba, ndlp, did);
1899 ndlp->nlp_state = NLP_STE_NPR_NODE;
1900 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1901 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1904 if ((phba->hba_state == LPFC_HBA_READY) &&
1905 (phba->fc_flag & FC_RSCN_MODE)) {
1906 if(lpfc_rscn_payload_check(phba, did)) {
1907 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1910 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
/* Existing node mid-login (ADISC/PLOGI list): restart it from NPR. */
1915 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1916 if ((flg == NLP_ADISC_LIST) ||
1917 (flg == NLP_PLOGI_LIST)) {
1920 ndlp->nlp_state = NLP_STE_NPR_NODE;
1921 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1922 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1927 /* Build a list of nodes to discover based on the loopmap */
1929 lpfc_disc_list_loopmap(struct lpfc_hba * phba)
/* Only meaningful with the link up and loop topology; for each ALPA in the
 * received loop map (or every possible ALPA if no map), create/mark a
 * discovery node, skipping our own address.
 */
1932 uint32_t alpa, index;
1934 if (phba->hba_state <= LPFC_LINK_DOWN) {
1937 if (phba->fc_topology != TOPOLOGY_LOOP) {
1941 /* Check for loop map present or not */
1942 if (phba->alpa_map[0]) {
/* alpa_map[0] holds the entry count; entries start at index 1. */
1943 for (j = 1; j <= phba->alpa_map[0]; j++) {
1944 alpa = phba->alpa_map[j];
1946 if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1949 lpfc_setup_disc_node(phba, alpa);
1952 /* No alpamap, so try all alpa's */
1953 for (j = 0; j < FC_MAXLOOP; j++) {
1954 /* If cfg_scan_down is set, start from highest
1955 * ALPA (0xef) to lowest (0x1).
1957 if (phba->cfg_scan_down)
1960 index = FC_MAXLOOP - j - 1;
1961 alpa = lpfcAlpaArray[index];
1962 if ((phba->fc_myDID & 0xff) == alpa) {
1966 lpfc_setup_disc_node(phba, alpa);
1972 /* Start Link up / RSCN discovery on NPR list */
1974 lpfc_disc_start(struct lpfc_hba * phba)
/* Drive discovery of nodes on the NPR list: first ADISCs, then PLOGIs.
 * If nothing needs ADISC and the HBA is not yet READY, a CLEAR_LA mailbox
 * is issued instead. In RSCN mode, flush or re-handle RSCNs depending on
 * whether more arrived while this pass was in progress.
 */
1976 struct lpfc_sli *psli;
1978 struct lpfc_nodelist *ndlp, *next_ndlp;
1979 uint32_t did_changed, num_sent;
1980 uint32_t clear_la_pending;
1984 if (phba->hba_state <= LPFC_LINK_DOWN) {
1987 if (phba->hba_state == LPFC_CLEAR_LA)
1988 clear_la_pending = 1;
1990 clear_la_pending = 0;
1992 if (phba->hba_state < LPFC_HBA_READY) {
1993 phba->hba_state = LPFC_DISC_AUTH;
1995 lpfc_set_disctmo(phba);
1997 if (phba->fc_prevDID == phba->fc_myDID) {
2002 phba->fc_prevDID = phba->fc_myDID;
2003 phba->num_disc_nodes = 0;
2005 /* Start Discovery state <hba_state> */
2006 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2007 "%d:0202 Start Discovery hba state x%x "
2008 "Data: x%x x%x x%x\n",
2009 phba->brd_no, phba->hba_state, phba->fc_flag,
2010 phba->fc_plogi_cnt, phba->fc_adisc_cnt);
2012 /* If our did changed, we MUST do PLOGI */
2013 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
2015 if(ndlp->nlp_flag & NLP_NPR_2B_DISC) {
/* Clearing NLP_NPR_ADISC forces a full PLOGI for this node. */
2017 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2021 /* First do ADISCs - if any */
2022 num_sent = lpfc_els_disc_adisc(phba);
2027 if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
2028 /* If we get here, there is nothing to ADISC */
2029 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
2030 phba->hba_state = LPFC_CLEAR_LA;
2031 lpfc_clear_la(phba, mbox);
2032 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
/* On mailbox submit failure: free the mailbox, flush the discovery
 * lists, re-enable the stopped IOCB rings and declare READY. */
2033 if (lpfc_sli_issue_mbox
2034 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
2035 == MBX_NOT_FINISHED) {
2036 mempool_free( mbox, phba->mbox_mem_pool);
2037 lpfc_disc_flush_list(phba);
2038 psli->ring[(psli->ip_ring)].flag &=
2039 ~LPFC_STOP_IOCB_EVENT;
2040 psli->ring[(psli->fcp_ring)].flag &=
2041 ~LPFC_STOP_IOCB_EVENT;
2042 psli->ring[(psli->next_ring)].flag &=
2043 ~LPFC_STOP_IOCB_EVENT;
2044 phba->hba_state = LPFC_HBA_READY;
2048 /* Next do PLOGIs - if any */
2049 num_sent = lpfc_els_disc_plogi(phba);
2054 if (phba->fc_flag & FC_RSCN_MODE) {
2055 /* Check to see if more RSCNs came in while we
2056 * were processing this one.
2058 if ((phba->fc_rscn_id_cnt == 0) &&
2059 (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
2060 lpfc_els_flush_rscn(phba);
2062 lpfc_els_handle_rscn(phba);
2070 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
2071 * ring that match the specified nodelist.
2074 lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
2076 struct lpfc_sli *psli;
2078 struct lpfc_iocbq *iocb, *next_iocb;
2079 struct lpfc_sli_ring *pring;
2080 struct lpfc_dmabuf *mp;
2083 pring = &psli->ring[LPFC_ELS_RING];
2085 /* Error matching iocb on txq or txcmplq
2086 * First check the txq.
2088 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2089 if (iocb->context1 != ndlp) {
/* Only ELS request / ELS response IOCBs are reclaimed here. */
2093 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2094 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2096 list_del(&iocb->list);
2098 lpfc_els_free_iocb(phba, iocb);
2102 /* Next check the txcmplq */
2103 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2104 if (iocb->context1 != ndlp) {
2108 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2109 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
/* Clear the completion so the eventual firmware completion is a no-op. */
2111 iocb->iocb_cmpl = NULL;
2112 /* context2 = cmd, context2->next = rsp, context3 =
2114 if (iocb->context2) {
2115 /* Free the response IOCB before handling the
2118 mp = (struct lpfc_dmabuf *)
2119 (((struct lpfc_dmabuf *) (iocb->context2))
2122 /* Delay before releasing rsp buffer to
2123 * give UNREG mbox a chance to take
2127 &phba->freebufList);
2129 lpfc_mbuf_free(phba,
2130 ((struct lpfc_dmabuf *)
2131 iocb->context2)->virt,
2132 ((struct lpfc_dmabuf *)
2133 iocb->context2)->phys);
2134 kfree(iocb->context2);
2137 if (iocb->context3) {
2138 lpfc_mbuf_free(phba,
2139 ((struct lpfc_dmabuf *)
2140 iocb->context3)->virt,
2141 ((struct lpfc_dmabuf *)
2142 iocb->context3)->phys);
2143 kfree(iocb->context3);
/* Abort discovery: for every node still on the PLOGI or ADISC lists, mark
 * it disconnected, reclaim its queued ELS IOCBs, and remove the node. */
2152 lpfc_disc_flush_list(struct lpfc_hba * phba)
2154 struct lpfc_nodelist *ndlp, *next_ndlp;
2156 if (phba->fc_plogi_cnt) {
2157 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
2159 lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
2161 lpfc_free_tx(phba, ndlp);
2162 lpfc_nlp_remove(phba, ndlp);
2165 if (phba->fc_adisc_cnt) {
2166 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2168 lpfc_set_failmask(phba, ndlp, LPFC_DEV_DISCONNECTED,
2170 lpfc_free_tx(phba, ndlp);
2171 lpfc_nlp_remove(phba, ndlp);
2177 /*****************************************************************************/
2179 * NAME: lpfc_disc_timeout
2181 * FUNCTION: Fibre Channel driver discovery timeout routine.
2183 * EXECUTION ENVIRONMENT: interrupt only
2191 /*****************************************************************************/
2193 lpfc_disc_timeout(unsigned long ptr)
/* fc_disctmo expiry handler. Dispatches on hba_state: each branch handles
 * the timeout of one discovery phase (FAN wait, FLOGI, NameServer login,
 * NameServer query, node authentication, CLEAR_LA, RSCN) and either
 * retries, moves discovery forward, or resets the link.
 * Runs under host_lock with interrupts saved.
 */
2195 struct lpfc_hba *phba;
2196 struct lpfc_sli *psli;
2197 struct lpfc_nodelist *ndlp;
2199 unsigned long iflag;
2201 phba = (struct lpfc_hba *)ptr;
2205 spin_lock_irqsave(phba->host->host_lock, iflag);
2208 phba->fc_flag &= ~FC_DISC_TMO;
2210 /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
2211 if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
2213 lpfc_printf_log(phba,
2216 "%d:0221 FAN timeout\n",
2219 /* Forget about FAN, Start discovery by sending a FLOGI
2220 * hba_state is identically LPFC_FLOGI while waiting for FLOGI
2223 phba->hba_state = LPFC_FLOGI;
2224 lpfc_set_disctmo(phba);
2225 lpfc_initial_flogi(phba);
2229 /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2230 if (phba->hba_state == LPFC_FLOGI) {
2231 /* Initial FLOGI timeout */
2232 lpfc_printf_log(phba,
2235 "%d:0222 Initial FLOGI timeout\n",
2238 /* Assume no Fabric and go on with discovery.
2239 * Check for outstanding ELS FLOGI to abort.
2242 /* FLOGI failed, so just use loop map to make discovery list */
2243 lpfc_disc_list_loopmap(phba);
2245 /* Start discovery */
2246 lpfc_disc_start(phba);
2250 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2252 if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
2253 /* Timeout while waiting for NameServer login */
2254 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2255 "%d:0223 Timeout while waiting for NameServer "
2256 "login\n", phba->brd_no);
2258 /* Next look for NameServer ndlp */
2259 if ((ndlp = lpfc_findnode_did(phba,
2260 NLP_SEARCH_ALL, NameServer_DID))) {
2261 lpfc_nlp_remove(phba, ndlp);
2263 /* Start discovery */
2264 lpfc_disc_start(phba);
2268 /* Check for wait for NameServer Rsp timeout */
2269 if (phba->hba_state == LPFC_NS_QRY) {
2270 /* NameServer Query timeout */
2271 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2272 "%d:0224 NameServer Query timeout "
2275 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2278 lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2280 if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2281 /* Try it one more time */
2282 if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT) ==
2287 phba->fc_ns_retry = 0;
2290 /* Nothing to authenticate, so CLEAR_LA right now */
2291 if (phba->hba_state != LPFC_CLEAR_LA) {
2292 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
2294 phba->hba_state = LPFC_CLEAR_LA;
2295 lpfc_clear_la(phba, mbox);
2296 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2297 if (lpfc_sli_issue_mbox
2298 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
2299 == MBX_NOT_FINISHED) {
2300 mempool_free(mbox, phba->mbox_mem_pool);
2304 /* Device Discovery completion error */
2305 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2306 "%d:0226 Device Discovery "
2307 "completion error\n",
2309 phba->hba_state = LPFC_HBA_ERROR;
/* Last resort: bring the link down and re-initialize it. */
2312 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC))) {
2313 /* Setup and issue mailbox INITIALIZE LINK command */
2314 lpfc_linkdown(phba);
2315 lpfc_init_link(phba, mbox,
2317 phba->cfg_link_speed);
2318 mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2319 if (lpfc_sli_issue_mbox
2320 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
2321 == MBX_NOT_FINISHED) {
2322 mempool_free( mbox, phba->mbox_mem_pool);
2328 if (phba->hba_state == LPFC_DISC_AUTH) {
2329 /* Node Authentication timeout */
2330 lpfc_printf_log(phba,
2333 "%d:0227 Node Authentication timeout\n",
2335 lpfc_disc_flush_list(phba);
2336 if (phba->hba_state != LPFC_CLEAR_LA) {
2337 if ((mbox = mempool_alloc(phba->mbox_mem_pool,
2339 phba->hba_state = LPFC_CLEAR_LA;
2340 lpfc_clear_la(phba, mbox);
2341 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2342 if (lpfc_sli_issue_mbox
2343 (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB))
2344 == MBX_NOT_FINISHED) {
2345 mempool_free(mbox, phba->mbox_mem_pool);
2353 if (phba->hba_state == LPFC_CLEAR_LA) {
2354 /* CLEAR LA timeout */
2355 lpfc_printf_log(phba,
2358 "%d:0228 CLEAR LA timeout\n",
2361 lpfc_disc_flush_list(phba);
/* Give up on CLEAR_LA: re-enable the stopped rings and go READY. */
2362 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2363 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2364 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2365 phba->hba_state = LPFC_HBA_READY;
2369 if ((phba->hba_state == LPFC_HBA_READY) &&
2370 (phba->fc_flag & FC_RSCN_MODE)) {
2372 lpfc_printf_log(phba,
2375 "%d:0231 RSCN timeout Data: x%x x%x\n",
2377 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2379 /* Cleanup any outstanding ELS commands */
2380 lpfc_els_flush_cmd(phba);
2382 lpfc_els_flush_rscn(phba);
2383 lpfc_disc_flush_list(phba);
2388 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2392 /*****************************************************************************/
2394 * NAME: lpfc_scan_timeout
2396 * FUNCTION: Fibre Channel driver scsi_scan_host timeout routine.
2398 * EXECUTION ENVIRONMENT: interrupt only
2406 /*****************************************************************************/
2408 lpfc_scan_timeout(unsigned long ptr)
/* Clears FC_SCSI_SCAN_TMO and posts an LPFC_EVT_SCAN event so the scan is
 * handled in the discovery tasklet, not in timer (interrupt) context. */
2410 struct lpfc_hba *phba;
2411 unsigned long iflag;
2413 phba = (struct lpfc_hba *)ptr;
2417 spin_lock_irqsave(phba->host->host_lock, iflag);
2418 phba->fc_flag &= ~FC_SCSI_SCAN_TMO;
2419 lpfc_discq_post_event(phba, NULL, NULL, LPFC_EVT_SCAN);
2420 spin_unlock_irqrestore(phba->host->host_lock, iflag);
/* nlp_tmofunc expiry handler: defer the nodev timeout to the discovery
 * tasklet by posting an LPFC_EVT_NODEV_TMO event for this node. */
2425 lpfc_nodev_timeout(unsigned long ptr)
2427 struct lpfc_hba *phba;
2428 struct lpfc_nodelist *ndlp;
2429 unsigned long iflag;
2431 ndlp = (struct lpfc_nodelist *)ptr;
2432 phba = ndlp->nlp_phba;
2433 spin_lock_irqsave(phba->host->host_lock, iflag);
2435 /* All nodev timeouts are posted to discovery tasklet */
2436 lpfc_discq_post_event(phba, ndlp, NULL, LPFC_EVT_NODEV_TMO);
2438 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2442 /*****************************************************************************/
2444 * NAME: lpfc_find_target
2446 * FUNCTION: Fibre Channel bus/target/LUN to struct lpfc_target lookup
2448 * EXECUTION ENVIRONMENT:
2451 * ptr to desired struct lpfc_target
2453 /*****************************************************************************/
2454 struct lpfc_target *
2455 lpfc_find_target(struct lpfc_hba * phba, uint32_t tgt,
2456 struct lpfc_nodelist *nlp)
/* Map SCSI target id 'tgt' to its lpfc_target, allocating one on first use,
 * and (re)bind it to the node found on the mapped list for that id.
 * NOTE(review): the 'nlp' parameter is overwritten by the list walk below;
 * its incoming value appears unused -- confirm against pristine source.
 */
2458 struct lpfc_target *targetp;
2460 /* search the mapped list for this target ID */
2462 list_for_each_entry(nlp, &phba->fc_nlpmap_list, nlp_listp) {
2463 if (tgt == nlp->nlp_sid)
/* list_for_each_entry leaves nlp pointing at the list head itself when
 * no entry matched; that is what this sentinel comparison detects. */
2467 if (&(nlp->nlp_listp) == &(phba->fc_nlpmap_list))
2471 targetp = phba->device_queue_hash[tgt];
2473 /* First see if the SCSI ID has an allocated struct lpfc_target */
2475 targetp = kmalloc(sizeof (struct lpfc_target), GFP_ATOMIC);
2479 memset(targetp, 0, sizeof (struct lpfc_target));
2480 phba->device_queue_hash[tgt] = targetp;
2481 targetp->scsi_id = tgt;
2483 /* Create SCSI Target <tgt> */
2484 lpfc_printf_log(phba,
2486 LOG_DISCOVERY | LOG_FCP,
2487 "%d:0204 Create SCSI Target %d\n",
2491 if (targetp->pnode == NULL) {
2492 targetp->pnode = nlp;
2493 nlp->nlp_Target = targetp;
2494 #ifdef FC_TRANS_VER1
2496 * This code does not apply to SLES9 since there is no
2497 * starget defined in the midlayer. Additionally,
2498 * dynamic target discovery to the midlayer is not
2501 if(!(phba->fc_flag & FC_LOADING)) {
2502 /* Add SCSI target / SCSI Hotplug if called
2503 * after initial driver load.
2505 lpfc_target_add(phba, targetp);
2510 if(targetp->pnode != nlp) {
2512 * Get rid of the old nlp before updating
2513 * targetp with the new one.
2515 lpfc_nlp_list(phba, targetp->pnode, NLP_NO_LIST);
2516 targetp->pnode = nlp;
2519 nlp->nlp_Target = targetp;
2525 * Set, or clear, failMask bits in struct lpfc_nodelist
2528 lpfc_set_failmask(struct lpfc_hba * phba,
2529 struct lpfc_nodelist * ndlp, uint32_t bitmask, uint32_t flag)
/* flag selects the operation: LPFC_SET_BITMASK ORs 'bitmask' into
 * nlp_failMask (oldmask tracks whether anything actually changed);
 * otherwise the bits are cleared. */
2534 /* Failmask change on NPort <nlp_DID> */
2535 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2536 "%d:0208 Failmask change on NPort x%x "
2537 "Data: x%x x%x x%x\n",
2539 ndlp->nlp_DID, ndlp->nlp_failMask, bitmask, flag);
2541 if (flag == LPFC_SET_BITMASK) {
2542 oldmask = ndlp->nlp_failMask;
2543 /* Set failMask event */
2544 ndlp->nlp_failMask |= bitmask;
2545 if (oldmask != ndlp->nlp_failMask) {
2552 /* Clear failMask event */
2553 ndlp->nlp_failMask &= ~bitmask;
2560 * This routine handles processing a NameServer REG_LOGIN mailbox
2561 * command upon completion. It is setup in the LPFC_MBOXQ
2562 * as the completion routine when the command is
2563 * handed off to the SLI layer.
2566 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2568 struct lpfc_sli *psli;
2570 struct lpfc_dmabuf *mp;
2571 struct lpfc_nodelist *ndlp;
2576 ndlp = (struct lpfc_nodelist *) pmb->context2;
2577 mp = (struct lpfc_dmabuf *) (pmb->context1);
2579 /* The mailbox was populated by the HBA. Flush it to main store for the
2580 * driver. Note that all context buffers are from the driver's
2581 * dma pool and have length LPFC_BPL_SIZE.
2583 pci_dma_sync_single_for_cpu(phba->pcidev, mp->phys, LPFC_BPL_SIZE,
2584 PCI_DMA_FROMDEVICE);
2586 pmb->context1 = NULL;
/* Re-hash the node under the RPI assigned by the firmware, mark it a
 * fabric node, and move it to the unmapped list. */
2588 if (ndlp->nlp_rpi != 0)
2589 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
2590 ndlp->nlp_rpi = mb->un.varWords[0];
2591 lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
2592 ndlp->nlp_type |= NLP_FABRIC;
2593 ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2594 lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2596 /* Start issuing Fabric-Device Management Interface (FDMI)
2597 * command to 0xfffffa (FDMI well known port)
2599 if (phba->cfg_fdmi_on == 1) {
2600 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
2603 * Delay issuing FDMI command if fdmi-on=2
2604 * (supporting RPA/hostname)
2606 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
/* Release the DMA buffer and the mailbox now that processing is done. */
2609 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2611 mempool_free( pmb, phba->mbox_mem_pool);
2617 * This routine looks up the ndlp hash
2618 * table for the given RPI. If rpi found
2619 * it return the node list pointer
2622 struct lpfc_nodelist *
2623 lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
/* Walk the singly-linked chain for this RPI's hash bucket; stops at the
 * matching node or at the NULL chain end. */
2625 struct lpfc_nodelist *ret;
2627 ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
2628 while ((ret != 0) && (ret->nlp_rpi != rpi)) {
2629 ret = ret->nlp_rpi_hash_next;
2635 * This routine looks up the ndlp hash table for the
2636 * given RPI. If rpi found it return the node list
2637 * pointer else return 0 after deleting the entry
2640 struct lpfc_nodelist *
2641 lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
/* Unlink and return the node with this RPI from its hash chain.
 * NOTE(review): stray double semicolon on the declaration below -- a
 * harmless empty statement, but should be cleaned up at the source. */
2643 struct lpfc_nodelist *ret, *temp;;
2645 ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
/* Head of chain matches: repoint the bucket at the next entry. */
2649 if (ret->nlp_rpi == rpi) {
2650 phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
2651 ret->nlp_rpi_hash_next;
2652 ret->nlp_rpi_hash_next = NULL;
/* Otherwise advance until 'ret' is the predecessor of the match. */
2656 while ((ret->nlp_rpi_hash_next != 0) &&
2657 (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
2658 ret = ret->nlp_rpi_hash_next;
2661 if (ret->nlp_rpi_hash_next != 0) {
2662 temp = ret->nlp_rpi_hash_next;
2663 ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
2664 temp->nlp_rpi_hash_next = NULL;
2672 * This routine adds the node list entry to the
/* Insert 'ndlp' at the head of the hash bucket chain for 'rpi'. */
2676 lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2682 index = LPFC_RPI_HASH_FUNC(rpi);
2683 ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
2684 phba->fc_nlplookup[index] = ndlp;
2689 lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2692 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2693 init_timer(&ndlp->nlp_tmofunc);
2694 ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2695 ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2696 init_timer(&ndlp->nlp_delayfunc);
2697 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2698 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2699 ndlp->nlp_DID = did;
2700 ndlp->nlp_phba = phba;