linux-2.6.git (vserver 2.0 rc7): drivers/scsi/lpfc/lpfc_hbadisc.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Enterprise Fibre Channel Host Bus Adapters.                     *
4  * Refer to the README file included with this package for         *
5  * driver version and adapter support.                             *
6  * Copyright (C) 2004 Emulex Corporation.                          *
7  * www.emulex.com                                                  *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of the GNU General Public License     *
11  * as published by the Free Software Foundation; either version 2  *
12  * of the License, or (at your option) any later version.          *
13  *                                                                 *
14  * This program is distributed in the hope that it will be useful, *
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
17  * GNU General Public License for more details, a copy of which    *
18  * can be found in the file COPYING included with this package.    *
19  *******************************************************************/
20
21 /*
22  * $Id: lpfc_hbadisc.c 1.266 2005/04/13 11:59:06EDT sf_support Exp  $
23  */
24
25 #include <linux/blkdev.h>
26 #include <linux/pci.h>
27 #include <linux/kthread.h>
28 #include <linux/interrupt.h>
29
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33
34 #include "lpfc_hw.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_scsi.h"
38 #include "lpfc.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_crtn.h"
41
42 /* AlpaArray for assignment of scsi id for scan-down and bind_method */
43 static uint8_t lpfcAlpaArray[] = {
44         0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
45         0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
46         0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
47         0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
48         0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
49         0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
50         0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
51         0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
52         0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
53         0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
54         0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
55         0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
56         0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
57 };
58
59 static void lpfc_disc_timeout_handler(struct lpfc_hba *);
60
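/*
 * Handle expiration of a node's nodev timer: log the timeout for
 * non-fabric nodes, clear the NLP_NODEV_TMO flag, abort any
 * outstanding FCP I/O queued to the target's SCSI id, and remove
 * the node via the discovery state machine (NLP_EVT_DEVICE_RM).
 */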
61 static void
62 lpfc_process_nodev_timeout(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
63 {
64         if (!(ndlp->nlp_type & NLP_FABRIC)) {
65                 /* Nodev timeout on NPort <nlp_DID> */
66                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
67                         "%d:0203 Nodev timeout on NPort x%x "
68                         "Data: x%x x%x x%x\n",
69                         phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
70                         ndlp->nlp_state, ndlp->nlp_rpi);
71         }
72
73         spin_lock_irq(phba->host->host_lock);
74         if (!(ndlp->nlp_flag & NLP_NODEV_TMO)) {
75                 spin_unlock_irq(phba->host->host_lock);
76                 return;
77         }
78
79         ndlp->nlp_flag &= ~NLP_NODEV_TMO;
80
81         if (ndlp->nlp_sid != NLP_NO_SID) {
82                 /* flush the target */
83                 lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
84                         ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
85         }
86         spin_unlock_irq(phba->host->host_lock);
87
88         lpfc_disc_state_machine(phba, ndlp, NULL, NLP_EVT_DEVICE_RM);
89         return;
90 }
91
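/*
 * Drain the HBA work_list: dequeue each queued worker event (nodev
 * timeout, ELS retry delay, online / offline request) and invoke its
 * handler with the host_lock dropped, freeing the event afterwards
 * when it was allocated for the queue.
 */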
92 static void
93 lpfc_work_list_done(struct lpfc_hba * phba)
94 {
95         struct lpfc_work_evt  *evtp = NULL;
96         struct lpfc_nodelist  *ndlp;
97         int free_evt;
98
99         spin_lock_irq(phba->host->host_lock);
100         while(!list_empty(&phba->work_list)) {
101                 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
102                                  evt_listp);
103                 spin_unlock_irq(phba->host->host_lock);
104                 free_evt = 1;
105                 switch(evtp->evt) {
106                 case LPFC_EVT_NODEV_TMO:
107                         ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
108                         lpfc_process_nodev_timeout(phba, ndlp);
109                         free_evt = 0;
110                         break;
111                 case LPFC_EVT_ELS_RETRY:
112                         ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
113                         lpfc_els_retry_delay_handler(ndlp);
114                         free_evt = 0;
115                         break;
116                 case LPFC_EVT_ONLINE:
117                         *(int *)(evtp->evt_arg1)  = lpfc_online(phba);
118                         complete((struct completion *)(evtp->evt_arg2));
119                         break;
120                 case LPFC_EVT_OFFLINE:
121                         *(int *)(evtp->evt_arg1)  = lpfc_offline(phba);
122                         complete((struct completion *)(evtp->evt_arg2));
123                         break;
124                 }
125                 if (free_evt)
126                         kfree(evtp);
127                 spin_lock_irq(phba->host->host_lock);
128         }
129         spin_unlock_irq(phba->host->host_lock);
130
131 }
132
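/*
 * Main worker thread processing: handle pending host attention
 * conditions (error, mailbox and link attention), expired worker
 * timers, deferred slow-path ring events (re-enabling the ring
 * interrupts), and finally the queued work_list events.
 */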
133 static void
134 lpfc_work_done(struct lpfc_hba * phba)
135 {
136         struct lpfc_sli_ring *pring;
137         int i;
138         uint32_t ha_copy;
139         uint32_t control;
140         uint32_t work_hba_events;
141
142         spin_lock_irq(phba->host->host_lock);
143         ha_copy = phba->work_ha;
144         phba->work_ha = 0;
145         work_hba_events=phba->work_hba_events;
146         spin_unlock_irq(phba->host->host_lock);
147
148         if(ha_copy & HA_ERATT)
149                 lpfc_handle_eratt(phba);
150
151         if(ha_copy & HA_MBATT)
152                 lpfc_sli_handle_mb_event(phba);
153
154         if(ha_copy & HA_LATT)
155                 lpfc_handle_latt(phba);
156
157         if (work_hba_events & WORKER_DISC_TMO)
158                 lpfc_disc_timeout_handler(phba);
159
160         if (work_hba_events & WORKER_ELS_TMO)
161                 lpfc_els_timeout_handler(phba);
162
163         if (work_hba_events & WORKER_MBOX_TMO)
164                 lpfc_mbox_timeout_handler(phba);
165
166         if (work_hba_events & WORKER_FDMI_TMO)
167                 lpfc_fdmi_tmo_handler(phba);
168
169         spin_lock_irq(phba->host->host_lock);
170         phba->work_hba_events &= ~work_hba_events;
171         spin_unlock_irq(phba->host->host_lock);
172
173         for (i = 0; i < phba->sli.num_rings; i++, ha_copy >>= 4) {
174                 pring = &phba->sli.ring[i];
175                 if ((ha_copy & HA_RXATT)
176                     || (pring->flag & LPFC_DEFERRED_RING_EVENT)) {
177                         if (pring->flag & LPFC_STOP_IOCB_MASK) {
178                                 pring->flag |= LPFC_DEFERRED_RING_EVENT;
179                         } else {
180                                 lpfc_sli_handle_slow_ring_event(phba, pring,
181                                                                 (ha_copy &
182                                                                  HA_RXMASK));
183                                 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
184                         }
185                         /*
186                          * Turn on Ring interrupts
187                          */
188                         spin_lock_irq(phba->host->host_lock);
189                         control = readl(phba->HCregaddr);
190                         control |= (HC_R0INT_ENA << i);
191                         writel(control, phba->HCregaddr);
192                         readl(phba->HCregaddr); /* flush */
193                         spin_unlock_irq(phba->host->host_lock);
194                 }
195         }
196
197         lpfc_work_list_done (phba);
198
199 }
200
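/* Return 1 if the worker thread has pending work or has been asked to stop */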
201 static int
202 check_work_wait_done(struct lpfc_hba *phba) {
203
204         spin_lock_irq(phba->host->host_lock);
205         if (phba->work_ha ||
206             phba->work_hba_events ||
207             (!list_empty(&phba->work_list)) ||
208             kthread_should_stop()) {
209                 spin_unlock_irq(phba->host->host_lock);
210                 return 1;
211         } else {
212                 spin_unlock_irq(phba->host->host_lock);
213                 return 0;
214         }
215 }
216
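/*
 * Worker thread entry point: sleep on work_waitq until there is work
 * to do (see check_work_wait_done()), then process it via
 * lpfc_work_done(); exit when kthread_stop() is called.
 */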
217 int
218 lpfc_do_work(void *p)
219 {
220         struct lpfc_hba *phba = p;
221         int rc;
222         DECLARE_WAIT_QUEUE_HEAD(work_waitq);
223
224         set_user_nice(current, -20);
225         phba->work_wait = &work_waitq;
226
227         while (1) {
228
229                 rc = wait_event_interruptible(work_waitq,
230                                                 check_work_wait_done(phba));
231                 BUG_ON(rc);
232
233                 if (kthread_should_stop())
234                         break;
235
236                 lpfc_work_done(phba);
237
238         }
239         phba->work_wait = NULL;
240         return 0;
241 }
242
243 /*
244  * This is only called to handle FC worker events. Since this is a rare
245  * occurrence, we allocate a struct lpfc_work_evt structure here instead of
246  * embedding it in the IOCB.
247  */
248 int
249 lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2,
250                       uint32_t evt)
251 {
252         struct lpfc_work_evt  *evtp;
253
254         /*
255          * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
256          * be queued to worker thread for processing
257          */
258         evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_KERNEL);
259         if (!evtp)
260                 return 0;
261
262         evtp->evt_arg1  = arg1;
263         evtp->evt_arg2  = arg2;
264         evtp->evt       = evt;
265
266         spin_lock_irq(phba->host->host_lock);
267         list_add_tail(&evtp->evt_listp, &phba->work_list);
268         if (phba->work_wait)
269                 wake_up(phba->work_wait);
270         spin_unlock_irq(phba->host->host_lock);
271
272         return 1;
273 }
274
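/*
 * Process a link-down condition: set hba_state to LPFC_LINK_DOWN,
 * unregister the firmware default RPIs, flush pending RSCN and ELS
 * activity, run every active node through link-down handling
 * (fabric nodes other than Fabric_DID are removed, the rest get a
 * DEVICE_RECOVERY event), reset pt2pt state and cancel the
 * discovery timer.
 */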
275 int
276 lpfc_linkdown(struct lpfc_hba * phba)
277 {
278         struct lpfc_sli       *psli;
279         struct lpfc_nodelist  *ndlp, *next_ndlp;
280         struct list_head *listp;
281         struct list_head *node_list[7];
282         LPFC_MBOXQ_t     *mb;
283         int               rc, i;
284
285         psli = &phba->sli;
286
287         spin_lock_irq(phba->host->host_lock);
288         phba->hba_state = LPFC_LINK_DOWN;
289         spin_unlock_irq(phba->host->host_lock);
290
291         /* Clean up any firmware default rpi's */
292         if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
293                 lpfc_unreg_did(phba, 0xffffffff, mb);
294                 mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
295                 if (lpfc_sli_issue_mbox(phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
296                     == MBX_NOT_FINISHED) {
297                         mempool_free( mb, phba->mbox_mem_pool);
298                 }
299         }
300
301         /* Cleanup any outstanding RSCN activity */
302         lpfc_els_flush_rscn(phba);
303
304         /* Cleanup any outstanding ELS commands */
305         lpfc_els_flush_cmd(phba);
306
307         /* Issue a LINK DOWN event to all nodes */
308         node_list[0] = &phba->fc_npr_list;  /* MUST do this list first */
309         node_list[1] = &phba->fc_nlpmap_list;
310         node_list[2] = &phba->fc_nlpunmap_list;
311         node_list[3] = &phba->fc_prli_list;
312         node_list[4] = &phba->fc_reglogin_list;
313         node_list[5] = &phba->fc_adisc_list;
314         node_list[6] = &phba->fc_plogi_list;
315         for (i = 0; i < 7; i++) {
316                 listp = node_list[i];
317                 if (list_empty(listp))
318                         continue;
319
320                 list_for_each_entry_safe(ndlp, next_ndlp, listp, nlp_listp) {
321                         /* Fabric nodes are not handled thru state machine for
322                            link down */
323                         if (ndlp->nlp_type & NLP_FABRIC) {
324                                 /* Remove ALL Fabric nodes except Fabric_DID */
325                                 if (ndlp->nlp_DID != Fabric_DID) {
326                                         /* Take it off current list and free */
327                                         lpfc_nlp_list(phba, ndlp,
328                                                 NLP_NO_LIST);
329                                 }
330                         }
331                         else {
332
333                                 rc = lpfc_disc_state_machine(phba, ndlp, NULL,
334                                                      NLP_EVT_DEVICE_RECOVERY);
335
336                                 /* Check config parameter use-adisc or FCP-2 */
337                                 if ((rc != NLP_STE_FREED_NODE) &&
338                                         (phba->cfg_use_adisc == 0) &&
339                                         !(ndlp->nlp_fcp_info &
340                                                 NLP_FCP_2_DEVICE)) {
341                                         /* We know we will have to relogin, so
342                                          * unreglogin the rpi right now to fail
343                                          * any outstanding I/Os quickly.
344                                          */
345                                         lpfc_unreg_rpi(phba, ndlp);
346                                 }
347                         }
348                 }
349         }
350
351         /* free any ndlp's on unused list */
352         list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
353                                 nlp_listp) {
354                 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
355         }
356
357         /* Setup myDID for link up if we are in pt2pt mode */
358         if (phba->fc_flag & FC_PT2PT) {
359                 phba->fc_myDID = 0;
360                 if ((mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
361                         lpfc_config_link(phba, mb);
362                         mb->mbox_cmpl=lpfc_sli_def_mbox_cmpl;
363                         if (lpfc_sli_issue_mbox
364                             (phba, mb, (MBX_NOWAIT | MBX_STOP_IOCB))
365                             == MBX_NOT_FINISHED) {
366                                 mempool_free( mb, phba->mbox_mem_pool);
367                         }
368                 }
369                 spin_lock_irq(phba->host->host_lock);
370                 phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
371                 spin_unlock_irq(phba->host->host_lock);
372         }
373         spin_lock_irq(phba->host->host_lock);
374         phba->fc_flag &= ~FC_LBIT;
375         spin_unlock_irq(phba->host->host_lock);
376
377         /* Turn off discovery timer if its running */
378         lpfc_can_disctmo(phba);
379
380         /* Must process IOCBs on all rings to handle ABORTed I/Os */
381         return (0);
382 }
383
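/*
 * Process a link-up condition: set hba_state to LPFC_LINK_UP, reset
 * the discovery related fc_flag bits, and clean up stale fabric
 * logins and unused node list entries left from the previous link
 * session.
 */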
384 static int
385 lpfc_linkup(struct lpfc_hba * phba)
386 {
387         struct lpfc_nodelist *ndlp, *next_ndlp;
388
389         spin_lock_irq(phba->host->host_lock);
390         phba->hba_state = LPFC_LINK_UP;
391         phba->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
392                            FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
393         phba->fc_flag |= FC_NDISC_ACTIVE;
394         phba->fc_ns_retry = 0;
395         spin_unlock_irq(phba->host->host_lock);
396
397
398         /*
399          * Clean up old Fabric NLP_FABRIC logins.
400          */
401         list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpunmap_list,
402                                 nlp_listp) {
403                 if (ndlp->nlp_DID == Fabric_DID) {
404                         /* Take it off current list and free */
405                         lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
406                 }
407         }
408
409         /* free any ndlp's on unused list */
410         list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
411                                 nlp_listp) {
412                 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
413         }
414
415         return 0;
416 }
417
418 /*
419  * This routine handles processing a CLEAR_LA mailbox
420  * command upon completion. It is setup in the LPFC_MBOXQ
421  * as the completion routine when the command is
422  * handed off to the SLI layer.
423  */
424 void
425 lpfc_mbx_cmpl_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
426 {
427         struct lpfc_sli *psli;
428         MAILBOX_t *mb;
429         uint32_t control;
430
431         psli = &phba->sli;
432         mb = &pmb->mb;
433         /* Since we don't do discovery right now, turn these off here */
434         psli->ring[psli->ip_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
435         psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
436         psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
437
438         /* Check for error */
439         if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
440                 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
441                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
442                                 "%d:0320 CLEAR_LA mbxStatus error x%x hba "
443                                 "state x%x\n",
444                                 phba->brd_no, mb->mbxStatus, phba->hba_state);
445
446                 phba->hba_state = LPFC_HBA_ERROR;
447                 goto out;
448         }
449
450         if (phba->fc_flag & FC_ABORT_DISCOVERY)
451                 goto out;
452
453         phba->num_disc_nodes = 0;
454         /* go thru NPR list and issue ELS PLOGIs */
455         if (phba->fc_npr_cnt) {
456                 lpfc_els_disc_plogi(phba);
457         }
458
459         if(!phba->num_disc_nodes) {
460                 spin_lock_irq(phba->host->host_lock);
461                 phba->fc_flag &= ~FC_NDISC_ACTIVE;
462                 spin_unlock_irq(phba->host->host_lock);
463         }
464
465         phba->hba_state = LPFC_HBA_READY;
466
467 out:
468         /* Device Discovery completes */
469         lpfc_printf_log(phba,
470                          KERN_INFO,
471                          LOG_DISCOVERY,
472                          "%d:0225 Device Discovery completes\n",
473                          phba->brd_no);
474
475         mempool_free( pmb, phba->mbox_mem_pool);
476
477         spin_lock_irq(phba->host->host_lock);
478         phba->fc_flag &= ~FC_ABORT_DISCOVERY;
479         if (phba->fc_flag & FC_ESTABLISH_LINK) {
480                 phba->fc_flag &= ~FC_ESTABLISH_LINK;
481         }
482         spin_unlock_irq(phba->host->host_lock);
483
484         del_timer_sync(&phba->fc_estabtmo);
485
486         lpfc_can_disctmo(phba);
487
488         /* turn on Link Attention interrupts */
489         spin_lock_irq(phba->host->host_lock);
490         psli->sli_flag |= LPFC_PROCESS_LA;
491         control = readl(phba->HCregaddr);
492         control |= HC_LAINT_ENA;
493         writel(control, phba->HCregaddr);
494         readl(phba->HCregaddr); /* flush */
495         spin_unlock_irq(phba->host->host_lock);
496
497         return;
498 }
499
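/*
 * This routine handles processing a CONFIG_LINK mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */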
500 static void
501 lpfc_mbx_cmpl_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
502 {
503         struct lpfc_sli *psli;
504         MAILBOX_t *mb;
505
506         psli = &phba->sli;
507         mb = &pmb->mb;
508         /* Check for error */
509         if (mb->mbxStatus) {
510                 /* CONFIG_LINK mbox error <mbxStatus> state <hba_state> */
511                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
512                                 "%d:0306 CONFIG_LINK mbxStatus error x%x "
513                                 "HBA state x%x\n",
514                                 phba->brd_no, mb->mbxStatus, phba->hba_state);
515
516                 lpfc_linkdown(phba);
517                 phba->hba_state = LPFC_HBA_ERROR;
518                 goto out;
519         }
520
521         if (phba->hba_state == LPFC_LOCAL_CFG_LINK) {
522                 if (phba->fc_topology == TOPOLOGY_LOOP) {
523                         /* If we are public loop and L bit was set */
524                         if ((phba->fc_flag & FC_PUBLIC_LOOP) &&
525                             !(phba->fc_flag & FC_LBIT)) {
526                                 /* Need to wait for FAN - use discovery timer
527                                  * for timeout.  hba_state is identically
528                                  * LPFC_LOCAL_CFG_LINK while waiting for FAN
529                                  */
530                                 lpfc_set_disctmo(phba);
531                                 mempool_free( pmb, phba->mbox_mem_pool);
532                                 return;
533                         }
534                 }
535
536                 /* Start discovery by sending a FLOGI. hba_state is identically
537                  * LPFC_FLOGI while waiting for FLOGI cmpl.
538                  */
539                 phba->hba_state = LPFC_FLOGI;
540                 lpfc_set_disctmo(phba);
541                 lpfc_initial_flogi(phba);
542                 mempool_free( pmb, phba->mbox_mem_pool);
543                 return;
544         }
545         if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
546                 mempool_free( pmb, phba->mbox_mem_pool);
547                 return;
548         }
549
550 out:
551         /* CONFIG_LINK bad hba state <hba_state> */
552         lpfc_printf_log(phba,
553                         KERN_ERR,
554                         LOG_DISCOVERY,
555                         "%d:0200 CONFIG_LINK bad hba state x%x\n",
556                         phba->brd_no, phba->hba_state);
557
558         if (phba->hba_state != LPFC_CLEAR_LA) {
559                 lpfc_clear_la(phba, pmb);
560                 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
561                 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
562                     == MBX_NOT_FINISHED) {
563                         mempool_free( pmb, phba->mbox_mem_pool);
564                         lpfc_disc_flush_list(phba);
565                         psli->ring[(psli->ip_ring)].flag &=
566                                 ~LPFC_STOP_IOCB_EVENT;
567                         psli->ring[(psli->fcp_ring)].flag &=
568                                 ~LPFC_STOP_IOCB_EVENT;
569                         psli->ring[(psli->next_ring)].flag &=
570                                 ~LPFC_STOP_IOCB_EVENT;
571                         phba->hba_state = LPFC_HBA_READY;
572                 }
573         } else {
574                 mempool_free( pmb, phba->mbox_mem_pool);
575         }
576         return;
577 }
578
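/*
 * This routine handles processing a READ_SPARAM mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */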
579 static void
580 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
581 {
582         struct lpfc_sli *psli = &phba->sli;
583         MAILBOX_t *mb = &pmb->mb;
584         struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
585
586
587         /* Check for error */
588         if (mb->mbxStatus) {
589                 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
590                 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
591                                 "%d:0319 READ_SPARAM mbxStatus error x%x "
592                                 "hba state x%x\n",
593                                 phba->brd_no, mb->mbxStatus, phba->hba_state);
594
595                 lpfc_linkdown(phba);
596                 phba->hba_state = LPFC_HBA_ERROR;
597                 goto out;
598         }
599
600         memcpy((uint8_t *) & phba->fc_sparam, (uint8_t *) mp->virt,
601                sizeof (struct serv_parm));
602         memcpy((uint8_t *) & phba->fc_nodename,
603                (uint8_t *) & phba->fc_sparam.nodeName,
604                sizeof (struct lpfc_name));
605         memcpy((uint8_t *) & phba->fc_portname,
606                (uint8_t *) & phba->fc_sparam.portName,
607                sizeof (struct lpfc_name));
608         lpfc_mbuf_free(phba, mp->virt, mp->phys);
609         kfree(mp);
610         mempool_free( pmb, phba->mbox_mem_pool);
611         return;
612
613 out:
614         pmb->context1 = NULL;
615         lpfc_mbuf_free(phba, mp->virt, mp->phys);
616         kfree(mp);
617         if (phba->hba_state != LPFC_CLEAR_LA) {
618                 lpfc_clear_la(phba, pmb);
619                 pmb->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
620                 if (lpfc_sli_issue_mbox(phba, pmb, (MBX_NOWAIT | MBX_STOP_IOCB))
621                     == MBX_NOT_FINISHED) {
622                         mempool_free( pmb, phba->mbox_mem_pool);
623                         lpfc_disc_flush_list(phba);
624                         psli->ring[(psli->ip_ring)].flag &=
625                             ~LPFC_STOP_IOCB_EVENT;
626                         psli->ring[(psli->fcp_ring)].flag &=
627                             ~LPFC_STOP_IOCB_EVENT;
628                         psli->ring[(psli->next_ring)].flag &=
629                             ~LPFC_STOP_IOCB_EVENT;
630                         phba->hba_state = LPFC_HBA_READY;
631                 }
632         } else {
633                 mempool_free( pmb, phba->mbox_mem_pool);
634         }
635         return;
636 }
637
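/*
 * Called from the READ_LA completion handler on a link-up attention:
 * record the link speed and topology, set up the local DID and ALPA
 * map for loop topologies, then call lpfc_linkup() and issue the
 * READ_SPARAM and CONFIG_LINK mailbox commands to continue link
 * bring-up.
 */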
638 static void
639 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
640 {
641         int i;
642         LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
643         sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
644         cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
645
646         spin_lock_irq(phba->host->host_lock);
647         switch(la->UlnkSpeed) {
648                 case LA_1GHZ_LINK:
649                         phba->fc_linkspeed = LA_1GHZ_LINK;
650                         break;
651                 case LA_2GHZ_LINK:
652                         phba->fc_linkspeed = LA_2GHZ_LINK;
653                         break;
654                 case LA_4GHZ_LINK:
655                         phba->fc_linkspeed = LA_4GHZ_LINK;
656                         break;
657                 default:
658                         phba->fc_linkspeed = LA_UNKNW_LINK;
659                         break;
660         }
661
662         phba->fc_topology = la->topology;
663
664         if (phba->fc_topology == TOPOLOGY_LOOP) {
665         /* Get Loop Map information */
666
667                 if (la->il)
668                         phba->fc_flag |= FC_LBIT;
669
670                 phba->fc_myDID = la->granted_AL_PA;
671                 i = la->un.lilpBde64.tus.f.bdeSize;
672
673                 if (i == 0) {
674                         phba->alpa_map[0] = 0;
675                 } else {
676                         if (phba->cfg_log_verbose & LOG_LINK_EVENT) {
677                                 int numalpa, j, k;
678                                 union {
679                                         uint8_t pamap[16];
680                                         struct {
681                                                 uint32_t wd1;
682                                                 uint32_t wd2;
683                                                 uint32_t wd3;
684                                                 uint32_t wd4;
685                                         } pa;
686                                 } un;
687                                 numalpa = phba->alpa_map[0];
688                                 j = 0;
689                                 while (j < numalpa) {
690                                         memset(un.pamap, 0, 16);
691                                         for (k = 1; j < numalpa; k++) {
692                                                 un.pamap[k - 1] =
693                                                         phba->alpa_map[j + 1];
694                                                 j++;
695                                                 if (k == 16)
696                                                         break;
697                                         }
698                                         /* Link Up Event ALPA map */
699                                         lpfc_printf_log(phba,
700                                                 KERN_WARNING,
701                                                 LOG_LINK_EVENT,
702                                                 "%d:1304 Link Up Event "
703                                                 "ALPA map Data: x%x "
704                                                 "x%x x%x x%x\n",
705                                                 phba->brd_no,
706                                                 un.pa.wd1, un.pa.wd2,
707                                                 un.pa.wd3, un.pa.wd4);
708                                 }
709                         }
710                 }
711         } else {
712                 phba->fc_myDID = phba->fc_pref_DID;
713                 phba->fc_flag |= FC_LBIT;
714         }
715         spin_unlock_irq(phba->host->host_lock);
716
717         lpfc_linkup(phba);
718         if (sparam_mbox) {
719                 lpfc_read_sparam(phba, sparam_mbox);
720                 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
721                 lpfc_sli_issue_mbox(phba, sparam_mbox,
722                                                 (MBX_NOWAIT | MBX_STOP_IOCB));
723         }
724
725         if (cfglink_mbox) {
726                 phba->hba_state = LPFC_LOCAL_CFG_LINK;
727                 lpfc_config_link(phba, cfglink_mbox);
728                 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_config_link;
729                 lpfc_sli_issue_mbox(phba, cfglink_mbox,
730                                                 (MBX_NOWAIT | MBX_STOP_IOCB));
731         }
732 }
733
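/*
 * Called from the READ_LA completion path on a link-down attention
 * (or a READ_LA error): run lpfc_linkdown() and re-enable Link
 * Attention interrupts so further link events are reported.
 */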
734 static void
735 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) {
736         uint32_t control;
737         struct lpfc_sli *psli = &phba->sli;
738
739         lpfc_linkdown(phba);
740
741         /* turn on Link Attention interrupts - no CLEAR_LA needed */
742         spin_lock_irq(phba->host->host_lock);
743         psli->sli_flag |= LPFC_PROCESS_LA;
744         control = readl(phba->HCregaddr);
745         control |= HC_LAINT_ENA;
746         writel(control, phba->HCregaddr);
747         readl(phba->HCregaddr); /* flush */
748         spin_unlock_irq(phba->host->host_lock);
749 }
750
751 /*
752  * This routine handles processing a READ_LA mailbox
753  * command upon completion. It is setup in the LPFC_MBOXQ
754  * as the completion routine when the command is
755  * handed off to the SLI layer.
756  */
757 void
758 lpfc_mbx_cmpl_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
759 {
760         READ_LA_VAR *la;
761         MAILBOX_t *mb = &pmb->mb;
762         struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
763
764         /* Check for error */
765         if (mb->mbxStatus) {
766                 lpfc_printf_log(phba,
767                                 KERN_INFO,
768                                 LOG_LINK_EVENT,
769                                 "%d:1307 READ_LA mbox error x%x state x%x\n",
770                                 phba->brd_no,
771                                 mb->mbxStatus, phba->hba_state);
772                 lpfc_mbx_issue_link_down(phba);
773                 phba->hba_state = LPFC_HBA_ERROR;
774                 goto lpfc_mbx_cmpl_read_la_free_mbuf;
775         }
776
777         la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
778
779         memcpy(&phba->alpa_map[0], mp->virt, 128);
780
781         if (((phba->fc_eventTag + 1) < la->eventTag) ||
782              (phba->fc_eventTag == la->eventTag)) {
783                 phba->fc_stat.LinkMultiEvent++;
784                 if (la->attType == AT_LINK_UP) {
785                         if (phba->fc_eventTag != 0)
786                                 lpfc_linkdown(phba);
787                 }
788         }
789
790         phba->fc_eventTag = la->eventTag;
791
792         if (la->attType == AT_LINK_UP) {
793                 phba->fc_stat.LinkUp++;
794                 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
795                                 "%d:1303 Link Up Event x%x received "
796                                 "Data: x%x x%x x%x x%x\n",
797                                 phba->brd_no, la->eventTag, phba->fc_eventTag,
798                                 la->granted_AL_PA, la->UlnkSpeed,
799                                 phba->alpa_map[0]);
800                 lpfc_mbx_process_link_up(phba, la);
801         } else {
802                 phba->fc_stat.LinkDown++;
803                 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
804                                 "%d:1305 Link Down Event x%x received "
805                                 "Data: x%x x%x x%x\n",
806                                 phba->brd_no, la->eventTag, phba->fc_eventTag,
807                                 phba->hba_state, phba->fc_flag);
808                 lpfc_mbx_issue_link_down(phba);
809         }
810
811 lpfc_mbx_cmpl_read_la_free_mbuf:
812         lpfc_mbuf_free(phba, mp->virt, mp->phys);
813         kfree(mp);
814         mempool_free(pmb, phba->mbox_mem_pool);
815         return;
816 }
817
818 /*
819  * This routine handles processing a REG_LOGIN mailbox
820  * command upon completion. It is setup in the LPFC_MBOXQ
821  * as the completion routine when the command is
822  * handed off to the SLI layer.
823  */
824 void
825 lpfc_mbx_cmpl_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
826 {
827         struct lpfc_sli *psli;
828         MAILBOX_t *mb;
829         struct lpfc_dmabuf *mp;
830         struct lpfc_nodelist *ndlp;
831
832         psli = &phba->sli;
833         mb = &pmb->mb;
834
835         ndlp = (struct lpfc_nodelist *) pmb->context2;
836         mp = (struct lpfc_dmabuf *) (pmb->context1);
837
838         pmb->context1 = NULL;
839
840         /* Good status, call state machine */
841         lpfc_disc_state_machine(phba, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
842         lpfc_mbuf_free(phba, mp->virt, mp->phys);
843         kfree(mp);
844         mempool_free( pmb, phba->mbox_mem_pool);
845
846         return;
847 }
848
849 /*
850  * This routine handles processing a Fabric REG_LOGIN mailbox
851  * command upon completion. It is setup in the LPFC_MBOXQ
852  * as the completion routine when the command is
853  * handed off to the SLI layer.
854  */
855 void
856 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
857 {
858         struct lpfc_sli *psli;
859         MAILBOX_t *mb;
860         struct lpfc_dmabuf *mp;
861         struct lpfc_nodelist *ndlp;
862         struct lpfc_nodelist *ndlp_fdmi;
863
864
865         psli = &phba->sli;
866         mb = &pmb->mb;
867
868         ndlp = (struct lpfc_nodelist *) pmb->context2;
869         mp = (struct lpfc_dmabuf *) (pmb->context1);
870
871         if (mb->mbxStatus) {
872                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
873                 kfree(mp);
874                 mempool_free( pmb, phba->mbox_mem_pool);
875                 mempool_free( ndlp, phba->nlp_mem_pool);
876
877                 /* FLOGI failed, so just use loop map to make discovery list */
878                 lpfc_disc_list_loopmap(phba);
879
880                 /* Start discovery */
881                 lpfc_disc_start(phba);
882                 return;
883         }
884
885         pmb->context1 = NULL;
886
887         if (ndlp->nlp_rpi != 0)
888                 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
889         ndlp->nlp_rpi = mb->un.varWords[0];
890         lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
891         ndlp->nlp_type |= NLP_FABRIC;
892         ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
893         lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
894
895         if (phba->hba_state == LPFC_FABRIC_CFG_LINK) {
896                 /* This NPort has been assigned an NPort_ID by the fabric as a
897                  * result of the completed fabric login.  Issue a State Change
898                  * Registration (SCR) ELS request to the fabric controller
899                  * (SCR_DID) so that this NPort gets RSCN events from the
900                  * fabric.
901                  */
902                 lpfc_issue_els_scr(phba, SCR_DID, 0);
903
904                 /* Allocate a new node instance.  If the pool is empty, just
905                  * start the discovery process and skip the Nameserver login
906                  * process.  This is attempted again later on.  Otherwise, issue
907                  * a Port Login (PLOGI) to the NameServer
908                  */
909                 if ((ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL))
910                     == 0) {
911                         lpfc_disc_start(phba);
912                 } else {
913                         lpfc_nlp_init(phba, ndlp, NameServer_DID);
914                         ndlp->nlp_type |= NLP_FABRIC;
915                         ndlp->nlp_state = NLP_STE_PLOGI_ISSUE;
916                         lpfc_nlp_list(phba, ndlp, NLP_PLOGI_LIST);
917                         lpfc_issue_els_plogi(phba, ndlp, 0);
918                         if (phba->cfg_fdmi_on) {
919                                 if ((ndlp_fdmi = mempool_alloc(
920                                                        phba->nlp_mem_pool,
921                                                        GFP_KERNEL))) {
922                                         lpfc_nlp_init(phba, ndlp_fdmi,
923                                                 FDMI_DID);
924                                         ndlp_fdmi->nlp_type |= NLP_FABRIC;
925                                         ndlp_fdmi->nlp_state =
926                                             NLP_STE_PLOGI_ISSUE;
927                                         lpfc_issue_els_plogi(phba, ndlp_fdmi,
928                                                              0);
929                                 }
930                         }
931                 }
932         }
933
934         lpfc_mbuf_free(phba, mp->virt, mp->phys);
935         kfree(mp);
936         mempool_free( pmb, phba->mbox_mem_pool);
937
938         return;
939 }
940
941 /*
942  * This routine handles processing a NameServer REG_LOGIN mailbox
943  * command upon completion. It is setup in the LPFC_MBOXQ
944  * as the completion routine when the command is
945  * handed off to the SLI layer.
946  */
947 void
948 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
949 {
950         struct lpfc_sli *psli;
951         MAILBOX_t *mb;
952         struct lpfc_dmabuf *mp;
953         struct lpfc_nodelist *ndlp;
954
955         psli = &phba->sli;
956         mb = &pmb->mb;
957
958         ndlp = (struct lpfc_nodelist *) pmb->context2;
959         mp = (struct lpfc_dmabuf *) (pmb->context1);
960
961         if (mb->mbxStatus) {
962                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
963                 kfree(mp);
964                 mempool_free( pmb, phba->mbox_mem_pool);
965                 lpfc_nlp_list(phba, ndlp, NLP_NO_LIST);
966
967                 /* RegLogin failed, so just use loop map to make discovery
968                    list */
969                 lpfc_disc_list_loopmap(phba);
970
971                 /* Start discovery */
972                 lpfc_disc_start(phba);
973                 return;
974         }
975
976         pmb->context1 = NULL;
977
978         if (ndlp->nlp_rpi != 0)
979                 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
980         ndlp->nlp_rpi = mb->un.varWords[0];
981         lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
982         ndlp->nlp_type |= NLP_FABRIC;
983         ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
984         lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
985
986         if (phba->hba_state < LPFC_HBA_READY) {
987                 /* Link up discovery requires Fabric registration. */
988                 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RNN_ID);
989                 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RSNN_NN);
990                 lpfc_ns_cmd(phba, ndlp, SLI_CTNS_RFT_ID);
991         }
992
993         phba->fc_ns_retry = 0;
994         /* Good status, issue CT Request to NameServer */
995         if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
996                 /* Cannot issue NameServer Query, so finish up discovery */
997                 lpfc_disc_start(phba);
998         }
999
1000         lpfc_mbuf_free(phba, mp->virt, mp->phys);
1001         kfree(mp);
1002         mempool_free( pmb, phba->mbox_mem_pool);
1003
1004         return;
1005 }
1006
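/*
 * Register the node with the FC transport: build the rport
 * identifiers from the node's WWNN, WWPN, DID and FCP role, call
 * fc_remote_port_add(), and record the assigned SCSI target id and
 * rport private data back in the nodelist entry.
 */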
1007 static void
1008 lpfc_register_remote_port(struct lpfc_hba * phba,
1009                             struct lpfc_nodelist * ndlp)
1010 {
1011         struct fc_rport *rport;
1012         struct lpfc_rport_data *rdata;
1013         struct fc_rport_identifiers rport_ids;
1014         uint64_t wwn;
1015
1016         /* Remote port has reappeared. Re-register w/ FC transport */
1017         memcpy(&wwn, &ndlp->nlp_nodename, sizeof(uint64_t));
1018         rport_ids.node_name = be64_to_cpu(wwn);
1019         memcpy(&wwn, &ndlp->nlp_portname, sizeof(uint64_t));
1020         rport_ids.port_name = be64_to_cpu(wwn);
1021         rport_ids.port_id = ndlp->nlp_DID;
1022         rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
1023         if (ndlp->nlp_type & NLP_FCP_TARGET)
1024                 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
1025         if (ndlp->nlp_type & NLP_FCP_INITIATOR)
1026                 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1027
1028         ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
1029         if (!rport) {
1030                 dev_printk(KERN_WARNING, &phba->pcidev->dev,
1031                            "Warning: fc_remote_port_add failed\n");
1032                 return;
1033         }
1034
1035         /* initialize static port data */
1036         rport->maxframe_size = ndlp->nlp_maxframe;
1037         rport->supported_classes = ndlp->nlp_class_sup;
1038         if ((rport->scsi_target_id != -1) &&
1039                 (rport->scsi_target_id < MAX_FCP_TARGET)) {
1040                 ndlp->nlp_sid = rport->scsi_target_id;
1041         }
1042         rdata = rport->dd_data;
1043         rdata->pnode = ndlp;
1044
1045         return;
1046 }
1047
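/*
 * Move a nodelist entry onto the requested per-HBA discovery list
 * (or off all lists for NLP_NO_LIST), keeping the list counters,
 * nodev / ELS retry timers and the FC transport rport registration
 * in step with the node's new state.
 */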
1048 int
1049 lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
1050 {
1051         enum { none, unmapped, mapped } rport_add = none, rport_del = none;
1052         struct lpfc_sli      *psli;
1053
1054         psli = &phba->sli;
1055         /* Sanity check to ensure we are not moving to / from the same list */
1056         if ((nlp->nlp_flag & NLP_LIST_MASK) == list) {
1057                 if (list != NLP_NO_LIST)
1058                         return(0);
1059         }
1060
1061         switch(nlp->nlp_flag & NLP_LIST_MASK) {
1062         case NLP_NO_LIST: /* Not on any list */
1063                 break;
1064         case NLP_UNUSED_LIST:
1065                 phba->fc_unused_cnt--;
1066                 list_del(&nlp->nlp_listp);
1067                 break;
1068         case NLP_PLOGI_LIST:
1069                 phba->fc_plogi_cnt--;
1070                 list_del(&nlp->nlp_listp);
1071                 break;
1072         case NLP_ADISC_LIST:
1073                 phba->fc_adisc_cnt--;
1074                 list_del(&nlp->nlp_listp);
1075                 break;
1076         case NLP_REGLOGIN_LIST:
1077                 phba->fc_reglogin_cnt--;
1078                 list_del(&nlp->nlp_listp);
1079                 break;
1080         case NLP_PRLI_LIST:
1081                 phba->fc_prli_cnt--;
1082                 list_del(&nlp->nlp_listp);
1083                 break;
1084         case NLP_UNMAPPED_LIST:
1085                 phba->fc_unmap_cnt--;
1086                 list_del(&nlp->nlp_listp);
1087                 spin_lock_irq(phba->host->host_lock);
1088                 nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
1089                 nlp->nlp_type &= ~NLP_FC_NODE;
1090                 spin_unlock_irq(phba->host->host_lock);
1091                 phba->nport_event_cnt++;
1092                 if (nlp->rport)
1093                         rport_del = unmapped;
1094                 break;
1095         case NLP_MAPPED_LIST:
1096                 phba->fc_map_cnt--;
1097                 list_del(&nlp->nlp_listp);
1098                 phba->nport_event_cnt++;
1099                 if (nlp->rport)
1100                         rport_del = mapped;
1101                 break;
1102         case NLP_NPR_LIST:
1103                 phba->fc_npr_cnt--;
1104                 list_del(&nlp->nlp_listp);
1105                 /* Stop delay tmo if taking node off NPR list */
1106                 if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
1107                    (list != NLP_NPR_LIST)) {
1108                         spin_lock_irq(phba->host->host_lock);
1109                         nlp->nlp_flag &= ~NLP_DELAY_TMO;
1110                         spin_unlock_irq(phba->host->host_lock);
1111                         del_timer_sync(&nlp->nlp_delayfunc);
1112                         if (!list_empty(&nlp->els_retry_evt.evt_listp))
1113                                 list_del_init(&nlp->els_retry_evt.evt_listp);
1114                 }
1115                 break;
1116         }
1117
1118         spin_lock_irq(phba->host->host_lock);
1119         nlp->nlp_flag &= ~NLP_LIST_MASK;
1120         spin_unlock_irq(phba->host->host_lock);
1121
1122         /* Add NPort <did> to <num> list */
1123         lpfc_printf_log(phba,
1124                         KERN_INFO,
1125                         LOG_NODE,
1126                         "%d:0904 Add NPort x%x to %d list Data: x%x\n",
1127                         phba->brd_no,
1128                         nlp->nlp_DID, list, nlp->nlp_flag);
1129
1130         switch(list) {
1131         case NLP_NO_LIST: /* No list, just remove it */
1132                 lpfc_nlp_remove(phba, nlp);
1133                 break;
1134         case NLP_UNUSED_LIST:
1135                 spin_lock_irq(phba->host->host_lock);
1136                 nlp->nlp_flag |= list;
1137                 spin_unlock_irq(phba->host->host_lock);
1138                 /* Put it at the end of the unused list */
1139                 list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
1140                 phba->fc_unused_cnt++;
1141                 break;
1142         case NLP_PLOGI_LIST:
1143                 spin_lock_irq(phba->host->host_lock);
1144                 nlp->nlp_flag |= list;
1145                 spin_unlock_irq(phba->host->host_lock);
1146                 /* Put it at the end of the plogi list */
1147                 list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
1148                 phba->fc_plogi_cnt++;
1149                 break;
1150         case NLP_ADISC_LIST:
1151                 spin_lock_irq(phba->host->host_lock);
1152                 nlp->nlp_flag |= list;
1153                 spin_unlock_irq(phba->host->host_lock);
1154                 /* Put it at the end of the adisc list */
1155                 list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
1156                 phba->fc_adisc_cnt++;
1157                 break;
1158         case NLP_REGLOGIN_LIST:
1159                 spin_lock_irq(phba->host->host_lock);
1160                 nlp->nlp_flag |= list;
1161                 spin_unlock_irq(phba->host->host_lock);
1162                 /* Put it at the end of the reglogin list */
1163                 list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
1164                 phba->fc_reglogin_cnt++;
1165                 break;
1166         case NLP_PRLI_LIST:
1167                 spin_lock_irq(phba->host->host_lock);
1168                 nlp->nlp_flag |= list;
1169                 spin_unlock_irq(phba->host->host_lock);
1170                 /* Put it at the end of the prli list */
1171                 list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
1172                 phba->fc_prli_cnt++;
1173                 break;
1174         case NLP_UNMAPPED_LIST:
1175                 rport_add = unmapped;
1176                 /* ensure all vestiges of "mapped" significance are gone */
1177                 nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1178                 spin_lock_irq(phba->host->host_lock);
1179                 nlp->nlp_flag |= list;
1180                 spin_unlock_irq(phba->host->host_lock);
1181                 /* Put it at the end of the unmap list */
1182                 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
1183                 phba->fc_unmap_cnt++;
1184                 phba->nport_event_cnt++;
1185                 /* stop nodev tmo if running */
1186                 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1187                         spin_lock_irq(phba->host->host_lock);
1188                         nlp->nlp_flag &= ~NLP_NODEV_TMO;
1189                         spin_unlock_irq(phba->host->host_lock);
1190                         del_timer_sync(&nlp->nlp_tmofunc);
1191                         if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1192                                 list_del_init(&nlp->nodev_timeout_evt.
1193                                                 evt_listp);
1194
1195                 }
1196                 nlp->nlp_type |= NLP_FC_NODE;
1197                 break;
1198         case NLP_MAPPED_LIST:
1199                 rport_add = mapped;
1200                 spin_lock_irq(phba->host->host_lock);
1201                 nlp->nlp_flag |= list;
1202                 spin_unlock_irq(phba->host->host_lock);
1203                 /* Put it at the end of the map list */
1204                 list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
1205                 phba->fc_map_cnt++;
1206                 phba->nport_event_cnt++;
1207                 /* stop nodev tmo if running */
1208                 if (nlp->nlp_flag & NLP_NODEV_TMO) {
1209                         nlp->nlp_flag &= ~NLP_NODEV_TMO;
1210                         del_timer_sync(&nlp->nlp_tmofunc);
1211                         if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
1212                                 list_del_init(&nlp->nodev_timeout_evt.
1213                                                 evt_listp);
1214
1215                 }
1216                 break;
1217         case NLP_NPR_LIST:
1218                 spin_lock_irq(phba->host->host_lock);
1219                 nlp->nlp_flag |= list;
1220                 spin_unlock_irq(phba->host->host_lock);
1221                 /* Put it at the end of the npr list */
1222                 list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
1223                 phba->fc_npr_cnt++;
1224
1225                 /*
1226                  * Sanity check for Fabric entity.
1227                  * Set nodev_tmo for NPR state, for Fabric use 1 sec.
1228                  */
1229                 if (nlp->nlp_type & NLP_FABRIC) {
1230                         mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
1231                 }
1232                 else {
1233                         mod_timer(&nlp->nlp_tmofunc,
1234                             jiffies + HZ * phba->cfg_nodev_tmo);
1235                 }
1236                 spin_lock_irq(phba->host->host_lock);
1237                 nlp->nlp_flag |= NLP_NODEV_TMO;
1238                 nlp->nlp_flag &= ~NLP_RCV_PLOGI;
1239                 spin_unlock_irq(phba->host->host_lock);
1240                 break;
1241         case NLP_JUST_DQ:
1242                 break;
1243         }
1244
1245         /*
1246          * We make all the calls into the transport after we have
1247          * moved the node between lists. This is so that we don't
1248          * release the lock while in-between lists.
1249          */
1250
1251         /* Don't upcall midlayer if we're unloading */
1252         if (!(phba->fc_flag & FC_UNLOADING)) {
1253                 /*
1254                  * We revalidate the rport pointer as the "add" function
1255                  * may have removed the remote port.
1256                  */
1257                 if ((rport_del != none) && nlp->rport)
1258                         fc_remote_port_block(nlp->rport);
1259
1260                 if (rport_add != none) {
1261                         /*
1262                          * Tell the fc transport about the port, if we haven't
1263                          * already. If we have, and it's a scsi entity, be
1264                          * sure to unblock any attached scsi devices
1265                          */
1266                         if (!nlp->rport)
1267                                 lpfc_register_remote_port(phba, nlp);
1268                         else
1269                                 fc_remote_port_unblock(nlp->rport);
1270
1271                         /*
1272                          * if we added to Mapped list, but the remote port
1273                          * registration failed or assigned a target id outside
1274                          * our presentable range - move the node to the
1275                          * Unmapped List
1276                          */
1277                         if ((rport_add == mapped) &&
1278                             ((!nlp->rport) ||
1279                              (nlp->rport->scsi_target_id == -1) ||
1280                              (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
1281                                 nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
1282                                 spin_lock_irq(phba->host->host_lock);
1283                                 nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
1284                                 spin_unlock_irq(phba->host->host_lock);
1285                                 lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
1286                         }
1287                 }
1288         }
1289         return (0);
1290 }
1291
1292 /*
1293  * Start / ReStart rescue timer for Discovery / RSCN handling
1294  */
1295 void
1296 lpfc_set_disctmo(struct lpfc_hba * phba)
1297 {
1298         uint32_t tmo;
1299
1300         tmo = ((phba->fc_ratov * 2) + 1);
1301
1302         mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
1303         spin_lock_irq(phba->host->host_lock);
1304         phba->fc_flag |= FC_DISC_TMO;
1305         spin_unlock_irq(phba->host->host_lock);
1306
1307         /* Start Discovery Timer state <hba_state> */
1308         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1309                         "%d:0247 Start Discovery Timer state x%x "
1310                         "Data: x%x x%lx x%x x%x\n",
1311                         phba->brd_no,
1312                         phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
1313                         phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1314
1315         return;
1316 }
1317
1318 /*
1319  * Cancel rescue timer for Discovery / RSCN handling
1320  */
1321 int
1322 lpfc_can_disctmo(struct lpfc_hba * phba)
1323 {
1324         /* Turn off discovery timer if its running */
1325         if (phba->fc_flag & FC_DISC_TMO) {
1326                 spin_lock_irq(phba->host->host_lock);
1327                 phba->fc_flag &= ~FC_DISC_TMO;
1328                 spin_unlock_irq(phba->host->host_lock);
1329                 del_timer_sync(&phba->fc_disctmo);
1330                 phba->work_hba_events &= ~WORKER_DISC_TMO;
1331         }
1332
1333         /* Cancel Discovery Timer state <hba_state> */
1334         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1335                         "%d:0248 Cancel Discovery Timer state x%x "
1336                         "Data: x%x x%x x%x\n",
1337                         phba->brd_no, phba->hba_state, phba->fc_flag,
1338                         phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1339
1340         return (0);
1341 }
1342
1343 /*
1344  * Check specified ring for outstanding IOCB on the SLI queue
1345  * Return true if iocb matches the specified nport
1346  */
1347 int
1348 lpfc_check_sli_ndlp(struct lpfc_hba * phba,
1349                     struct lpfc_sli_ring * pring,
1350                     struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
1351 {
1352         struct lpfc_sli *psli;
1353         IOCB_t *icmd;
1354
1355         psli = &phba->sli;
1356         icmd = &iocb->iocb;
1357         if (pring->ringno == LPFC_ELS_RING) {
1358                 switch (icmd->ulpCommand) {
1359                 case CMD_GEN_REQUEST64_CR:
1360                         if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
1361                                 return (1);
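                        /* fall through - also check the ndlp context below */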
1362                 case CMD_ELS_REQUEST64_CR:
1363                 case CMD_XMIT_ELS_RSP64_CX:
1364                         if (iocb->context1 == (uint8_t *) ndlp)
1365                                 return (1);
1366                 }
1367         } else if (pring->ringno == psli->ip_ring) {
1368
1369         } else if (pring->ringno == psli->fcp_ring) {
1370                 /* Skip match check if waiting to relogin to FCP target */
1371                 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
1372                   (ndlp->nlp_flag & NLP_DELAY_TMO)) {
1373                         return (0);
1374                 }
1375                 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
1376                         return (1);
1377                 }
1378         } else if (pring->ringno == psli->next_ring) {
1379
1380         }
1381         return (0);
1382 }
1383
1384 /*
1385  * Free resources / clean up outstanding I/Os
1386  * associated with nlp_rpi in the LPFC_NODELIST entry.
1387  */
1388 static int
1389 lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1390 {
1391         struct lpfc_sli *psli;
1392         struct lpfc_sli_ring *pring;
1393         struct lpfc_iocbq *iocb, *next_iocb;
1394         IOCB_t *icmd;
1395         uint32_t rpi, i;
1396
1397         /*
1398          * Everything that matches on txcmplq will be returned
1399          * by firmware with a no rpi error.
1400          */
1401         psli = &phba->sli;
1402         rpi = ndlp->nlp_rpi;
1403         if (rpi) {
1404                 /* Now process each ring */
1405                 for (i = 0; i < psli->num_rings; i++) {
1406                         pring = &psli->ring[i];
1407
1408                         spin_lock_irq(phba->host->host_lock);
1409                         list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
1410                                                 list) {
1411                                 /*
1412                                  * Check to see if iocb matches the nport we are
1413                                  * looking for
1414                                  */
1415                                 if ((lpfc_check_sli_ndlp
1416                                      (phba, pring, iocb, ndlp))) {
1417                                         /* It matches, so dequeue and call compl
1418                                            with an error */
1419                                         list_del(&iocb->list);
1420                                         pring->txq_cnt--;
1421                                         if (iocb->iocb_cmpl) {
1422                                                 icmd = &iocb->iocb;
1423                                                 icmd->ulpStatus =
1424                                                     IOSTAT_LOCAL_REJECT;
1425                                                 icmd->un.ulpWord[4] =
1426                                                     IOERR_SLI_ABORTED;
1427                                                 spin_unlock_irq(phba->host->
1428                                                                 host_lock);
1429                                                 (iocb->iocb_cmpl) (phba,
1430                                                                    iocb, iocb);
1431                                                 spin_lock_irq(phba->host->
1432                                                               host_lock);
1433                                         } else {
1434                                                 list_add_tail(&iocb->list,
1435                                                         &phba->lpfc_iocb_list);
1436                                         }
1437                                 }
1438                         }
1439                         spin_unlock_irq(phba->host->host_lock);
1440
1441                 }
1442         }
1443         return (0);
1444 }
1445
1446 /*
1447  * Free rpi associated with LPFC_NODELIST entry.
1448  * This routine is called from lpfc_freenode(), when we are removing
1449  * a LPFC_NODELIST entry. It is also called if the driver initiates a
1450  * LOGO that completes successfully, and we are waiting to PLOGI back
1451  * to the remote NPort. In addition, it is called after we receive
1452  * an unsolicited ELS cmd, send back a rsp, the rsp completes and
1453  * we are waiting to PLOGI back to the remote NPort.
1454  */
1455 int
1456 lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1457 {
1458         LPFC_MBOXQ_t *mbox;
1459         int rc;
1460
1461         if (ndlp->nlp_rpi) {
1462                 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1463                         lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
1464                         mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1465                         rc = lpfc_sli_issue_mbox
1466                                     (phba, mbox, (MBX_NOWAIT | MBX_STOP_IOCB));
1467                         if (rc == MBX_NOT_FINISHED)
1468                                 mempool_free( mbox, phba->mbox_mem_pool);
1469                 }
1470                 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
1471                 lpfc_no_rpi(phba, ndlp);
1472                 ndlp->nlp_rpi = 0;
1473                 return 1;
1474         }
1475         return 0;
1476 }
1477
1478 /*
1479  * Free resources associated with LPFC_NODELIST entry
1480  * so it can be freed.
1481  */
1482 static int
1483 lpfc_freenode(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1484 {
1485         LPFC_MBOXQ_t       *mb;
1486         LPFC_MBOXQ_t       *nextmb;
1487         struct lpfc_dmabuf *mp;
1488         struct fc_rport *rport;
1489
1490         /* Cleanup node for NPort <nlp_DID> */
1491         lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1492                         "%d:0900 Cleanup node for NPort x%x "
1493                         "Data: x%x x%x x%x\n",
1494                         phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
1495                         ndlp->nlp_state, ndlp->nlp_rpi);
1496
1497         lpfc_nlp_list(phba, ndlp, NLP_JUST_DQ);
1498
1499         /*
1500          * if unloading the driver - just leave the remote port in place.
1501          * The driver unload will force the attached devices to detach
1502  * and flush caches w/o generating flush errors.
1503          */
1504         if ((ndlp->rport) && !(phba->fc_flag & FC_UNLOADING)) {
1505                 rport = ndlp->rport;
1506                 ndlp->rport = NULL;
1507                 fc_remote_port_unblock(rport);
1508                 fc_remote_port_delete(rport);
1509                 ndlp->nlp_sid = NLP_NO_SID;
1510         }
1511
1512         /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1513         if ((mb = phba->sli.mbox_active)) {
1514                 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1515                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1516                         mb->context2 = NULL;
1517                         mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1518                 }
1519         }
1520         list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1521                 if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1522                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1523                         mp = (struct lpfc_dmabuf *) (mb->context1);
1524                         if (mp) {
1525                                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1526                                 kfree(mp);
1527                         }
1528                         list_del(&mb->list);
1529                         mempool_free(mb, phba->mbox_mem_pool);
1530                 }
1531         }
1532
1533         lpfc_els_abort(phba, ndlp, 0);
1534         spin_lock_irq(phba->host->host_lock);
1535         ndlp->nlp_flag &= ~(NLP_NODEV_TMO|NLP_DELAY_TMO);
1536         spin_unlock_irq(phba->host->host_lock);
1537         del_timer_sync(&ndlp->nlp_tmofunc);
1538
1539         del_timer_sync(&ndlp->nlp_delayfunc);
1540
1541         if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1542                 list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1543         if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1544                 list_del_init(&ndlp->els_retry_evt.evt_listp);
1545
1546         lpfc_unreg_rpi(phba, ndlp);
1547
1548         return (0);
1549 }
1550
1551 /*
1552  * Check to see if we can free the nlp back to the freelist.
1553  * If we are in the middle of using the nlp in the discovery state
1554  * machine, defer the free till we reach the end of the state machine.
1555  */
1556 int
1557 lpfc_nlp_remove(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
1558 {
1559         if (ndlp->nlp_flag & NLP_NODEV_TMO) {
1560                 spin_lock_irq(phba->host->host_lock);
1561                 ndlp->nlp_flag &= ~NLP_NODEV_TMO;
1562                 spin_unlock_irq(phba->host->host_lock);
1563                 del_timer_sync(&ndlp->nlp_tmofunc);
1564                 if (!list_empty(&ndlp->nodev_timeout_evt.evt_listp))
1565                         list_del_init(&ndlp->nodev_timeout_evt.evt_listp);
1566
1567         }
1568
1569
1570         if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1571                 spin_lock_irq(phba->host->host_lock);
1572                 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
1573                 spin_unlock_irq(phba->host->host_lock);
1574                 del_timer_sync(&ndlp->nlp_delayfunc);
1575                 if (!list_empty(&ndlp->els_retry_evt.evt_listp))
1576                         list_del_init(&ndlp->els_retry_evt.evt_listp);
1577         }
1578
1579         if (ndlp->nlp_disc_refcnt) {
1580                 spin_lock_irq(phba->host->host_lock);
1581                 ndlp->nlp_flag |= NLP_DELAY_REMOVE;
1582                 spin_unlock_irq(phba->host->host_lock);
1583         }
1584         else {
1585                 lpfc_freenode(phba, ndlp);
1586                 mempool_free( ndlp, phba->nlp_mem_pool);
1587         }
1588         return (0);
1589 }
1590
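/*
 * Compare a nodelist entry's DID against the given DID.  Besides an exact
 * match, a DID recorded with a zero area/domain (ALPA only) is treated as
 * matching the full fabric address, provided the domain/area agree with
 * our own DID.
 */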
1591 static int
1592 lpfc_matchdid(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, uint32_t did)
1593 {
1594         D_ID mydid;
1595         D_ID ndlpdid;
1596         D_ID matchdid;
1597
1598         if (did == Bcast_DID)
1599                 return (0);
1600
1601         if (ndlp->nlp_DID == 0) {
1602                 return (0);
1603         }
1604
1605         /* First check for Direct match */
1606         if (ndlp->nlp_DID == did)
1607                 return (1);
1608
1609         /* Next, check for a match where the area/domain is identically 0 */
1610         mydid.un.word = phba->fc_myDID;
1611         if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
1612                 return (0);
1613         }
1614
1615         matchdid.un.word = did;
1616         ndlpdid.un.word = ndlp->nlp_DID;
1617         if (matchdid.un.b.id == ndlpdid.un.b.id) {
1618                 if ((mydid.un.b.domain == matchdid.un.b.domain) &&
1619                     (mydid.un.b.area == matchdid.un.b.area)) {
1620                         if ((ndlpdid.un.b.domain == 0) &&
1621                             (ndlpdid.un.b.area == 0)) {
1622                                 if (ndlpdid.un.b.id)
1623                                         return (1);
1624                         }
1625                         return (0);
1626                 }
1627
1628                 matchdid.un.word = ndlp->nlp_DID;
1629                 if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
1630                     (mydid.un.b.area == ndlpdid.un.b.area)) {
1631                         if ((matchdid.un.b.domain == 0) &&
1632                             (matchdid.un.b.area == 0)) {
1633                                 if (matchdid.un.b.id)
1634                                         return (1);
1635                         }
1636                 }
1637         }
1638         return (0);
1639 }
1640
1641 /* Search for a nodelist entry on a specific list */
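/* The order argument is a bitmask of NLP_SEARCH_* flags selecting which node
 * lists to scan, e.g.:
 *
 *      ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
 */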
1642 struct lpfc_nodelist *
1643 lpfc_findnode_did(struct lpfc_hba * phba, uint32_t order, uint32_t did)
1644 {
1645         struct lpfc_nodelist *ndlp, *next_ndlp;
1646         uint32_t data1;
1647
1648         if (order & NLP_SEARCH_UNMAPPED) {
1649                 list_for_each_entry_safe(ndlp, next_ndlp,
1650                                          &phba->fc_nlpunmap_list, nlp_listp) {
1651                         if (lpfc_matchdid(phba, ndlp, did)) {
1652                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1653                                          ((uint32_t) ndlp->nlp_xri << 16) |
1654                                          ((uint32_t) ndlp->nlp_type << 8) |
1655                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1656                                 /* FIND node DID unmapped */
1657                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1658                                                 "%d:0929 FIND node DID unmapped"
1659                                                 " Data: x%p x%x x%x x%x\n",
1660                                                 phba->brd_no,
1661                                                 ndlp, ndlp->nlp_DID,
1662                                                 ndlp->nlp_flag, data1);
1663                                 return (ndlp);
1664                         }
1665                 }
1666         }
1667
1668         if (order & NLP_SEARCH_MAPPED) {
1669                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_nlpmap_list,
1670                                         nlp_listp) {
1671                         if (lpfc_matchdid(phba, ndlp, did)) {
1672
1673                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1674                                          ((uint32_t) ndlp->nlp_xri << 16) |
1675                                          ((uint32_t) ndlp->nlp_type << 8) |
1676                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1677                                 /* FIND node DID mapped */
1678                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1679                                                 "%d:0930 FIND node DID mapped "
1680                                                 "Data: x%p x%x x%x x%x\n",
1681                                                 phba->brd_no,
1682                                                 ndlp, ndlp->nlp_DID,
1683                                                 ndlp->nlp_flag, data1);
1684                                 return (ndlp);
1685                         }
1686                 }
1687         }
1688
1689         if (order & NLP_SEARCH_PLOGI) {
1690                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
1691                                         nlp_listp) {
1692                         if (lpfc_matchdid(phba, ndlp, did)) {
1693
1694                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1695                                          ((uint32_t) ndlp->nlp_xri << 16) |
1696                                          ((uint32_t) ndlp->nlp_type << 8) |
1697                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1698                                 /* LOG change to PLOGI */
1699                                 /* FIND node DID plogi */
1700                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1701                                                 "%d:0908 FIND node DID plogi "
1702                                                 "Data: x%p x%x x%x x%x\n",
1703                                                 phba->brd_no,
1704                                                 ndlp, ndlp->nlp_DID,
1705                                                 ndlp->nlp_flag, data1);
1706                                 return (ndlp);
1707                         }
1708                 }
1709         }
1710
1711         if (order & NLP_SEARCH_ADISC) {
1712                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
1713                                         nlp_listp) {
1714                         if (lpfc_matchdid(phba, ndlp, did)) {
1715
1716                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1717                                          ((uint32_t) ndlp->nlp_xri << 16) |
1718                                          ((uint32_t) ndlp->nlp_type << 8) |
1719                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1720                                 /* LOG change to ADISC */
1721                                 /* FIND node DID adisc */
1722                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1723                                                 "%d:0931 FIND node DID adisc "
1724                                                 "Data: x%p x%x x%x x%x\n",
1725                                                 phba->brd_no,
1726                                                 ndlp, ndlp->nlp_DID,
1727                                                 ndlp->nlp_flag, data1);
1728                                 return (ndlp);
1729                         }
1730                 }
1731         }
1732
1733         if (order & NLP_SEARCH_REGLOGIN) {
1734                 list_for_each_entry_safe(ndlp, next_ndlp,
1735                                          &phba->fc_reglogin_list, nlp_listp) {
1736                         if (lpfc_matchdid(phba, ndlp, did)) {
1737
1738                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1739                                          ((uint32_t) ndlp->nlp_xri << 16) |
1740                                          ((uint32_t) ndlp->nlp_type << 8) |
1741                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1742                                 /* LOG change to REGLOGIN */
1743                                 /* FIND node DID reglogin */
1744                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1745                                                 "%d:0931 FIND node DID reglogin"
1746                                                 " Data: x%p x%x x%x x%x\n",
1747                                                 phba->brd_no,
1748                                                 ndlp, ndlp->nlp_DID,
1749                                                 ndlp->nlp_flag, data1);
1750                                 return (ndlp);
1751                         }
1752                 }
1753         }
1754
1755         if (order & NLP_SEARCH_PRLI) {
1756                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_prli_list,
1757                                         nlp_listp) {
1758                         if (lpfc_matchdid(phba, ndlp, did)) {
1759
1760                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1761                                          ((uint32_t) ndlp->nlp_xri << 16) |
1762                                          ((uint32_t) ndlp->nlp_type << 8) |
1763                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1764                                 /* LOG change to PRLI */
1765                                 /* FIND node DID prli */
1766                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1767                                                 "%d:0931 FIND node DID prli "
1768                                                 "Data: x%p x%x x%x x%x\n",
1769                                                 phba->brd_no,
1770                                                 ndlp, ndlp->nlp_DID,
1771                                                 ndlp->nlp_flag, data1);
1772                                 return (ndlp);
1773                         }
1774                 }
1775         }
1776
1777         if (order & NLP_SEARCH_NPR) {
1778                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1779                                         nlp_listp) {
1780                         if (lpfc_matchdid(phba, ndlp, did)) {
1781
1782                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1783                                          ((uint32_t) ndlp->nlp_xri << 16) |
1784                                          ((uint32_t) ndlp->nlp_type << 8) |
1785                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1786                                 /* LOG change to NPR */
1787                                 /* FIND node DID npr */
1788                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1789                                                 "%d:0931 FIND node DID npr "
1790                                                 "Data: x%p x%x x%x x%x\n",
1791                                                 phba->brd_no,
1792                                                 ndlp, ndlp->nlp_DID,
1793                                                 ndlp->nlp_flag, data1);
1794                                 return (ndlp);
1795                         }
1796                 }
1797         }
1798
1799         if (order & NLP_SEARCH_UNUSED) {
1800                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_unused_list,
1801                                         nlp_listp) {
1802                         if (lpfc_matchdid(phba, ndlp, did)) {
1803
1804                                 data1 = (((uint32_t) ndlp->nlp_state << 24) |
1805                                          ((uint32_t) ndlp->nlp_xri << 16) |
1806                                          ((uint32_t) ndlp->nlp_type << 8) |
1807                                          ((uint32_t) ndlp->nlp_rpi & 0xff));
1808                                 /* LOG change to UNUSED */
1809                                 /* FIND node DID unused */
1810                                 lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
1811                                                 "%d:0931 FIND node DID unused "
1812                                                 "Data: x%p x%x x%x x%x\n",
1813                                                 phba->brd_no,
1814                                                 ndlp, ndlp->nlp_DID,
1815                                                 ndlp->nlp_flag, data1);
1816                                 return (ndlp);
1817                         }
1818                 }
1819         }
1820
1821         /* FIND node did <did> NOT FOUND */
1822         lpfc_printf_log(phba,
1823                         KERN_INFO,
1824                         LOG_NODE,
1825                         "%d:0932 FIND node did x%x NOT FOUND Data: x%x\n",
1826                         phba->brd_no, did, order);
1827
1828         /* no match found */
1829         return NULL;
1830 }
1831
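/*
 * Find the nodelist entry for the given DID, allocating a new NPR entry if
 * none exists, and flag it NLP_NPR_2B_DISC so discovery will process it.
 * Returns NULL when the DID should not be discovered - for example, it is
 * not part of the RSCN payload currently being processed, or a PLOGI/ADISC
 * is already outstanding for the node.
 */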
1832 struct lpfc_nodelist *
1833 lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did)
1834 {
1835         struct lpfc_nodelist *ndlp;
1836         uint32_t flg;
1837
1838         if ((ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, did)) == 0) {
1839                 if ((phba->hba_state == LPFC_HBA_READY) &&
1840                    ((lpfc_rscn_payload_check(phba, did) == 0)))
1841                         return NULL;
1842                 ndlp = (struct lpfc_nodelist *)
1843                      mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1844                 if (!ndlp)
1845                         return NULL;
1846                 lpfc_nlp_init(phba, ndlp, did);
1847                 ndlp->nlp_state = NLP_STE_NPR_NODE;
1848                 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1849                 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1850                 return ndlp;
1851         }
1852         if ((phba->hba_state == LPFC_HBA_READY) &&
1853             (phba->fc_flag & FC_RSCN_MODE)) {
1854                 if (lpfc_rscn_payload_check(phba, did)) {
1855                         ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1856                 }
1857                 else {
1858                         ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1859                         ndlp = NULL;
1860                 }
1861         }
1862         else {
1863                 flg = ndlp->nlp_flag & NLP_LIST_MASK;
1864                 if ((flg == NLP_ADISC_LIST) ||
1865                     (flg == NLP_PLOGI_LIST)) {
1866                         return NULL;
1867                 }
1868                 ndlp->nlp_state = NLP_STE_NPR_NODE;
1869                 lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
1870                 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1871         }
1872         return ndlp;
1873 }
1874
1875 /* Build a list of nodes to discover based on the loopmap */
1876 void
1877 lpfc_disc_list_loopmap(struct lpfc_hba * phba)
1878 {
1879         int j;
1880         uint32_t alpa, index;
1881
1882         if (phba->hba_state <= LPFC_LINK_DOWN) {
1883                 return;
1884         }
1885         if (phba->fc_topology != TOPOLOGY_LOOP) {
1886                 return;
1887         }
1888
1889         /* Check whether a loop map is present */
1890         if (phba->alpa_map[0]) {
1891                 for (j = 1; j <= phba->alpa_map[0]; j++) {
1892                         alpa = phba->alpa_map[j];
1893
1894                         if (((phba->fc_myDID & 0xff) == alpa) || (alpa == 0)) {
1895                                 continue;
1896                         }
1897                         lpfc_setup_disc_node(phba, alpa);
1898                 }
1899         } else {
1900                 /* No alpa map, so try all ALPAs */
1901                 for (j = 0; j < FC_MAXLOOP; j++) {
1902                         /* If cfg_scan_down is set, start from highest
1903                          * ALPA (0xef) to lowest (0x1).
1904                          */
1905                         if (phba->cfg_scan_down)
1906                                 index = j;
1907                         else
1908                                 index = FC_MAXLOOP - j - 1;
1909                         alpa = lpfcAlpaArray[index];
1910                         if ((phba->fc_myDID & 0xff) == alpa) {
1911                                 continue;
1912                         }
1913
1914                         lpfc_setup_disc_node(phba, alpa);
1915                 }
1916         }
1917         return;
1918 }
1919
1920 /* Start Link up / RSCN discovery on NPR list */
1921 void
1922 lpfc_disc_start(struct lpfc_hba * phba)
1923 {
1924         struct lpfc_sli *psli;
1925         LPFC_MBOXQ_t *mbox;
1926         struct lpfc_nodelist *ndlp, *next_ndlp;
1927         uint32_t did_changed, num_sent;
1928         uint32_t clear_la_pending;
1929         int rc;
1930
1931         psli = &phba->sli;
1932
1933         if (phba->hba_state <= LPFC_LINK_DOWN) {
1934                 return;
1935         }
1936         if (phba->hba_state == LPFC_CLEAR_LA)
1937                 clear_la_pending = 1;
1938         else
1939                 clear_la_pending = 0;
1940
1941         if (phba->hba_state < LPFC_HBA_READY) {
1942                 phba->hba_state = LPFC_DISC_AUTH;
1943         }
1944         lpfc_set_disctmo(phba);
1945
1946         if (phba->fc_prevDID == phba->fc_myDID) {
1947                 did_changed = 0;
1948         } else {
1949                 did_changed = 1;
1950         }
1951         phba->fc_prevDID = phba->fc_myDID;
1952         phba->num_disc_nodes = 0;
1953
1954         /* Start Discovery state <hba_state> */
1955         lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1956                         "%d:0202 Start Discovery hba state x%x "
1957                         "Data: x%x x%x x%x\n",
1958                         phba->brd_no, phba->hba_state, phba->fc_flag,
1959                         phba->fc_plogi_cnt, phba->fc_adisc_cnt);
1960
1961         /* If our did changed, we MUST do PLOGI */
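        /* (an ADISC only revalidates an existing login, which is no longer
         * valid once our own address has changed) */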
1962         list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_npr_list,
1963                                 nlp_listp) {
1964                 if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1965                         if (did_changed) {
1966                                 spin_lock_irq(phba->host->host_lock);
1967                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1968                                 spin_unlock_irq(phba->host->host_lock);
1969                         }
1970                 }
1971         }
1972
1973         /* First do ADISCs - if any */
1974         num_sent = lpfc_els_disc_adisc(phba);
1975
1976         if (num_sent)
1977                 return;
1978
1979         if ((phba->hba_state < LPFC_HBA_READY) && (!clear_la_pending)) {
1980                 /* If we get here, there is nothing to ADISC */
1981                 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
1982                         phba->hba_state = LPFC_CLEAR_LA;
1983                         lpfc_clear_la(phba, mbox);
1984                         mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
1985                         rc = lpfc_sli_issue_mbox(phba, mbox,
1986                                                  (MBX_NOWAIT | MBX_STOP_IOCB));
1987                         if (rc == MBX_NOT_FINISHED) {
1988                                 mempool_free( mbox, phba->mbox_mem_pool);
1989                                 lpfc_disc_flush_list(phba);
1990                                 psli->ring[(psli->ip_ring)].flag &=
1991                                         ~LPFC_STOP_IOCB_EVENT;
1992                                 psli->ring[(psli->fcp_ring)].flag &=
1993                                         ~LPFC_STOP_IOCB_EVENT;
1994                                 psli->ring[(psli->next_ring)].flag &=
1995                                         ~LPFC_STOP_IOCB_EVENT;
1996                                 phba->hba_state = LPFC_HBA_READY;
1997                         }
1998                 }
1999         } else {
2000                 /* Next do PLOGIs - if any */
2001                 num_sent = lpfc_els_disc_plogi(phba);
2002
2003                 if (num_sent)
2004                         return;
2005
2006                 if (phba->fc_flag & FC_RSCN_MODE) {
2007                         /* Check to see if more RSCNs came in while we
2008                          * were processing this one.
2009                          */
2010                         if ((phba->fc_rscn_id_cnt == 0) &&
2011                             (!(phba->fc_flag & FC_RSCN_DISCOVERY))) {
2012                                 spin_lock_irq(phba->host->host_lock);
2013                                 phba->fc_flag &= ~FC_RSCN_MODE;
2014                                 spin_unlock_irq(phba->host->host_lock);
2015                         }
2016                         else
2017                                 lpfc_els_handle_rscn(phba);
2018                 }
2019         }
2020         return;
2021 }
2022
2023 /*
2024  *  Ignore completion for all IOCBs on tx and txcmpl queue for ELS
2025  *  ring that match the specified nodelist.
2026  */
2027 static void
2028 lpfc_free_tx(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
2029 {
2030         struct lpfc_sli *psli;
2031         IOCB_t     *icmd;
2032         struct lpfc_iocbq    *iocb, *next_iocb;
2033         struct lpfc_sli_ring *pring;
2034         struct lpfc_dmabuf   *mp;
2035
2036         psli = &phba->sli;
2037         pring = &psli->ring[LPFC_ELS_RING];
2038
2039         /* Error out any iocb on the txq or txcmplq that matches this node.
2040          * First check the txq.
2041          */
2042         list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2043                 if (iocb->context1 != ndlp) {
2044                         continue;
2045                 }
2046                 icmd = &iocb->iocb;
2047                 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2048                     (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2049
2050                         list_del(&iocb->list);
2051                         pring->txq_cnt--;
2052                         lpfc_els_free_iocb(phba, iocb);
2053                 }
2054         }
2055
2056         /* Next check the txcmplq */
2057         list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
2058                 if (iocb->context1 != ndlp) {
2059                         continue;
2060                 }
2061                 icmd = &iocb->iocb;
2062                 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
2063                     (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
2064
2065                         iocb->iocb_cmpl = NULL;
2066                         /* context2 = cmd, context2->next = rsp, context3 =
2067                            bpl */
2068                         if (iocb->context2) {
2069                                 /* Free the response IOCB before handling the
2070                                    command. */
2071
2072                                 mp = (struct lpfc_dmabuf *) (iocb->context2);
2073                                 mp = list_get_first(&mp->list,
2074                                                     struct lpfc_dmabuf,
2075                                                     list);
2076                                 if (mp) {
2077                                         /* Delay before releasing rsp buffer to
2078                                          * give UNREG mbox a chance to take
2079                                          * effect.
2080                                          */
2081                                         list_add(&mp->list,
2082                                                 &phba->freebufList);
2083                                 }
2084                                 lpfc_mbuf_free(phba,
2085                                                ((struct lpfc_dmabuf *)
2086                                                 iocb->context2)->virt,
2087                                                ((struct lpfc_dmabuf *)
2088                                                 iocb->context2)->phys);
2089                                 kfree(iocb->context2);
2090                         }
2091
2092                         if (iocb->context3) {
2093                                 lpfc_mbuf_free(phba,
2094                                                ((struct lpfc_dmabuf *)
2095                                                 iocb->context3)->virt,
2096                                                ((struct lpfc_dmabuf *)
2097                                                 iocb->context3)->phys);
2098                                 kfree(iocb->context3);
2099                         }
2100                 }
2101         }
2102
2103         return;
2104 }
2105
2106 void
2107 lpfc_disc_flush_list(struct lpfc_hba * phba)
2108 {
2109         struct lpfc_nodelist *ndlp, *next_ndlp;
2110
2111         if (phba->fc_plogi_cnt) {
2112                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_plogi_list,
2113                                         nlp_listp) {
2114                         lpfc_free_tx(phba, ndlp);
2115                         lpfc_nlp_remove(phba, ndlp);
2116                 }
2117         }
2118         if (phba->fc_adisc_cnt) {
2119                 list_for_each_entry_safe(ndlp, next_ndlp, &phba->fc_adisc_list,
2120                                         nlp_listp) {
2121                         lpfc_free_tx(phba, ndlp);
2122                         lpfc_nlp_remove(phba, ndlp);
2123                 }
2124         }
2125         return;
2126 }
2127
2128 /*****************************************************************************/
2129 /*
2130  * NAME:     lpfc_disc_timeout
2131  *
2132  * FUNCTION: Fibre Channel driver discovery timeout routine.
2133  *
2134  * EXECUTION ENVIRONMENT: interrupt only
2135  *
2136  * CALLED FROM:
2137  *      Timer function
2138  *
2139  * RETURNS:
2140  *      none
2141  */
2142 /*****************************************************************************/
2143 void
2144 lpfc_disc_timeout(unsigned long ptr)
2145 {
2146         struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
2147         unsigned long flags = 0;
2148
2149         if (unlikely(!phba))
2150                 return;
2151
2152         spin_lock_irqsave(phba->host->host_lock, flags);
2153         if (!(phba->work_hba_events & WORKER_DISC_TMO)) {
2154                 phba->work_hba_events |= WORKER_DISC_TMO;
2155                 if (phba->work_wait)
2156                         wake_up(phba->work_wait);
2157         }
2158         spin_unlock_irqrestore(phba->host->host_lock, flags);
2159         return;
2160 }
2161
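/*
 * Worker-thread side of the discovery timeout.  The recovery action taken
 * depends on the hba_state the HBA was in when the timer expired.
 */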
2162 static void
2163 lpfc_disc_timeout_handler(struct lpfc_hba *phba)
2164 {
2165         struct lpfc_sli *psli;
2166         struct lpfc_nodelist *ndlp;
2167         LPFC_MBOXQ_t *clearlambox, *initlinkmbox;
2168         int rc, clrlaerr = 0;
2169
2170         if (unlikely(!phba))
2171                 return;
2172
2173         if (!(phba->fc_flag & FC_DISC_TMO))
2174                 return;
2175
2176         psli = &phba->sli;
2177
2178         spin_lock_irq(phba->host->host_lock);
2179         phba->fc_flag &= ~FC_DISC_TMO;
2180         spin_unlock_irq(phba->host->host_lock);
2181
2182         switch (phba->hba_state) {
2183
2184         case LPFC_LOCAL_CFG_LINK:
2185         /* hba_state is identically LPFC_LOCAL_CFG_LINK while waiting for FAN */
2186                 /* FAN timeout */
2187                 lpfc_printf_log(phba,
2188                                  KERN_WARNING,
2189                                  LOG_DISCOVERY,
2190                                  "%d:0221 FAN timeout\n",
2191                                  phba->brd_no);
2192
2193                 /* Forget about FAN, Start discovery by sending a FLOGI
2194                  * hba_state is identically LPFC_FLOGI while waiting for FLOGI
2195                  * cmpl
2196                  */
2197                 phba->hba_state = LPFC_FLOGI;
2198                 lpfc_set_disctmo(phba);
2199                 lpfc_initial_flogi(phba);
2200                 break;
2201
2202         case LPFC_FLOGI:
2203         /* hba_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
2204                 /* Initial FLOGI timeout */
2205                 lpfc_printf_log(phba,
2206                                  KERN_ERR,
2207                                  LOG_DISCOVERY,
2208                                  "%d:0222 Initial FLOGI timeout\n",
2209                                  phba->brd_no);
2210
2211                 /* Assume no Fabric and go on with discovery.
2212                  * Check for outstanding ELS FLOGI to abort.
2213                  */
2214
2215                 /* FLOGI failed, so just use loop map to make discovery list */
2216                 lpfc_disc_list_loopmap(phba);
2217
2218                 /* Start discovery */
2219                 lpfc_disc_start(phba);
2220                 break;
2221
2222         case LPFC_FABRIC_CFG_LINK:
2223         /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
2224            NameServer login */
2225                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2226                                 "%d:0223 Timeout while waiting for NameServer "
2227                                 "login\n", phba->brd_no);
2228
2229                 /* Next look for NameServer ndlp */
2230                 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, NameServer_DID);
2231                 if (ndlp)
2232                         lpfc_nlp_remove(phba, ndlp);
2233                 /* Start discovery */
2234                 lpfc_disc_start(phba);
2235                 break;
2236
2237         case LPFC_NS_QRY:
2238         /* Timed out while waiting for the NameServer response */
2239                 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2240                                 "%d:0224 NameServer Query timeout "
2241                                 "Data: x%x x%x\n",
2242                                 phba->brd_no,
2243                                 phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2244
2245                 ndlp = lpfc_findnode_did(phba, NLP_SEARCH_UNMAPPED,
2246                                                                 NameServer_DID);
2247                 if (ndlp) {
2248                         if (phba->fc_ns_retry < LPFC_MAX_NS_RETRY) {
2249                                 /* Try it one more time */
2250                                 rc = lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT);
2251                                 if (rc == 0)
2252                                         break;
2253                         }
2254                         phba->fc_ns_retry = 0;
2255                 }
2256
2257                 /* Nothing to authenticate, so CLEAR_LA right now */
2258                 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2259                 if (!clearlambox) {
2260                         clrlaerr = 1;
2261                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2262                                         "%d:0226 Device Discovery "
2263                                         "completion error\n",
2264                                         phba->brd_no);
2265                         phba->hba_state = LPFC_HBA_ERROR;
2266                         break;
2267                 }
2268
2269                 phba->hba_state = LPFC_CLEAR_LA;
2270                 lpfc_clear_la(phba, clearlambox);
2271                 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2272                 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2273                                          (MBX_NOWAIT | MBX_STOP_IOCB));
2274                 if (rc == MBX_NOT_FINISHED) {
2275                         mempool_free(clearlambox, phba->mbox_mem_pool);
2276                         clrlaerr = 1;
2277                         break;
2278                 }
2279
2280                 /* Setup and issue mailbox INITIALIZE LINK command */
2281                 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2282                 if (!initlinkmbox) {
2283                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2284                                         "%d:0226 Device Discovery "
2285                                         "completion error\n",
2286                                         phba->brd_no);
2287                         phba->hba_state = LPFC_HBA_ERROR;
2288                         break;
2289                 }
2290
2291                 lpfc_linkdown(phba);
2292                 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
2293                                phba->cfg_link_speed);
2294                 initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
2295                 rc = lpfc_sli_issue_mbox(phba, initlinkmbox,
2296                                          (MBX_NOWAIT | MBX_STOP_IOCB));
2297                 if (rc == MBX_NOT_FINISHED)
2298                         mempool_free(initlinkmbox, phba->mbox_mem_pool);
2299
2300                 break;
2301
2302         case LPFC_DISC_AUTH:
2303         /* Node Authentication timeout */
2304                 lpfc_printf_log(phba,
2305                                  KERN_ERR,
2306                                  LOG_DISCOVERY,
2307                                  "%d:0227 Node Authentication timeout\n",
2308                                  phba->brd_no);
2309                 lpfc_disc_flush_list(phba);
2310                 clearlambox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2311                 if (!clearlambox) {
2312                         clrlaerr = 1;
2313                         lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
2314                                         "%d:0226 Device Discovery "
2315                                         "completion error\n",
2316                                         phba->brd_no);
2317                         phba->hba_state = LPFC_HBA_ERROR;
2318                         break;
2319                 }
2320                 phba->hba_state = LPFC_CLEAR_LA;
2321                 lpfc_clear_la(phba, clearlambox);
2322                 clearlambox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
2323                 rc = lpfc_sli_issue_mbox(phba, clearlambox,
2324                                          (MBX_NOWAIT | MBX_STOP_IOCB));
2325                 if (rc == MBX_NOT_FINISHED) {
2326                         mempool_free(clearlambox, phba->mbox_mem_pool);
2327                         clrlaerr = 1;
2328                 }
2329                 break;
2330
2331         case LPFC_CLEAR_LA:
2332         /* CLEAR LA timeout */
2333                 lpfc_printf_log(phba,
2334                                  KERN_ERR,
2335                                  LOG_DISCOVERY,
2336                                  "%d:0228 CLEAR LA timeout\n",
2337                                  phba->brd_no);
2338                 clrlaerr = 1;
2339                 break;
2340
2341         case LPFC_HBA_READY:
2342                 if (phba->fc_flag & FC_RSCN_MODE) {
2343                         lpfc_printf_log(phba,
2344                                         KERN_ERR,
2345                                         LOG_DISCOVERY,
2346                                         "%d:0231 RSCN timeout Data: x%x x%x\n",
2347                                         phba->brd_no,
2348                                         phba->fc_ns_retry, LPFC_MAX_NS_RETRY);
2349
2350                         /* Cleanup any outstanding ELS commands */
2351                         lpfc_els_flush_cmd(phba);
2352
2353                         lpfc_els_flush_rscn(phba);
2354                         lpfc_disc_flush_list(phba);
2355                 }
2356                 break;
2357         }
2358
2359         if (clrlaerr) {
2360                 lpfc_disc_flush_list(phba);
2361                 psli->ring[(psli->ip_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2362                 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2363                 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
2364                 phba->hba_state = LPFC_HBA_READY;
2365         }
2366
2367         return;
2368 }
2369
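/*
 * Nodev (node device) timer expiry.  This runs in timer context, so it only
 * queues an LPFC_EVT_NODEV_TMO event and wakes the worker thread, which does
 * the actual timeout processing.
 */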
2370 static void
2371 lpfc_nodev_timeout(unsigned long ptr)
2372 {
2373         struct lpfc_hba *phba;
2374         struct lpfc_nodelist *ndlp;
2375         unsigned long iflag;
2376         struct lpfc_work_evt  *evtp;
2377
2378         ndlp = (struct lpfc_nodelist *)ptr;
2379         phba = ndlp->nlp_phba;
2380         evtp = &ndlp->nodev_timeout_evt;
2381         spin_lock_irqsave(phba->host->host_lock, iflag);
2382
2383         if (!list_empty(&evtp->evt_listp)) {
2384                 spin_unlock_irqrestore(phba->host->host_lock, iflag);
2385                 return;
2386         }
2387         evtp->evt_arg1  = ndlp;
2388         evtp->evt       = LPFC_EVT_NODEV_TMO;
2389         list_add_tail(&evtp->evt_listp, &phba->work_list);
2390         if (phba->work_wait)
2391                 wake_up(phba->work_wait);
2392
2393         spin_unlock_irqrestore(phba->host->host_lock, iflag);
2394         return;
2395 }
2396
2397
2398 /*
2399  * This routine handles processing an FDMI (management server) REG_LOGIN
2400  * mailbox command upon completion. It is set up in the LPFC_MBOXQ
2401  * as the completion routine when the command is
2402  * handed off to the SLI layer.
2403  */
2404 void
2405 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
2406 {
2407         struct lpfc_sli *psli;
2408         MAILBOX_t *mb;
2409         struct lpfc_dmabuf *mp;
2410         struct lpfc_nodelist *ndlp;
2411
2412         psli = &phba->sli;
2413         mb = &pmb->mb;
2414
2415         ndlp = (struct lpfc_nodelist *) pmb->context2;
2416         mp = (struct lpfc_dmabuf *) (pmb->context1);
2417
2418         pmb->context1 = NULL;
2419
2420         if (ndlp->nlp_rpi != 0)
2421                 lpfc_findnode_remove_rpi(phba, ndlp->nlp_rpi);
2422         ndlp->nlp_rpi = mb->un.varWords[0];
2423         lpfc_addnode_rpi(phba, ndlp, ndlp->nlp_rpi);
2424         ndlp->nlp_type |= NLP_FABRIC;
2425         ndlp->nlp_state = NLP_STE_UNMAPPED_NODE;
2426         lpfc_nlp_list(phba, ndlp, NLP_UNMAPPED_LIST);
2427
2428         /* Start issuing Fabric-Device Management Interface (FDMI)
2429          * command to 0xfffffa (FDMI well known port)
2430          */
2431         if (phba->cfg_fdmi_on == 1) {
2432                 lpfc_fdmi_cmd(phba, ndlp, SLI_MGMT_DHBA);
2433         } else {
2434                 /*
2435                  * Delay issuing FDMI command if fdmi-on=2
2436                  * (supporting RPA/hostname)
2437                  */
2438                 mod_timer(&phba->fc_fdmitmo, jiffies + HZ * 60);
2439         }
2440
2441         lpfc_mbuf_free(phba, mp->virt, mp->phys);
2442         kfree(mp);
2443         mempool_free( pmb, phba->mbox_mem_pool);
2444
2445         return;
2446 }
2447
2448 /*
2449  * This routine looks up the ndlp hash
2450  * table for the given RPI. If the rpi is
2451  * found, it returns the node list pointer;
2452  * otherwise it returns NULL.
2453  */
2454 struct lpfc_nodelist *
2455 lpfc_findnode_rpi(struct lpfc_hba * phba, uint16_t rpi)
2456 {
2457         struct lpfc_nodelist *ret;
2458
2459         ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
2460         while ((ret != 0) && (ret->nlp_rpi != rpi)) {
2461                 ret = ret->nlp_rpi_hash_next;
2462         }
2463         return ret;
2464 }
2465
2466 /*
2467  * This routine looks up the ndlp hash table for the
2468  * given RPI. If the rpi is found, the entry is removed
2469  * from the hash table and its node list pointer is
2470  * returned; otherwise NULL is returned.
2471  */
2472 struct lpfc_nodelist *
2473 lpfc_findnode_remove_rpi(struct lpfc_hba * phba, uint16_t rpi)
2474 {
2475         struct lpfc_nodelist *ret, *temp;
2476
2477         ret = phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)];
2478         if (ret == 0)
2479                 return NULL;
2480
2481         if (ret->nlp_rpi == rpi) {
2482                 phba->fc_nlplookup[LPFC_RPI_HASH_FUNC(rpi)] =
2483                     ret->nlp_rpi_hash_next;
2484                 ret->nlp_rpi_hash_next = NULL;
2485                 return ret;
2486         }
2487
2488         while ((ret->nlp_rpi_hash_next != 0) &&
2489                (ret->nlp_rpi_hash_next->nlp_rpi != rpi)) {
2490                 ret = ret->nlp_rpi_hash_next;
2491         }
2492
2493         if (ret->nlp_rpi_hash_next != 0) {
2494                 temp = ret->nlp_rpi_hash_next;
2495                 ret->nlp_rpi_hash_next = temp->nlp_rpi_hash_next;
2496                 temp->nlp_rpi_hash_next = NULL;
2497                 return temp;
2498         } else {
2499                 return NULL;
2500         }
2501 }
2502
2503 /*
2504  * This routine adds the node list entry to the
2505  * ndlp hash table.
2506  */
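/* New entries are linked at the head of the hash chain for the RPI's bucket. */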
2507 void
2508 lpfc_addnode_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2509                  uint16_t rpi)
2510 {
2511
2512         uint32_t index;
2513
2514         index = LPFC_RPI_HASH_FUNC(rpi);
2515         ndlp->nlp_rpi_hash_next = phba->fc_nlplookup[index];
2516         phba->fc_nlplookup[index] = ndlp;
2517         return;
2518 }
2519
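/*
 * Initialize a freshly allocated nodelist entry: zero the structure, set up
 * its nodev-timeout and delayed-retry timers and event list heads, and
 * record the DID it represents.
 */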
2520 void
2521 lpfc_nlp_init(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
2522                  uint32_t did)
2523 {
2524         memset(ndlp, 0, sizeof (struct lpfc_nodelist));
2525         INIT_LIST_HEAD(&ndlp->nodev_timeout_evt.evt_listp);
2526         INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
2527         init_timer(&ndlp->nlp_tmofunc);
2528         ndlp->nlp_tmofunc.function = lpfc_nodev_timeout;
2529         ndlp->nlp_tmofunc.data = (unsigned long)ndlp;
2530         init_timer(&ndlp->nlp_delayfunc);
2531         ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
2532         ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
2533         ndlp->nlp_DID = did;
2534         ndlp->nlp_phba = phba;
2535         ndlp->nlp_sid = NLP_NO_SID;
2536         return;
2537 }