1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
30 
31 #include "lpfc_hw.h"
32 #include "lpfc_sli.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_scsi.h"
35 #include "lpfc.h"
36 #include "lpfc_logmsg.h"
37 #include "lpfc_crtn.h"
38 
39 
/* Called to verify that a received ADISC was intended for us. */
41 static int
42 lpfc_check_adisc(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
43 		 struct lpfc_name * nn, struct lpfc_name * pn)
44 {
	/* Verify that the ADISC rsp WWNN / WWPN matches our internal node
	 * table entry for that node.
	 */
48 	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)) != 0)
49 		return 0;
50 
51 	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)) != 0)
52 		return 0;
53 
54 	/* we match, return success */
55 	return 1;
56 }
57 
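/* Validate the service parameters in a PLOGI or PLOGI ACC payload
 * against our own login parameters.  The per-class and common receive
 * data field sizes are clamped to what the HBA advertised, and the
 * remote node / port names are cached in the node entry.  Returns 1 on
 * success, 0 if the requested class of service is not supported.
 */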
58 int
59 lpfc_check_sparm(struct lpfc_hba * phba,
60 		 struct lpfc_nodelist * ndlp, struct serv_parm * sp,
61 		 uint32_t class)
62 {
63 	volatile struct serv_parm *hsp = &phba->fc_sparam;
64 	uint16_t hsp_value, ssp_value = 0;
65 
66 	/*
67 	 * The receive data field size and buffer-to-buffer receive data field
68 	 * size entries are 16 bits but are represented as two 8-bit fields in
69 	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
71 	 * correcting the byte values.
72 	 */
73 	if (sp->cls1.classValid) {
74 		hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
75 				hsp->cls1.rcvDataSizeLsb;
76 		ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
77 				sp->cls1.rcvDataSizeLsb;
78 		if (ssp_value > hsp_value) {
79 			sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
80 			sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
81 		}
82 	} else if (class == CLASS1) {
83 		return 0;
84 	}
85 
86 	if (sp->cls2.classValid) {
87 		hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
88 				hsp->cls2.rcvDataSizeLsb;
89 		ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
90 				sp->cls2.rcvDataSizeLsb;
91 		if (ssp_value > hsp_value) {
92 			sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
93 			sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
94 		}
95 	} else if (class == CLASS2) {
96 		return 0;
97 	}
98 
99 	if (sp->cls3.classValid) {
100 		hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
101 				hsp->cls3.rcvDataSizeLsb;
102 		ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
103 				sp->cls3.rcvDataSizeLsb;
104 		if (ssp_value > hsp_value) {
105 			sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
106 			sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
107 		}
108 	} else if (class == CLASS3) {
109 		return 0;
110 	}
111 
112 	/*
113 	 * Preserve the upper four bits of the MSB from the PLOGI response.
114 	 * These bits contain the Buffer-to-Buffer State Change Number
115 	 * from the target and need to be passed to the FW.
116 	 */
117 	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
118 	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
119 	if (ssp_value > hsp_value) {
120 		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
121 		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
122 				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
123 	}
124 
125 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
126 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
127 	return 1;
128 }
129 
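/* Return a pointer to the payload of the ELS response associated with
 * this completed command IOCB (just past the ELS command word).  If the
 * command buffer was already cleared by lpfc_els_abort, force an error
 * ulpStatus and return NULL.
 */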
130 static void *
131 lpfc_check_elscmpl_iocb(struct lpfc_hba * phba,
132 		      struct lpfc_iocbq *cmdiocb,
133 		      struct lpfc_iocbq *rspiocb)
134 {
135 	struct lpfc_dmabuf *pcmd, *prsp;
136 	uint32_t *lp;
137 	void     *ptr = NULL;
138 	IOCB_t   *irsp;
139 
140 	irsp = &rspiocb->iocb;
141 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
142 
	/* For lpfc_els_abort, context2 could be zeroed to delay
	 * freeing the associated memory until after the ABTS completes.
	 */
146 	if (pcmd) {
147 		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
148 				       list);
149 		if (prsp) {
150 			lp = (uint32_t *) prsp->virt;
151 			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
152 		}
153 	} else {
154 		/* Force ulpStatus error since we are returning NULL ptr */
155 		if (!(irsp->ulpStatus)) {
156 			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
157 			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
158 		}
159 		ptr = NULL;
160 	}
161 	return ptr;
162 }
163 
164 
165 /*
166  * Free resources / clean up outstanding I/Os
 * associated with an LPFC_NODELIST entry. This
168  * routine effectively results in a "software abort".
169  */
170 int
171 lpfc_els_abort(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
172 {
173 	LIST_HEAD(completions);
174 	struct lpfc_sli *psli;
175 	struct lpfc_sli_ring *pring;
176 	struct lpfc_iocbq *iocb, *next_iocb;
177 	IOCB_t *cmd;
178 
179 	/* Abort outstanding I/O on NPort <nlp_DID> */
180 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
181 			"%d:0205 Abort outstanding I/O on NPort x%x "
182 			"Data: x%x x%x x%x\n",
183 			phba->brd_no, ndlp->nlp_DID, ndlp->nlp_flag,
184 			ndlp->nlp_state, ndlp->nlp_rpi);
185 
186 	psli = &phba->sli;
187 	pring = &psli->ring[LPFC_ELS_RING];
188 
189 	/* First check the txq */
190 	spin_lock_irq(phba->host->host_lock);
191 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if this iocb matches the nport we are
		 * looking for.
		 */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so dequeue it and call the
			 * completion with an error.
			 */
197 			list_move_tail(&iocb->list, &completions);
198 			pring->txq_cnt--;
199 		}
200 	}
201 
202 	/* Next check the txcmplq */
203 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Check to see if this iocb matches the nport we are
		 * looking for.
		 */
206 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
207 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
208 	}
209 	spin_unlock_irq(phba->host->host_lock);
210 
211 	while (!list_empty(&completions)) {
212 		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
213 		cmd = &iocb->iocb;
214 		list_del(&iocb->list);
215 
216 		if (iocb->iocb_cmpl) {
217 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
218 			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
219 			(iocb->iocb_cmpl) (phba, iocb, iocb);
220 		} else
221 			lpfc_sli_release_iocbq(phba, iocb);
222 	}
223 
224 	/* If we are delaying issuing an ELS command, cancel it */
225 	if (ndlp->nlp_flag & NLP_DELAY_TMO)
226 		lpfc_cancel_retry_delay_tmo(phba, ndlp);
227 	return 0;
228 }
229 
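/* Process an incoming PLOGI.  Validate the service parameters, handle
 * the pt2pt and FLOGI-in-progress cases, and prepare a REG_LOGIN
 * mailbox command to be issued once the PLOGI ACC completes.
 * Returns 1 if the PLOGI is accepted, 0 if it is rejected.
 */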
230 static int
231 lpfc_rcv_plogi(struct lpfc_hba * phba,
232 		      struct lpfc_nodelist * ndlp,
233 		      struct lpfc_iocbq *cmdiocb)
234 {
235 	struct lpfc_dmabuf *pcmd;
236 	uint32_t *lp;
237 	IOCB_t *icmd;
238 	struct serv_parm *sp;
239 	LPFC_MBOXQ_t *mbox;
240 	struct ls_rjt stat;
241 	int rc;
242 
243 	memset(&stat, 0, sizeof (struct ls_rjt));
244 	if (phba->hba_state <= LPFC_FLOGI) {
		/* Before responding to the PLOGI, check for pt2pt mode.
		 * If we are pt2pt with an outstanding FLOGI, abort
		 * the FLOGI and resend it first.
		 */
249 		if (phba->fc_flag & FC_PT2PT) {
250 			lpfc_els_abort_flogi(phba);
			if (!(phba->fc_flag & FC_PT2PT_PLOGI)) {
252 				/* If the other side is supposed to initiate
253 				 * the PLOGI anyway, just ACC it now and
254 				 * move on with discovery.
255 				 */
256 				phba->fc_edtov = FF_DEF_EDTOV;
257 				phba->fc_ratov = FF_DEF_RATOV;
258 				/* Start discovery - this should just do
259 				   CLEAR_LA */
260 				lpfc_disc_start(phba);
261 			} else {
262 				lpfc_initial_flogi(phba);
263 			}
264 		} else {
265 			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
266 			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
267 			lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb,
268 					    ndlp);
269 			return 0;
270 		}
271 	}
272 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
273 	lp = (uint32_t *) pcmd->virt;
274 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
275 	if ((lpfc_check_sparm(phba, ndlp, sp, CLASS3) == 0)) {
		/* Reject this request because of invalid parameters */
277 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
278 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
279 		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
280 		return 0;
281 	}
282 	icmd = &cmdiocb->iocb;
283 
284 	/* PLOGI chkparm OK */
285 	lpfc_printf_log(phba,
286 			KERN_INFO,
287 			LOG_ELS,
288 			"%d:0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
289 			phba->brd_no,
290 			ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
291 			ndlp->nlp_rpi);
292 
293 	if ((phba->cfg_fcp_class == 2) &&
294 	    (sp->cls2.classValid)) {
295 		ndlp->nlp_fcp_info |= CLASS2;
296 	} else {
297 		ndlp->nlp_fcp_info |= CLASS3;
298 	}
299 	ndlp->nlp_class_sup = 0;
300 	if (sp->cls1.classValid)
301 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
302 	if (sp->cls2.classValid)
303 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
304 	if (sp->cls3.classValid)
305 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
306 	if (sp->cls4.classValid)
307 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
308 	ndlp->nlp_maxframe =
309 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
310 
311 	/* no need to reg_login if we are already in one of these states */
312 	switch (ndlp->nlp_state) {
313 	case  NLP_STE_NPR_NODE:
314 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
315 			break;
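		/* fall through - ADISC is pending, just ACC the PLOGI */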
316 	case  NLP_STE_REG_LOGIN_ISSUE:
317 	case  NLP_STE_PRLI_ISSUE:
318 	case  NLP_STE_UNMAPPED_NODE:
319 	case  NLP_STE_MAPPED_NODE:
320 		lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
321 		return 1;
322 	}
323 
324 	if ((phba->fc_flag & FC_PT2PT)
325 	    && !(phba->fc_flag & FC_PT2PT_PLOGI)) {
		/* The received PLOGI decides what our NPortId will be */
327 		phba->fc_myDID = icmd->un.rcvels.parmRo;
328 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
329 		if (mbox == NULL)
330 			goto out;
331 		lpfc_config_link(phba, mbox);
332 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
335 		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
337 			goto out;
338 		}
339 
340 		lpfc_can_disctmo(phba);
341 	}
342 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
343 	if (mbox == NULL)
344 		goto out;
345 
346 	if (lpfc_reg_login(phba, icmd->un.rcvels.remoteID,
347 			   (uint8_t *) sp, mbox, 0)) {
		mempool_free(mbox, phba->mbox_mem_pool);
349 		goto out;
350 	}
351 
	/* The ACC for the PLOGI rsp needs to execute first, so
	 * queue this mbox command to be processed later.
	 */
355 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
356 	/*
357 	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
358 	 * command issued in lpfc_cmpl_els_acc().
359 	 */
360 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
361 
	/*
	 * If there is an outstanding PLOGI issued, abort it before
	 * sending the ACC rsp for the received PLOGI. If the pending
	 * PLOGI is not canceled here, it will be rejected by the
	 * remote port and retried. On a configuration with a single
	 * discovery thread, this causes a large delay in discovery.
	 * It also leaves multiple state machines running in parallel
	 * for this node.
	 */
371 	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
372 		/* software abort outstanding PLOGI */
373 		lpfc_els_abort(phba, ndlp);
374 	}
375 
376 	lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
377 	return 1;
378 
379 out:
380 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
381 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
382 	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
383 	return 0;
384 }
385 
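/* Process an incoming ADISC or PDISC.  If the remote names match our
 * node table entry, ACC the request; otherwise reject it and put the
 * node in NPR state with a 1 sec delayed PLOGI retry.
 */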
386 static int
387 lpfc_rcv_padisc(struct lpfc_hba * phba,
388 		struct lpfc_nodelist * ndlp,
389 		struct lpfc_iocbq *cmdiocb)
390 {
391 	struct lpfc_dmabuf *pcmd;
392 	struct serv_parm *sp;
393 	struct lpfc_name *pnn, *ppn;
394 	struct ls_rjt stat;
395 	ADISC *ap;
396 	IOCB_t *icmd;
397 	uint32_t *lp;
398 	uint32_t cmd;
399 
400 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
401 	lp = (uint32_t *) pcmd->virt;
402 
403 	cmd = *lp++;
404 	if (cmd == ELS_CMD_ADISC) {
405 		ap = (ADISC *) lp;
406 		pnn = (struct lpfc_name *) & ap->nodeName;
407 		ppn = (struct lpfc_name *) & ap->portName;
408 	} else {
409 		sp = (struct serv_parm *) lp;
410 		pnn = (struct lpfc_name *) & sp->nodeName;
411 		ppn = (struct lpfc_name *) & sp->portName;
412 	}
413 
414 	icmd = &cmdiocb->iocb;
415 	if ((icmd->ulpStatus == 0) &&
416 	    (lpfc_check_adisc(phba, ndlp, pnn, ppn))) {
417 		if (cmd == ELS_CMD_ADISC) {
418 			lpfc_els_rsp_adisc_acc(phba, cmdiocb, ndlp);
419 		} else {
420 			lpfc_els_rsp_acc(phba, ELS_CMD_PLOGI, cmdiocb, ndlp,
421 				NULL, 0);
422 		}
423 		return 1;
424 	}
	/* Reject this request because of invalid parameters */
426 	stat.un.b.lsRjtRsvd0 = 0;
427 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
428 	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
429 	stat.un.b.vendorUnique = 0;
430 	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
431 
432 	/* 1 sec timeout */
433 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
434 
435 	spin_lock_irq(phba->host->host_lock);
436 	ndlp->nlp_flag |= NLP_DELAY_TMO;
437 	spin_unlock_irq(phba->host->host_lock);
438 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
439 	ndlp->nlp_prev_state = ndlp->nlp_state;
440 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
441 	return 0;
442 }
443 
444 static int
445 lpfc_rcv_logo(struct lpfc_hba * phba,
446 		      struct lpfc_nodelist * ndlp,
447 		      struct lpfc_iocbq *cmdiocb,
448 		      uint32_t els_cmd)
449 {
	/* Put ndlp on the NPR list with a 1 sec timeout for PLOGI, then ACC
	 * the LOGO.  Only ACC the first LOGO; this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
454 	ndlp->nlp_flag |= NLP_LOGO_ACC;
455 	if (els_cmd == ELS_CMD_PRLO)
456 		lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
457 	else
458 		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
459 
460 	if (!(ndlp->nlp_type & NLP_FABRIC) ||
461 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
462 		/* Only try to re-login if this is NOT a Fabric Node */
463 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
464 		spin_lock_irq(phba->host->host_lock);
465 		ndlp->nlp_flag |= NLP_DELAY_TMO;
466 		spin_unlock_irq(phba->host->host_lock);
467 
468 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
469 		ndlp->nlp_prev_state = ndlp->nlp_state;
470 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
471 	} else {
472 		ndlp->nlp_prev_state = ndlp->nlp_state;
473 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
474 	}
475 
476 	spin_lock_irq(phba->host->host_lock);
477 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
478 	spin_unlock_irq(phba->host->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO.  The action will resume in the
	 * lpfc_cmpl_els_logo_acc routine. Since part of that processing is an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
484 	return 0;
485 }
486 
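/* Process an incoming PRLI.  Record the FCP initiator / target roles
 * and FCP-2 (retry) capability advertised by the remote port, and
 * propagate any role change to the FC transport rport.
 */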
487 static void
488 lpfc_rcv_prli(struct lpfc_hba * phba,
489 		      struct lpfc_nodelist * ndlp,
490 		      struct lpfc_iocbq *cmdiocb)
491 {
492 	struct lpfc_dmabuf *pcmd;
493 	uint32_t *lp;
494 	PRLI *npr;
495 	struct fc_rport *rport = ndlp->rport;
496 	u32 roles;
497 
498 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
499 	lp = (uint32_t *) pcmd->virt;
500 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
501 
502 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
503 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
504 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
505 	    (npr->prliType == PRLI_FCP_TYPE)) {
506 		if (npr->initiatorFunc)
507 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
508 		if (npr->targetFunc)
509 			ndlp->nlp_type |= NLP_FCP_TARGET;
510 		if (npr->Retry)
511 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
512 	}
513 	if (rport) {
514 		/* We need to update the rport role values */
515 		roles = FC_RPORT_ROLE_UNKNOWN;
516 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
517 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
518 		if (ndlp->nlp_type & NLP_FCP_TARGET)
519 			roles |= FC_RPORT_ROLE_FCP_TARGET;
520 		fc_remote_port_rolechg(rport, roles);
521 	}
522 }
523 
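/* Decide whether this node should be revalidated with ADISC (rather
 * than a full PLOGI) on the next discovery cycle.
 */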
524 static uint32_t
525 lpfc_disc_set_adisc(struct lpfc_hba * phba,
526 		      struct lpfc_nodelist * ndlp)
527 {
	/* ADISC is used only if the use_adisc config parameter is set,
	 * we are in RSCN mode, or this is an FCP-2 device.
	 */
529 	if ((phba->cfg_use_adisc == 0) &&
530 		!(phba->fc_flag & FC_RSCN_MODE)) {
531 		if (!(ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
532 			return 0;
533 	}
534 	spin_lock_irq(phba->host->host_lock);
535 	ndlp->nlp_flag |= NLP_NPR_ADISC;
536 	spin_unlock_irq(phba->host->host_lock);
537 	return 1;
538 }
539 
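/* Catch-all handler for state / event combinations that should never
 * occur; log the transition and leave the node state unchanged.
 */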
540 static uint32_t
541 lpfc_disc_illegal(struct lpfc_hba * phba,
542 		   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
543 {
544 	lpfc_printf_log(phba,
545 			KERN_ERR,
546 			LOG_DISCOVERY,
547 			"%d:0253 Illegal State Transition: node x%x event x%x, "
548 			"state x%x Data: x%x x%x\n",
549 			phba->brd_no,
550 			ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
551 			ndlp->nlp_flag);
552 	return ndlp->nlp_state;
553 }
554 
555 /* Start of Discovery State Machine routines */
556 
557 static uint32_t
558 lpfc_rcv_plogi_unused_node(struct lpfc_hba * phba,
559 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
560 {
561 	struct lpfc_iocbq *cmdiocb;
562 
563 	cmdiocb = (struct lpfc_iocbq *) arg;
564 
565 	if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
566 		ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
567 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
568 		return ndlp->nlp_state;
569 	}
570 	lpfc_drop_node(phba, ndlp);
571 	return NLP_STE_FREED_NODE;
572 }
573 
574 static uint32_t
575 lpfc_rcv_els_unused_node(struct lpfc_hba * phba,
576 			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
577 {
578 	lpfc_issue_els_logo(phba, ndlp, 0);
579 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
580 	return ndlp->nlp_state;
581 }
582 
583 static uint32_t
584 lpfc_rcv_logo_unused_node(struct lpfc_hba * phba,
585 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
586 {
587 	struct lpfc_iocbq     *cmdiocb;
588 
589 	cmdiocb = (struct lpfc_iocbq *) arg;
590 
591 	spin_lock_irq(phba->host->host_lock);
592 	ndlp->nlp_flag |= NLP_LOGO_ACC;
593 	spin_unlock_irq(phba->host->host_lock);
594 	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
595 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
596 
597 	return ndlp->nlp_state;
598 }
599 
600 static uint32_t
601 lpfc_cmpl_logo_unused_node(struct lpfc_hba * phba,
602 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
603 {
604 	lpfc_drop_node(phba, ndlp);
605 	return NLP_STE_FREED_NODE;
606 }
607 
608 static uint32_t
609 lpfc_device_rm_unused_node(struct lpfc_hba * phba,
610 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
611 {
612 	lpfc_drop_node(phba, ndlp);
613 	return NLP_STE_FREED_NODE;
614 }
615 
616 static uint32_t
617 lpfc_rcv_plogi_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp,
618 			   void *arg, uint32_t evt)
619 {
620 	struct lpfc_iocbq *cmdiocb = arg;
621 	struct lpfc_dmabuf *pcmd;
622 	struct serv_parm *sp;
623 	uint32_t *lp;
624 	struct ls_rjt stat;
625 	int port_cmp;
626 
627 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
628 	lp = (uint32_t *) pcmd->virt;
629 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
630 
631 	memset(&stat, 0, sizeof (struct ls_rjt));
632 
633 	/* For a PLOGI, we only accept if our portname is less
634 	 * than the remote portname.
635 	 */
636 	phba->fc_stat.elsLogiCol++;
637 	port_cmp = memcmp(&phba->fc_portname, &sp->portName,
638 			  sizeof (struct lpfc_name));
639 
640 	if (port_cmp >= 0) {
641 		/* Reject this request because the remote node will accept
642 		   ours */
643 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
644 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
645 		lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
646 	} else {
647 		lpfc_rcv_plogi(phba, ndlp, cmdiocb);
648 	} /* if our portname was less */
649 
650 	return ndlp->nlp_state;
651 }
652 
653 static uint32_t
654 lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba,
655 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
656 {
657 	struct lpfc_iocbq     *cmdiocb;
658 
659 	cmdiocb = (struct lpfc_iocbq *) arg;
660 
661 	/* software abort outstanding PLOGI */
662 	lpfc_els_abort(phba, ndlp);
663 
664 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
665 	return ndlp->nlp_state;
666 }
667 
668 static uint32_t
669 lpfc_rcv_els_plogi_issue(struct lpfc_hba * phba,
670 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
671 {
672 	struct lpfc_iocbq     *cmdiocb;
673 
674 	cmdiocb = (struct lpfc_iocbq *) arg;
675 
676 	/* software abort outstanding PLOGI */
677 	lpfc_els_abort(phba, ndlp);
678 
679 	if (evt == NLP_EVT_RCV_LOGO) {
680 		lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
681 	} else {
682 		lpfc_issue_els_logo(phba, ndlp, 0);
683 	}
684 
	/* Put ndlp on the NPR list and set the PLOGI timer for 1 sec */
686 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
687 	spin_lock_irq(phba->host->host_lock);
688 	ndlp->nlp_flag |= NLP_DELAY_TMO;
689 	spin_unlock_irq(phba->host->host_lock);
690 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
691 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
692 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
693 
694 	return ndlp->nlp_state;
695 }
696 
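/* Handle completion of a PLOGI we issued.  On success, validate the
 * returned service parameters and issue a REG_LOGIN for the remote
 * port; otherwise drop the node (unless we are recovering from a
 * PLOGI collision).
 */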
697 static uint32_t
698 lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba,
699 			    struct lpfc_nodelist * ndlp, void *arg,
700 			    uint32_t evt)
701 {
702 	struct lpfc_iocbq *cmdiocb, *rspiocb;
703 	struct lpfc_dmabuf *pcmd, *prsp, *mp;
704 	uint32_t *lp;
705 	IOCB_t *irsp;
706 	struct serv_parm *sp;
707 	LPFC_MBOXQ_t *mbox;
708 
709 	cmdiocb = (struct lpfc_iocbq *) arg;
710 	rspiocb = cmdiocb->context_un.rsp_iocb;
711 
712 	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
713 		/* Recovery from PLOGI collision logic */
714 		return ndlp->nlp_state;
715 	}
716 
717 	irsp = &rspiocb->iocb;
718 
719 	if (irsp->ulpStatus)
720 		goto out;
721 
722 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
723 
724 	prsp = list_get_first(&pcmd->list,
725 			      struct lpfc_dmabuf,
726 			      list);
727 	lp = (uint32_t *) prsp->virt;
728 
729 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
730 	if (!lpfc_check_sparm(phba, ndlp, sp, CLASS3))
731 		goto out;
732 
733 	/* PLOGI chkparm OK */
734 	lpfc_printf_log(phba,
735 			KERN_INFO,
736 			LOG_ELS,
737 			"%d:0121 PLOGI chkparm OK "
738 			"Data: x%x x%x x%x x%x\n",
739 			phba->brd_no,
740 			ndlp->nlp_DID, ndlp->nlp_state,
741 			ndlp->nlp_flag, ndlp->nlp_rpi);
742 
743 	if ((phba->cfg_fcp_class == 2) &&
744 	    (sp->cls2.classValid)) {
745 		ndlp->nlp_fcp_info |= CLASS2;
746 	} else {
747 		ndlp->nlp_fcp_info |= CLASS3;
748 	}
749 	ndlp->nlp_class_sup = 0;
750 	if (sp->cls1.classValid)
751 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
752 	if (sp->cls2.classValid)
753 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
754 	if (sp->cls3.classValid)
755 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
756 	if (sp->cls4.classValid)
757 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
758 	ndlp->nlp_maxframe =
759 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
760 		sp->cmn.bbRcvSizeLsb;
761 
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto out;
765 
766 	lpfc_unreg_rpi(phba, ndlp);
767 	if (lpfc_reg_login(phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp,
768 			   mbox, 0) == 0) {
769 		switch (ndlp->nlp_DID) {
770 		case NameServer_DID:
771 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
772 			break;
773 		case FDMI_DID:
774 			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
775 			break;
776 		default:
777 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
778 		}
779 		mbox->context2 = lpfc_nlp_get(ndlp);
780 		if (lpfc_sli_issue_mbox(phba, mbox,
781 					(MBX_NOWAIT | MBX_STOP_IOCB))
782 		    != MBX_NOT_FINISHED) {
783 			lpfc_nlp_set_state(phba, ndlp, NLP_STE_REG_LOGIN_ISSUE);
784 			return ndlp->nlp_state;
785 		}
786 		lpfc_nlp_put(ndlp);
787 		mp = (struct lpfc_dmabuf *)mbox->context1;
788 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
789 		kfree(mp);
790 		mempool_free(mbox, phba->mbox_mem_pool);
791 	} else {
792 		mempool_free(mbox, phba->mbox_mem_pool);
793 	}
794 
795 
796  out:
	/* Free this node since the driver cannot log in or the service
	 * parameters are wrong.
	 */
799 	lpfc_drop_node(phba, ndlp);
800 	return NLP_STE_FREED_NODE;
801 }
802 
803 static uint32_t
804 lpfc_device_rm_plogi_issue(struct lpfc_hba * phba,
805 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
806 {
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	} else {
812 		/* software abort outstanding PLOGI */
813 		lpfc_els_abort(phba, ndlp);
814 
815 		lpfc_drop_node(phba, ndlp);
816 		return NLP_STE_FREED_NODE;
817 	}
818 }
819 
820 static uint32_t
821 lpfc_device_recov_plogi_issue(struct lpfc_hba * phba,
822 			    struct lpfc_nodelist * ndlp, void *arg,
823 			    uint32_t evt)
824 {
825 	/* software abort outstanding PLOGI */
826 	lpfc_els_abort(phba, ndlp);
827 
828 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
829 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
830 	spin_lock_irq(phba->host->host_lock);
831 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
832 	spin_unlock_irq(phba->host->host_lock);
833 
834 	return ndlp->nlp_state;
835 }
836 
837 static uint32_t
838 lpfc_rcv_plogi_adisc_issue(struct lpfc_hba * phba,
839 			    struct lpfc_nodelist * ndlp, void *arg,
840 			    uint32_t evt)
841 {
842 	struct lpfc_iocbq *cmdiocb;
843 
844 	/* software abort outstanding ADISC */
845 	lpfc_els_abort(phba, ndlp);
846 
847 	cmdiocb = (struct lpfc_iocbq *) arg;
848 
849 	if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
850 		return ndlp->nlp_state;
851 	}
852 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
853 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
854 	lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
855 
856 	return ndlp->nlp_state;
857 }
858 
859 static uint32_t
860 lpfc_rcv_prli_adisc_issue(struct lpfc_hba * phba,
861 			    struct lpfc_nodelist * ndlp, void *arg,
862 			    uint32_t evt)
863 {
864 	struct lpfc_iocbq *cmdiocb;
865 
866 	cmdiocb = (struct lpfc_iocbq *) arg;
867 
868 	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
869 	return ndlp->nlp_state;
870 }
871 
872 static uint32_t
873 lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba,
874 			    struct lpfc_nodelist * ndlp, void *arg,
875 			    uint32_t evt)
876 {
877 	struct lpfc_iocbq *cmdiocb;
878 
879 	cmdiocb = (struct lpfc_iocbq *) arg;
880 
881 	/* software abort outstanding ADISC */
882 	lpfc_els_abort(phba, ndlp);
883 
884 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
885 	return ndlp->nlp_state;
886 }
887 
888 static uint32_t
889 lpfc_rcv_padisc_adisc_issue(struct lpfc_hba * phba,
890 			    struct lpfc_nodelist * ndlp, void *arg,
891 			    uint32_t evt)
892 {
893 	struct lpfc_iocbq *cmdiocb;
894 
895 	cmdiocb = (struct lpfc_iocbq *) arg;
896 
897 	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
898 	return ndlp->nlp_state;
899 }
900 
901 static uint32_t
902 lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba,
903 			    struct lpfc_nodelist * ndlp, void *arg,
904 			    uint32_t evt)
905 {
906 	struct lpfc_iocbq *cmdiocb;
907 
908 	cmdiocb = (struct lpfc_iocbq *) arg;
909 
910 	/* Treat like rcv logo */
911 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
912 	return ndlp->nlp_state;
913 }
914 
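/* Handle completion of an ADISC we issued.  On error or a name
 * mismatch, unreg the RPI and return the node to NPR state with a
 * delayed PLOGI retry; on success move it to MAPPED (FCP target) or
 * UNMAPPED state.
 */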
915 static uint32_t
916 lpfc_cmpl_adisc_adisc_issue(struct lpfc_hba * phba,
917 			    struct lpfc_nodelist * ndlp, void *arg,
918 			    uint32_t evt)
919 {
920 	struct lpfc_iocbq *cmdiocb, *rspiocb;
921 	IOCB_t *irsp;
922 	ADISC *ap;
923 
924 	cmdiocb = (struct lpfc_iocbq *) arg;
925 	rspiocb = cmdiocb->context_un.rsp_iocb;
926 
927 	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
928 	irsp = &rspiocb->iocb;
929 
930 	if ((irsp->ulpStatus) ||
931 		(!lpfc_check_adisc(phba, ndlp, &ap->nodeName, &ap->portName))) {
932 		/* 1 sec timeout */
933 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
934 		spin_lock_irq(phba->host->host_lock);
935 		ndlp->nlp_flag |= NLP_DELAY_TMO;
936 		spin_unlock_irq(phba->host->host_lock);
937 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
938 
939 		memset(&ndlp->nlp_nodename, 0, sizeof (struct lpfc_name));
940 		memset(&ndlp->nlp_portname, 0, sizeof (struct lpfc_name));
941 
942 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
943 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
944 		lpfc_unreg_rpi(phba, ndlp);
945 		return ndlp->nlp_state;
946 	}
947 
948 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
949 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
950 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
951 	} else {
952 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
953 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
954 	}
955 	return ndlp->nlp_state;
956 }
957 
958 static uint32_t
959 lpfc_device_rm_adisc_issue(struct lpfc_hba * phba,
960 			    struct lpfc_nodelist * ndlp, void *arg,
961 			    uint32_t evt)
962 {
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	} else {
968 		/* software abort outstanding ADISC */
969 		lpfc_els_abort(phba, ndlp);
970 
971 		lpfc_drop_node(phba, ndlp);
972 		return NLP_STE_FREED_NODE;
973 	}
974 }
975 
976 static uint32_t
977 lpfc_device_recov_adisc_issue(struct lpfc_hba * phba,
978 			    struct lpfc_nodelist * ndlp, void *arg,
979 			    uint32_t evt)
980 {
981 	/* software abort outstanding ADISC */
982 	lpfc_els_abort(phba, ndlp);
983 
984 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
985 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
986 	spin_lock_irq(phba->host->host_lock);
987 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
988 	ndlp->nlp_flag |= NLP_NPR_ADISC;
989 	spin_unlock_irq(phba->host->host_lock);
990 
991 	return ndlp->nlp_state;
992 }
993 
994 static uint32_t
995 lpfc_rcv_plogi_reglogin_issue(struct lpfc_hba * phba,
996 			      struct lpfc_nodelist * ndlp, void *arg,
997 			      uint32_t evt)
998 {
999 	struct lpfc_iocbq *cmdiocb;
1000 
1001 	cmdiocb = (struct lpfc_iocbq *) arg;
1002 
1003 	lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1004 	return ndlp->nlp_state;
1005 }
1006 
1007 static uint32_t
1008 lpfc_rcv_prli_reglogin_issue(struct lpfc_hba * phba,
1009 			     struct lpfc_nodelist * ndlp, void *arg,
1010 			     uint32_t evt)
1011 {
1012 	struct lpfc_iocbq *cmdiocb;
1013 
1014 	cmdiocb = (struct lpfc_iocbq *) arg;
1015 
1016 	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1017 	return ndlp->nlp_state;
1018 }
1019 
1020 static uint32_t
1021 lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba,
1022 			     struct lpfc_nodelist * ndlp, void *arg,
1023 			     uint32_t evt)
1024 {
1025 	struct lpfc_iocbq *cmdiocb;
1026 	LPFC_MBOXQ_t	  *mb;
1027 	LPFC_MBOXQ_t	  *nextmb;
1028 	struct lpfc_dmabuf *mp;
1029 
1030 	cmdiocb = (struct lpfc_iocbq *) arg;
1031 
	/* Clean up any ndlp on the mbox queue waiting for REG_LOGIN cmpl */
	mb = phba->sli.mbox_active;
	if (mb) {
1034 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1035 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1036 			mb->context2 = NULL;
1037 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1038 		}
1039 	}
1040 
1041 	spin_lock_irq(phba->host->host_lock);
1042 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1043 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1044 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1045 			mp = (struct lpfc_dmabuf *) (mb->context1);
1046 			if (mp) {
1047 				lpfc_mbuf_free(phba, mp->virt, mp->phys);
1048 				kfree(mp);
1049 			}
1050 			list_del(&mb->list);
1051 			mempool_free(mb, phba->mbox_mem_pool);
1052 		}
1053 	}
1054 	spin_unlock_irq(phba->host->host_lock);
1055 
1056 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1057 	return ndlp->nlp_state;
1058 }
1059 
1060 static uint32_t
1061 lpfc_rcv_padisc_reglogin_issue(struct lpfc_hba * phba,
1062 			       struct lpfc_nodelist * ndlp, void *arg,
1063 			       uint32_t evt)
1064 {
1065 	struct lpfc_iocbq *cmdiocb;
1066 
1067 	cmdiocb = (struct lpfc_iocbq *) arg;
1068 
1069 	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1070 	return ndlp->nlp_state;
1071 }
1072 
1073 static uint32_t
1074 lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba,
1075 			     struct lpfc_nodelist * ndlp, void *arg,
1076 			     uint32_t evt)
1077 {
1078 	struct lpfc_iocbq *cmdiocb;
1079 
1080 	cmdiocb = (struct lpfc_iocbq *) arg;
1081 	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1082 	return ndlp->nlp_state;
1083 }
1084 
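/* Handle REG_LOGIN mailbox completion.  On failure, either park the
 * node (out of RPIs) or send a LOGO and retry PLOGI after a delay; on
 * success record the RPI and issue a PRLI to non-fabric ports.
 */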
1085 static uint32_t
1086 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_hba * phba,
1087 				  struct lpfc_nodelist * ndlp,
1088 				  void *arg, uint32_t evt)
1089 {
1090 	LPFC_MBOXQ_t *pmb;
1091 	MAILBOX_t *mb;
1092 	uint32_t did;
1093 
1094 	pmb = (LPFC_MBOXQ_t *) arg;
1095 	mb = &pmb->mb;
1096 	did = mb->un.varWords[1];
1097 	if (mb->mbxStatus) {
1098 		/* RegLogin failed */
1099 		lpfc_printf_log(phba,
1100 				KERN_ERR,
1101 				LOG_DISCOVERY,
1102 				"%d:0246 RegLogin failed Data: x%x x%x x%x\n",
1103 				phba->brd_no,
1104 				did, mb->mbxStatus, phba->hba_state);
1105 
1106 		/*
		 * If RegLogin failed due to lack of HBA resources, do not
1108 		 * retry discovery.
1109 		 */
1110 		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1111 			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
1112 			lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNUSED_NODE);
1113 			return ndlp->nlp_state;
1114 		}
1115 
		/* Put ndlp on the NPR list and set the PLOGI timer for 1 sec */
1117 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1118 		spin_lock_irq(phba->host->host_lock);
1119 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1120 		spin_unlock_irq(phba->host->host_lock);
1121 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1122 
1123 		lpfc_issue_els_logo(phba, ndlp, 0);
1124 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1125 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1126 		return ndlp->nlp_state;
1127 	}
1128 
1129 	ndlp->nlp_rpi = mb->un.varWords[0];
1130 
1131 	/* Only if we are not a fabric nport do we issue PRLI */
1132 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1133 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1134 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_PRLI_ISSUE);
1135 		lpfc_issue_els_prli(phba, ndlp, 0);
1136 	} else {
1137 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1138 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
1139 	}
1140 	return ndlp->nlp_state;
1141 }
1142 
1143 static uint32_t
1144 lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba,
1145 			      struct lpfc_nodelist * ndlp, void *arg,
1146 			      uint32_t evt)
1147 {
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	} else {
1153 		lpfc_drop_node(phba, ndlp);
1154 		return NLP_STE_FREED_NODE;
1155 	}
1156 }
1157 
1158 static uint32_t
1159 lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba,
1160 			       struct lpfc_nodelist * ndlp, void *arg,
1161 			       uint32_t evt)
1162 {
1163 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1164 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1165 	spin_lock_irq(phba->host->host_lock);
1166 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1167 	spin_unlock_irq(phba->host->host_lock);
1168 	return ndlp->nlp_state;
1169 }
1170 
1171 static uint32_t
1172 lpfc_rcv_plogi_prli_issue(struct lpfc_hba * phba,
1173 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1174 {
1175 	struct lpfc_iocbq *cmdiocb;
1176 
1177 	cmdiocb = (struct lpfc_iocbq *) arg;
1178 
1179 	lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1180 	return ndlp->nlp_state;
1181 }
1182 
1183 static uint32_t
1184 lpfc_rcv_prli_prli_issue(struct lpfc_hba * phba,
1185 			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1186 {
1187 	struct lpfc_iocbq *cmdiocb;
1188 
1189 	cmdiocb = (struct lpfc_iocbq *) arg;
1190 
1191 	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1192 	return ndlp->nlp_state;
1193 }
1194 
1195 static uint32_t
1196 lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba,
1197 			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1198 {
1199 	struct lpfc_iocbq *cmdiocb;
1200 
1201 	cmdiocb = (struct lpfc_iocbq *) arg;
1202 
1203 	/* Software abort outstanding PRLI before sending acc */
1204 	lpfc_els_abort(phba, ndlp);
1205 
1206 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1207 	return ndlp->nlp_state;
1208 }
1209 
1210 static uint32_t
1211 lpfc_rcv_padisc_prli_issue(struct lpfc_hba * phba,
1212 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1213 {
1214 	struct lpfc_iocbq *cmdiocb;
1215 
1216 	cmdiocb = (struct lpfc_iocbq *) arg;
1217 
1218 	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1219 	return ndlp->nlp_state;
1220 }
1221 
/* This routine is invoked when we receive a PRLO request from an nport
 * we are logged into.  We should send back a PRLO rsp setting the
1224  * appropriate bits.
1225  * NEXT STATE = PRLI_ISSUE
1226  */
1227 static uint32_t
1228 lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba,
1229 			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1230 {
1231 	struct lpfc_iocbq *cmdiocb;
1232 
1233 	cmdiocb = (struct lpfc_iocbq *) arg;
1234 	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1235 	return ndlp->nlp_state;
1236 }
1237 
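/* Handle completion of a PRLI we issued.  On error, fall back to
 * UNMAPPED state; on success record the negotiated FCP roles and move
 * the node to MAPPED state.
 */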
1238 static uint32_t
1239 lpfc_cmpl_prli_prli_issue(struct lpfc_hba * phba,
1240 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1241 {
1242 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1243 	IOCB_t *irsp;
1244 	PRLI *npr;
1245 
1246 	cmdiocb = (struct lpfc_iocbq *) arg;
1247 	rspiocb = cmdiocb->context_un.rsp_iocb;
1248 	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1249 
1250 	irsp = &rspiocb->iocb;
1251 	if (irsp->ulpStatus) {
1252 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1253 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_UNMAPPED_NODE);
1254 		return ndlp->nlp_state;
1255 	}
1256 
1257 	/* Check out PRLI rsp */
1258 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1259 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1260 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1261 	    (npr->prliType == PRLI_FCP_TYPE)) {
1262 		if (npr->initiatorFunc)
1263 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1264 		if (npr->targetFunc)
1265 			ndlp->nlp_type |= NLP_FCP_TARGET;
1266 		if (npr->Retry)
1267 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1268 	}
1269 
1270 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1271 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_MAPPED_NODE);
1272 	return ndlp->nlp_state;
1273 }
1274 
1275 /*! lpfc_device_rm_prli_issue
1276   *
1277   * \pre
1278   * \post
1279   * \param   phba
1280   * \param   ndlp
1281   * \param   arg
1282   * \param   evt
1283   * \return  uint32_t
1284   *
1285   * \b Description:
  *    This routine is invoked when we receive a request to remove an nport
  *    we are in the process of PRLIing. We should software abort the
  *    outstanding PRLI, unreg the login, and send a LOGO. We will change the
  *    node state to UNUSED_NODE and put it on the plogi list so it can be
  *    freed when the LOGO completes.
1290   *
1291   */
1292 static uint32_t
1293 lpfc_device_rm_prli_issue(struct lpfc_hba * phba,
1294 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1295 {
	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PRLI */
1302 		lpfc_els_abort(phba, ndlp);
1303 
1304 		lpfc_drop_node(phba, ndlp);
1305 		return NLP_STE_FREED_NODE;
1306 	}
1307 }
1308 
1309 
1310 /*! lpfc_device_recov_prli_issue
1311   *
1312   * \pre
1313   * \post
1314   * \param   phba
1315   * \param   ndlp
1316   * \param   arg
1317   * \param   evt
1318   * \return  uint32_t
1319   *
1320   * \b Description:
  *    This routine is invoked when the state of a device is unknown, such as
  *    during a link down. We should remove the nodelist entry from the
  *    unmapped list, issue an UNREG_LOGIN, do a software abort of the
1324   *    outstanding PRLI command, then free the node entry.
1325   */
1326 static uint32_t
1327 lpfc_device_recov_prli_issue(struct lpfc_hba * phba,
1328 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1329 {
1330 	/* software abort outstanding PRLI */
1331 	lpfc_els_abort(phba, ndlp);
1332 
1333 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1334 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1335 	spin_lock_irq(phba->host->host_lock);
1336 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1337 	spin_unlock_irq(phba->host->host_lock);
1338 	return ndlp->nlp_state;
1339 }
1340 
1341 static uint32_t
1342 lpfc_rcv_plogi_unmap_node(struct lpfc_hba * phba,
1343 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1344 {
1345 	struct lpfc_iocbq *cmdiocb;
1346 
1347 	cmdiocb = (struct lpfc_iocbq *) arg;
1348 
1349 	lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1350 	return ndlp->nlp_state;
1351 }
1352 
1353 static uint32_t
1354 lpfc_rcv_prli_unmap_node(struct lpfc_hba * phba,
1355 			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1356 {
1357 	struct lpfc_iocbq *cmdiocb;
1358 
1359 	cmdiocb = (struct lpfc_iocbq *) arg;
1360 
1361 	lpfc_rcv_prli(phba, ndlp, cmdiocb);
1362 	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1363 	return ndlp->nlp_state;
1364 }
1365 
1366 static uint32_t
1367 lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba,
1368 			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1369 {
1370 	struct lpfc_iocbq *cmdiocb;
1371 
1372 	cmdiocb = (struct lpfc_iocbq *) arg;
1373 
1374 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1375 	return ndlp->nlp_state;
1376 }
1377 
1378 static uint32_t
1379 lpfc_rcv_padisc_unmap_node(struct lpfc_hba * phba,
1380 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1381 {
1382 	struct lpfc_iocbq *cmdiocb;
1383 
1384 	cmdiocb = (struct lpfc_iocbq *) arg;
1385 
1386 	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1387 	return ndlp->nlp_state;
1388 }
1389 
1390 static uint32_t
1391 lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba,
1392 			 struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1393 {
1394 	struct lpfc_iocbq *cmdiocb;
1395 
1396 	cmdiocb = (struct lpfc_iocbq *) arg;
1397 
1398 	lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1399 	return ndlp->nlp_state;
1400 }
1401 
1402 static uint32_t
1403 lpfc_device_recov_unmap_node(struct lpfc_hba * phba,
1404 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1405 {
1406 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1407 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1408 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1409 	lpfc_disc_set_adisc(phba, ndlp);
1410 
1411 	return ndlp->nlp_state;
1412 }
1413 
1414 static uint32_t
1415 lpfc_rcv_plogi_mapped_node(struct lpfc_hba * phba,
1416 			   struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1417 {
1418 	struct lpfc_iocbq *cmdiocb;
1419 
1420 	cmdiocb = (struct lpfc_iocbq *) arg;
1421 
1422 	lpfc_rcv_plogi(phba, ndlp, cmdiocb);
1423 	return ndlp->nlp_state;
1424 }
1425 
1426 static uint32_t
1427 lpfc_rcv_prli_mapped_node(struct lpfc_hba * phba,
1428 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1429 {
1430 	struct lpfc_iocbq *cmdiocb;
1431 
1432 	cmdiocb = (struct lpfc_iocbq *) arg;
1433 
1434 	lpfc_els_rsp_prli_acc(phba, cmdiocb, ndlp);
1435 	return ndlp->nlp_state;
1436 }
1437 
1438 static uint32_t
1439 lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba,
1440 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1441 {
1442 	struct lpfc_iocbq *cmdiocb;
1443 
1444 	cmdiocb = (struct lpfc_iocbq *) arg;
1445 
1446 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1447 	return ndlp->nlp_state;
1448 }
1449 
1450 static uint32_t
1451 lpfc_rcv_padisc_mapped_node(struct lpfc_hba * phba,
1452 			    struct lpfc_nodelist * ndlp, void *arg,
1453 			    uint32_t evt)
1454 {
1455 	struct lpfc_iocbq *cmdiocb;
1456 
1457 	cmdiocb = (struct lpfc_iocbq *) arg;
1458 
1459 	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1460 	return ndlp->nlp_state;
1461 }
1462 
1463 static uint32_t
1464 lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba,
1465 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1466 {
1467 	struct lpfc_iocbq *cmdiocb;
1468 
1469 	cmdiocb = (struct lpfc_iocbq *) arg;
1470 
1471 	/* flush the target */
1472 	spin_lock_irq(phba->host->host_lock);
1473 	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1474 			       ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
1475 	spin_unlock_irq(phba->host->host_lock);
1476 
1477 	/* Treat like rcv logo */
1478 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO);
1479 	return ndlp->nlp_state;
1480 }
1481 
1482 static uint32_t
1483 lpfc_device_recov_mapped_node(struct lpfc_hba * phba,
1484 			    struct lpfc_nodelist * ndlp, void *arg,
1485 			    uint32_t evt)
1486 {
1487 	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
1488 	lpfc_nlp_set_state(phba, ndlp, NLP_STE_NPR_NODE);
1489 	spin_lock_irq(phba->host->host_lock);
1490 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1491 	spin_unlock_irq(phba->host->host_lock);
1492 	lpfc_disc_set_adisc(phba, ndlp);
1493 	return ndlp->nlp_state;
1494 }
1495 
1496 static uint32_t
1497 lpfc_rcv_plogi_npr_node(struct lpfc_hba * phba,
1498 			    struct lpfc_nodelist * ndlp, void *arg,
1499 			    uint32_t evt)
1500 {
1501 	struct lpfc_iocbq *cmdiocb;
1502 
1503 	cmdiocb = (struct lpfc_iocbq *) arg;
1504 
1505 	/* Ignore PLOGI if we have an outstanding LOGO */
1506 	if (ndlp->nlp_flag & NLP_LOGO_SND) {
1507 		return ndlp->nlp_state;
1508 	}
1509 
1510 	if (lpfc_rcv_plogi(phba, ndlp, cmdiocb)) {
1511 		spin_lock_irq(phba->host->host_lock);
1512 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1513 		spin_unlock_irq(phba->host->host_lock);
1514 		return ndlp->nlp_state;
1515 	}
1516 
1517 	/* send PLOGI immediately, move to PLOGI issue state */
1518 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1519 		ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1520 		lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1521 		lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
1522 	}
1523 
1524 	return ndlp->nlp_state;
1525 }
1526 
1527 static uint32_t
1528 lpfc_rcv_prli_npr_node(struct lpfc_hba * phba,
1529 			    struct lpfc_nodelist * ndlp, void *arg,
1530 			    uint32_t evt)
1531 {
1532 	struct lpfc_iocbq     *cmdiocb;
1533 	struct ls_rjt          stat;
1534 
1535 	cmdiocb = (struct lpfc_iocbq *) arg;
1536 
1537 	memset(&stat, 0, sizeof (struct ls_rjt));
1538 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1539 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1540 	lpfc_els_rsp_reject(phba, stat.un.lsRjtError, cmdiocb, ndlp);
1541 
1542 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1543 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1544 			spin_lock_irq(phba->host->host_lock);
1545 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1546 			spin_unlock_irq(phba->host->host_lock);
1547 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1548 			lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
1549 			lpfc_issue_els_adisc(phba, ndlp, 0);
1550 		} else {
1551 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1552 			lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1553 			lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
1554 		}
1555 	}
1556 	return ndlp->nlp_state;
1557 }
1558 
1559 static uint32_t
1560 lpfc_rcv_logo_npr_node(struct lpfc_hba * phba,
1561 			    struct lpfc_nodelist * ndlp, void *arg,
1562 			    uint32_t evt)
1563 {
1564 	struct lpfc_iocbq     *cmdiocb;
1565 
1566 	cmdiocb = (struct lpfc_iocbq *) arg;
1567 
1568 	lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO);
1569 	return ndlp->nlp_state;
1570 }
1571 
1572 static uint32_t
1573 lpfc_rcv_padisc_npr_node(struct lpfc_hba * phba,
1574 			    struct lpfc_nodelist * ndlp, void *arg,
1575 			    uint32_t evt)
1576 {
1577 	struct lpfc_iocbq     *cmdiocb;
1578 
1579 	cmdiocb = (struct lpfc_iocbq *) arg;
1580 
1581 	lpfc_rcv_padisc(phba, ndlp, cmdiocb);
1582 
1583 	/*
	 * Do not start discovery if discovery is about to start
	 * or is already in progress for this node. Starting discovery
1586 	 * here will affect the counting of discovery threads.
1587 	 */
1588 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1589 		!(ndlp->nlp_flag & NLP_NPR_2B_DISC)){
1590 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1591 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1592 			lpfc_nlp_set_state(phba, ndlp, NLP_STE_ADISC_ISSUE);
1593 			lpfc_issue_els_adisc(phba, ndlp, 0);
1594 		} else {
1595 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1596 			lpfc_nlp_set_state(phba, ndlp, NLP_STE_PLOGI_ISSUE);
1597 			lpfc_issue_els_plogi(phba, ndlp->nlp_DID, 0);
1598 		}
1599 	}
1600 	return ndlp->nlp_state;
1601 }
1602 
1603 static uint32_t
1604 lpfc_rcv_prlo_npr_node(struct lpfc_hba * phba,
1605 			    struct lpfc_nodelist * ndlp, void *arg,
1606 			    uint32_t evt)
1607 {
1608 	struct lpfc_iocbq     *cmdiocb;
1609 
1610 	cmdiocb = (struct lpfc_iocbq *) arg;
1611 
1612 	spin_lock_irq(phba->host->host_lock);
1613 	ndlp->nlp_flag |= NLP_LOGO_ACC;
1614 	spin_unlock_irq(phba->host->host_lock);
1615 
1616 	lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1617 
1618 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1619 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1620 		spin_lock_irq(phba->host->host_lock);
1621 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1622 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1623 		spin_unlock_irq(phba->host->host_lock);
1624 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1625 	} else {
1626 		spin_lock_irq(phba->host->host_lock);
1627 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1628 		spin_unlock_irq(phba->host->host_lock);
1629 	}
1630 	return ndlp->nlp_state;
1631 }
1632 
1633 static uint32_t
1634 lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba,
1635 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1636 {
1637 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1638 	IOCB_t *irsp;
1639 
1640 	cmdiocb = (struct lpfc_iocbq *) arg;
1641 	rspiocb = cmdiocb->context_un.rsp_iocb;
1642 
1643 	irsp = &rspiocb->iocb;
1644 	if (irsp->ulpStatus) {
1645 		lpfc_drop_node(phba, ndlp);
1646 		return NLP_STE_FREED_NODE;
1647 	}
1648 	return ndlp->nlp_state;
1649 }
1650 
1651 static uint32_t
1652 lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba,
1653 			  struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1654 {
1655 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1656 	IOCB_t *irsp;
1657 
1658 	cmdiocb = (struct lpfc_iocbq *) arg;
1659 	rspiocb = cmdiocb->context_un.rsp_iocb;
1660 
1661 	irsp = &rspiocb->iocb;
1662 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1663 		lpfc_drop_node(phba, ndlp);
1664 		return NLP_STE_FREED_NODE;
1665 	}
1666 	return ndlp->nlp_state;
1667 }
1668 
1669 static uint32_t
1670 lpfc_cmpl_logo_npr_node(struct lpfc_hba * phba,
1671 		struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
1672 {
1673 	lpfc_unreg_rpi(phba, ndlp);
	/* Just unreg the RPI; the node stays in its current state */
1675 	return ndlp->nlp_state;
1676 }
1677 
1678 static uint32_t
1679 lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba,
1680 			    struct lpfc_nodelist * ndlp, void *arg,
1681 			    uint32_t evt)
1682 {
1683 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1684 	IOCB_t *irsp;
1685 
1686 	cmdiocb = (struct lpfc_iocbq *) arg;
1687 	rspiocb = cmdiocb->context_un.rsp_iocb;
1688 
1689 	irsp = &rspiocb->iocb;
1690 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1691 		lpfc_drop_node(phba, ndlp);
1692 		return NLP_STE_FREED_NODE;
1693 	}
1694 	return ndlp->nlp_state;
1695 }
1696 
1697 static uint32_t
1698 lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba,
1699 			    struct lpfc_nodelist * ndlp, void *arg,
1700 			    uint32_t evt)
1701 {
1702 	LPFC_MBOXQ_t *pmb;
1703 	MAILBOX_t *mb;
1704 
1705 	pmb = (LPFC_MBOXQ_t *) arg;
1706 	mb = &pmb->mb;
1707 
1708 	if (!mb->mbxStatus)
1709 		ndlp->nlp_rpi = mb->un.varWords[0];
1710 	else {
1711 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1712 			lpfc_drop_node(phba, ndlp);
1713 			return NLP_STE_FREED_NODE;
1714 		}
1715 	}
1716 	return ndlp->nlp_state;
1717 }
1718 
1719 static uint32_t
1720 lpfc_device_rm_npr_node(struct lpfc_hba * phba,
1721 			    struct lpfc_nodelist * ndlp, void *arg,
1722 			    uint32_t evt)
1723 {
1724 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1725 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1726 		return ndlp->nlp_state;
1727 	}
1728 	lpfc_drop_node(phba, ndlp);
1729 	return NLP_STE_FREED_NODE;
1730 }
1731 
1732 static uint32_t
1733 lpfc_device_recov_npr_node(struct lpfc_hba * phba,
1734 			    struct lpfc_nodelist * ndlp, void *arg,
1735 			    uint32_t evt)
1736 {
1737 	spin_lock_irq(phba->host->host_lock);
1738 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1739 	spin_unlock_irq(phba->host->host_lock);
1740 	if (ndlp->nlp_flag & NLP_DELAY_TMO) {
1741 		lpfc_cancel_retry_delay_tmo(phba, ndlp);
1742 	}
1743 	return ndlp->nlp_state;
1744 }
1745 
1746 
1747 /* This next section defines the NPort Discovery State Machine */
1748 
1749 /* There are 4 different double linked lists nodelist entries can reside on.
1750  * The plogi list and adisc list are used when Link Up discovery or RSCN
1751  * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists will keep track of what nodes will be affected
 * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
1754  * The unmapped_list will contain all nodes that we have successfully logged
1755  * into at the Fibre Channel level. The mapped_list will contain all nodes
1756  * that are mapped FCP targets.
1757  */
1758 /*
1759  * The bind list is a list of undiscovered (potentially non-existent) nodes
1760  * that we have saved binding information on. This information is used when
1761  * nodes transition from the unmapped to the mapped list.
1762  */
/* For UNUSED_NODE state, the node has just been allocated.
1764  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
1765  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
1766  * and put on the unmapped list. For ADISC processing, the node is taken off
1767  * the ADISC list and placed on either the mapped or unmapped list (depending
1768  * on its previous state). Once on the unmapped list, a PRLI is issued and the
1769  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
1770  * changed to UNMAPPED_NODE. If the completion indicates a mapped
1771  * node, the node is taken off the unmapped list. The binding list is checked
1772  * for a valid binding, or a binding is automatically assigned. If binding
1773  * assignment is unsuccessful, the node is left on the unmapped list. If
1774  * binding assignment is successful, the associated binding list entry (if
1775  * any) is removed, and the node is placed on the mapped list.
1776  */
1777 /*
1778  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
1779  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
 * expire, all affected nodes will receive a DEVICE_RM event.
1781  */
1782 /*
1783  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
1784  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
1785  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
1786  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
1787  * we will first process the ADISC list.  32 entries are processed initially and
1788  * ADISC is initited for each one.  Completions / Events for each node are
1789  * funnelled thru the state machine.  As each node finishes ADISC processing, it
1790  * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
1791  * waiting, and the ADISC list count is identically 0, then we are done. For
1792  * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
1793  * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
1794  * list.  32 entries are processed initially and PLOGI is initited for each one.
1795  * Completions / Events for each node are funnelled thru the state machine.  As
1796  * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
1797  * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
1798  * indentically 0, then we are done. We have now completed discovery / RSCN
1799  * handling. Upon completion, ALL nodes should be on either the mapped or
1800  * unmapped lists.
1801  */
1802 
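/* The state machine is driven by the flat jump table below: one row of
 * NLP_EVT_MAX_EVENT handlers per state, NLP_STE_MAX_STATE rows in total,
 * so the handler for a (state, event) pair lives at index
 * (state * NLP_EVT_MAX_EVENT) + event, exactly as computed in
 * lpfc_disc_state_machine() below.  For example (assuming the NLP_STE_* /
 * NLP_EVT_* numbering in lpfc_disc.h matches the ordering used here), a
 * CMPL_REG_LOGIN event on a node in NPR state selects the last row of the
 * table and dispatches lpfc_cmpl_reglogin_npr_node() above.
 */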
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
     (struct lpfc_hba *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine                  Event       Current State  */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_disc_illegal,		/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue, /* CMPL_REG_LOGIN  */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue, /* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,	/* RCV_PLOGI   NPR_NODE    */
	lpfc_rcv_prli_npr_node,		/* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,		/* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,		/* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,	/* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,	/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,	/* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,	/* DEVICE_RM       */
	lpfc_device_recov_npr_node,	/* DEVICE_RECOVERY */
};

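/* Entry point for the discovery state machine.  A reference is taken on
 * ndlp across the handler call so the node cannot be freed underneath us;
 * the handler's return value is the node's resulting state, or
 * NLP_STE_FREED_NODE if the node was dropped.
 */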
int
lpfc_disc_state_machine(struct lpfc_hba * phba,
			struct lpfc_nodelist * ndlp, void *arg, uint32_t evt)
{
	uint32_t cur_state, rc;
	uint32_t(*func) (struct lpfc_hba *, struct lpfc_nodelist *, void *,
			 uint32_t);

	lpfc_nlp_get(ndlp);
	cur_state = ndlp->nlp_state;

	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_DISCOVERY,
			"%d:0211 DSM in event x%x on NPort x%x in state %d "
			"Data: x%x\n",
			phba->brd_no,
			evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);

	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
	rc = (func) (phba, ndlp, arg, evt);

	/* DSM out state <rc> on NPort <nlp_DID> */
	lpfc_printf_log(phba,
		       KERN_INFO,
		       LOG_DISCOVERY,
		       "%d:0212 DSM out state %d on NPort x%x Data: x%x\n",
		       phba->brd_no,
		       rc, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_nlp_put(ndlp);

	return rc;
}

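/* Minimal caller sketch (illustrative only, not part of the driver): the
 * ELS and discovery paths are expected to feed events such as the one
 * below into lpfc_disc_state_machine() and to treat a NLP_STE_FREED_NODE
 * return as "the node is gone".  The NLP_EVT_RCV_PLOGI event code and the
 * cmdiocb argument are assumptions based on lpfc_disc.h and the handler
 * prototypes above, not a verbatim excerpt:
 *
 *	rc = lpfc_disc_state_machine(phba, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 *	if (rc == NLP_STE_FREED_NODE)
 *		return;		(ndlp must not be touched after this)
 */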