1  /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44 
45 
46 /* Called to verify a rcv'ed ADISC was intended for us. */
47 static int
48 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49 		 struct lpfc_name *nn, struct lpfc_name *pn)
50 {
51 	/* First, we MUST have a RPI registered */
52 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
53 		return 0;
54 
	/* Check that the ADISC rsp WWNN / WWPN match our internal node
56 	 * table entry for that node.
57 	 */
58 	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
59 		return 0;
60 
61 	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
62 		return 0;
63 
64 	/* we match, return success */
65 	return 1;
66 }
67 
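/* Validate the service parameters in a PLOGI/FLOGI payload against our
 * own, clamping the per-class and common receive data field sizes to what
 * this port advertised, and cache the remote WWNN/WWPN in the ndlp.
 * Returns 1 if the parameters are acceptable, 0 otherwise.
 */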
68 int
69 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
70 		 struct serv_parm *sp, uint32_t class, int flogi)
71 {
72 	volatile struct serv_parm *hsp = &vport->fc_sparam;
73 	uint16_t hsp_value, ssp_value = 0;
74 
75 	/*
76 	 * The receive data field size and buffer-to-buffer receive data field
77 	 * size entries are 16 bits but are represented as two 8-bit fields in
78 	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
80 	 * correcting the byte values.
81 	 */
82 	if (sp->cls1.classValid) {
83 		if (!flogi) {
84 			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
85 				     hsp->cls1.rcvDataSizeLsb);
86 			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
87 				     sp->cls1.rcvDataSizeLsb);
88 			if (!ssp_value)
89 				goto bad_service_param;
90 			if (ssp_value > hsp_value) {
91 				sp->cls1.rcvDataSizeLsb =
92 					hsp->cls1.rcvDataSizeLsb;
93 				sp->cls1.rcvDataSizeMsb =
94 					hsp->cls1.rcvDataSizeMsb;
95 			}
96 		}
97 	} else if (class == CLASS1)
98 		goto bad_service_param;
99 	if (sp->cls2.classValid) {
100 		if (!flogi) {
101 			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
102 				     hsp->cls2.rcvDataSizeLsb);
103 			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
104 				     sp->cls2.rcvDataSizeLsb);
105 			if (!ssp_value)
106 				goto bad_service_param;
107 			if (ssp_value > hsp_value) {
108 				sp->cls2.rcvDataSizeLsb =
109 					hsp->cls2.rcvDataSizeLsb;
110 				sp->cls2.rcvDataSizeMsb =
111 					hsp->cls2.rcvDataSizeMsb;
112 			}
113 		}
114 	} else if (class == CLASS2)
115 		goto bad_service_param;
116 	if (sp->cls3.classValid) {
117 		if (!flogi) {
118 			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
119 				     hsp->cls3.rcvDataSizeLsb);
120 			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
121 				     sp->cls3.rcvDataSizeLsb);
122 			if (!ssp_value)
123 				goto bad_service_param;
124 			if (ssp_value > hsp_value) {
125 				sp->cls3.rcvDataSizeLsb =
126 					hsp->cls3.rcvDataSizeLsb;
127 				sp->cls3.rcvDataSizeMsb =
128 					hsp->cls3.rcvDataSizeMsb;
129 			}
130 		}
131 	} else if (class == CLASS3)
132 		goto bad_service_param;
133 
134 	/*
135 	 * Preserve the upper four bits of the MSB from the PLOGI response.
136 	 * These bits contain the Buffer-to-Buffer State Change Number
137 	 * from the target and need to be passed to the FW.
138 	 */
139 	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
140 	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
141 	if (ssp_value > hsp_value) {
142 		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
143 		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
144 				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
145 	}
146 
147 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
148 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
149 	return 1;
150 bad_service_param:
151 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
152 			 "0207 Device %x "
153 			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
154 			 "invalid service parameters.  Ignoring device.\n",
155 			 ndlp->nlp_DID,
156 			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
157 			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
158 			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
159 			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
160 	return 0;
161 }
162 
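/* Return a pointer to the ELS response payload of a completed ELS command,
 * or NULL (forcing a local-reject status) when the command buffer was
 * already zero'ed by lpfc_els_abort.
 */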
163 static void *
164 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
165 			struct lpfc_iocbq *rspiocb)
166 {
167 	struct lpfc_dmabuf *pcmd, *prsp;
168 	uint32_t *lp;
169 	void     *ptr = NULL;
170 	IOCB_t   *irsp;
171 
172 	irsp = &rspiocb->iocb;
173 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
174 
175 	/* For lpfc_els_abort, context2 could be zero'ed to delay
176 	 * freeing associated memory till after ABTS completes.
177 	 */
178 	if (pcmd) {
179 		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
180 				       list);
181 		if (prsp) {
182 			lp = (uint32_t *) prsp->virt;
183 			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
184 		}
185 	} else {
186 		/* Force ulpStatus error since we are returning NULL ptr */
187 		if (!(irsp->ulpStatus)) {
188 			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
189 			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
190 		}
191 		ptr = NULL;
192 	}
193 	return ptr;
194 }
195 
196 
197 
198 /*
199  * Free resources / clean up outstanding I/Os
 * associated with an LPFC_NODELIST entry. This
201  * routine effectively results in a "software abort".
202  */
203 int
204 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
205 {
206 	LIST_HEAD(completions);
207 	LIST_HEAD(txcmplq_completions);
208 	LIST_HEAD(abort_list);
209 	struct lpfc_sli  *psli = &phba->sli;
210 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
211 	struct lpfc_iocbq *iocb, *next_iocb;
212 
213 	/* Abort outstanding I/O on NPort <nlp_DID> */
214 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
215 			 "2819 Abort outstanding I/O on NPort x%x "
216 			 "Data: x%x x%x x%x\n",
217 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
218 			 ndlp->nlp_rpi);
219 
220 	lpfc_fabric_abort_nport(ndlp);
221 
222 	/* First check the txq */
223 	spin_lock_irq(&phba->hbalock);
224 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
225 		/* Check to see if iocb matches the nport we are looking for */
226 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so dequeue and call compl with an error */
228 			list_move_tail(&iocb->list, &completions);
229 		}
230 	}
231 
232 	/* Next check the txcmplq */
233 	list_splice_init(&pring->txcmplq, &txcmplq_completions);
234 	spin_unlock_irq(&phba->hbalock);
235 
236 	list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) {
237 		/* Check to see if iocb matches the nport we are looking for */
238 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
239 			list_add_tail(&iocb->dlist, &abort_list);
240 	}
241 	spin_lock_irq(&phba->hbalock);
242 	list_splice(&txcmplq_completions, &pring->txcmplq);
243 	spin_unlock_irq(&phba->hbalock);
244 
245 	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
		spin_lock_irq(&phba->hbalock);
		list_del_init(&iocb->dlist);
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
250 	}
251 
252 	/* Cancel all the IOCBs from the completions list */
253 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
254 			      IOERR_SLI_ABORTED);
255 
256 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
257 	return 0;
258 }
259 
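/* Process an incoming PLOGI: validate the service parameters, handle the
 * pt2pt and NPIV restricted-login cases, queue a REG_LOGIN mailbox and
 * send the appropriate ACC or LS_RJT.  Returns 0 when the PLOGI is
 * rejected outright, 1 otherwise.
 */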
260 static int
261 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
262 	       struct lpfc_iocbq *cmdiocb)
263 {
264 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
265 	struct lpfc_hba    *phba = vport->phba;
266 	struct lpfc_dmabuf *pcmd;
267 	uint32_t *lp;
268 	IOCB_t *icmd;
269 	struct serv_parm *sp;
270 	LPFC_MBOXQ_t *mbox;
271 	struct ls_rjt stat;
272 	int rc;
273 
274 	memset(&stat, 0, sizeof (struct ls_rjt));
275 	if (vport->port_state <= LPFC_FDISC) {
276 		/* Before responding to PLOGI, check for pt2pt mode.
277 		 * If we are pt2pt, with an outstanding FLOGI, abort
278 		 * the FLOGI and resend it first.
279 		 */
280 		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
283 				/* If the other side is supposed to initiate
284 				 * the PLOGI anyway, just ACC it now and
285 				 * move on with discovery.
286 				 */
287 				phba->fc_edtov = FF_DEF_EDTOV;
288 				phba->fc_ratov = FF_DEF_RATOV;
289 				/* Start discovery - this should just do
290 				   CLEAR_LA */
291 				lpfc_disc_start(vport);
292 			} else
293 				lpfc_initial_flogi(vport);
294 		} else {
295 			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
296 			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
297 			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
298 					    ndlp, NULL);
299 			return 0;
300 		}
301 	}
302 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
303 	lp = (uint32_t *) pcmd->virt;
304 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
305 	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
306 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid pname\n");
308 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
309 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
310 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
311 			NULL);
312 		return 0;
313 	}
314 	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
315 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid nname\n");
317 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
318 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
319 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
320 			NULL);
321 		return 0;
322 	}
323 	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
		/* Reject this request because of invalid parameters */
325 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
326 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
327 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
328 			NULL);
329 		return 0;
330 	}
331 	icmd = &cmdiocb->iocb;
332 
333 	/* PLOGI chkparm OK */
334 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
335 			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
336 			 "x%x x%x x%x\n",
337 			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
338 			 ndlp->nlp_rpi, vport->port_state,
339 			 vport->fc_flag);
340 
341 	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
342 		ndlp->nlp_fcp_info |= CLASS2;
343 	else
344 		ndlp->nlp_fcp_info |= CLASS3;
345 
346 	ndlp->nlp_class_sup = 0;
347 	if (sp->cls1.classValid)
348 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
349 	if (sp->cls2.classValid)
350 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
351 	if (sp->cls3.classValid)
352 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
353 	if (sp->cls4.classValid)
354 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
355 	ndlp->nlp_maxframe =
356 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
357 
358 	/* no need to reg_login if we are already in one of these states */
359 	switch (ndlp->nlp_state) {
360 	case  NLP_STE_NPR_NODE:
361 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
362 			break;
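		/* Fall through when NLP_NPR_ADISC is set - treat the node
		 * like the logged-in states below and just ACC the PLOGI.
		 */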
363 	case  NLP_STE_REG_LOGIN_ISSUE:
364 	case  NLP_STE_PRLI_ISSUE:
365 	case  NLP_STE_UNMAPPED_NODE:
366 	case  NLP_STE_MAPPED_NODE:
367 		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
368 		return 1;
369 	}
370 
371 	/* Check for Nport to NPort pt2pt protocol */
372 	if ((vport->fc_flag & FC_PT2PT) &&
373 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
374 
375 		/* rcv'ed PLOGI decides what our NPortId will be */
376 		vport->fc_myDID = icmd->un.rcvels.parmRo;
377 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
378 		if (mbox == NULL)
379 			goto out;
380 		lpfc_config_link(phba, mbox);
381 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
382 		mbox->vport = vport;
383 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
384 		if (rc == MBX_NOT_FINISHED) {
385 			mempool_free(mbox, phba->mbox_mem_pool);
386 			goto out;
387 		}
388 		/*
389 		 * For SLI4, the VFI/VPI are registered AFTER the
390 		 * Nport with the higher WWPN sends us a PLOGI with
391 		 * our assigned NPortId.
392 		 */
393 		if (phba->sli_rev == LPFC_SLI_REV4)
394 			lpfc_issue_reg_vfi(vport);
395 
396 		lpfc_can_disctmo(vport);
397 	}
398 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
399 	if (!mbox)
400 		goto out;
401 
402 	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
403 	if (phba->sli_rev == LPFC_SLI_REV4)
404 		lpfc_unreg_rpi(vport, ndlp);
405 
406 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
407 			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
408 	if (rc) {
409 		mempool_free(mbox, phba->mbox_mem_pool);
410 		goto out;
411 	}
412 
413 	/* ACC PLOGI rsp command needs to execute first,
414 	 * queue this mbox command to be processed later.
415 	 */
416 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
417 	/*
418 	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
419 	 * command issued in lpfc_cmpl_els_acc().
420 	 */
421 	mbox->vport = vport;
422 	spin_lock_irq(shost->host_lock);
423 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
424 	spin_unlock_irq(shost->host_lock);
425 
426 	/*
427 	 * If there is an outstanding PLOGI issued, abort it before
428 	 * sending ACC rsp for received PLOGI. If pending plogi
429 	 * is not canceled here, the plogi will be rejected by
430 	 * remote port and will be retried. On a configuration with
431 	 * single discovery thread, this will cause a huge delay in
432 	 * discovery. Also this will cause multiple state machines
433 	 * running in parallel for this node.
434 	 */
435 	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
436 		/* software abort outstanding PLOGI */
437 		lpfc_els_abort(phba, ndlp);
438 	}
439 
440 	if ((vport->port_type == LPFC_NPIV_PORT &&
441 	     vport->cfg_restrict_login)) {
442 
443 		/* In order to preserve RPIs, we want to cleanup
444 		 * the default RPI the firmware created to rcv
445 		 * this ELS request. The only way to do this is
446 		 * to register, then unregister the RPI.
447 		 */
448 		spin_lock_irq(shost->host_lock);
449 		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
450 		spin_unlock_irq(shost->host_lock);
451 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
452 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
453 		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
454 			ndlp, mbox);
455 		if (rc)
456 			mempool_free(mbox, phba->mbox_mem_pool);
457 		return 1;
458 	}
459 	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
460 	if (rc)
461 		mempool_free(mbox, phba->mbox_mem_pool);
462 	return 1;
463 out:
464 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
465 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
466 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
467 	return 0;
468 }
469 
470 /**
471  * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
472  * @phba: pointer to lpfc hba data structure.
473  * @mboxq: pointer to mailbox object
474  *
475  * This routine is invoked to issue a completion to a rcv'ed
476  * ADISC or PDISC after the paused RPI has been resumed.
477  **/
478 static void
479 lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
480 {
481 	struct lpfc_vport *vport;
482 	struct lpfc_iocbq *elsiocb;
483 	struct lpfc_nodelist *ndlp;
484 	uint32_t cmd;
485 
486 	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
487 	ndlp = (struct lpfc_nodelist *) mboxq->context2;
488 	vport = mboxq->vport;
489 	cmd = elsiocb->drvrTimeout;
490 
491 	if (cmd == ELS_CMD_ADISC) {
492 		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
493 	} else {
494 		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
495 			ndlp, NULL);
496 	}
497 	kfree(elsiocb);
498 	mempool_free(mboxq, phba->mbox_mem_pool);
499 }
500 
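/* Process an incoming ADISC or PDISC.  If the WWNN/WWPN match our cached
 * values the request is ACC'd (resuming the RPI first on SLI4); otherwise
 * it is rejected and the node is put back in NPR state to retry PLOGI.
 */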
501 static int
502 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
503 		struct lpfc_iocbq *cmdiocb)
504 {
505 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
506 	struct lpfc_iocbq  *elsiocb;
507 	struct lpfc_dmabuf *pcmd;
508 	struct serv_parm   *sp;
509 	struct lpfc_name   *pnn, *ppn;
510 	struct ls_rjt stat;
511 	ADISC *ap;
512 	IOCB_t *icmd;
513 	uint32_t *lp;
514 	uint32_t cmd;
515 
516 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
517 	lp = (uint32_t *) pcmd->virt;
518 
519 	cmd = *lp++;
520 	if (cmd == ELS_CMD_ADISC) {
521 		ap = (ADISC *) lp;
522 		pnn = (struct lpfc_name *) & ap->nodeName;
523 		ppn = (struct lpfc_name *) & ap->portName;
524 	} else {
525 		sp = (struct serv_parm *) lp;
526 		pnn = (struct lpfc_name *) & sp->nodeName;
527 		ppn = (struct lpfc_name *) & sp->portName;
528 	}
529 
530 	icmd = &cmdiocb->iocb;
531 	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
532 
533 		/*
		 * As soon as we send ACC, the remote NPort can
535 		 * start sending us data. Thus, for SLI4 we must
536 		 * resume the RPI before the ACC goes out.
537 		 */
538 		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
539 			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
540 				GFP_KERNEL);
541 			if (elsiocb) {
542 
543 				/* Save info from cmd IOCB used in rsp */
544 				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
545 					sizeof(struct lpfc_iocbq));
546 
547 				/* Save the ELS cmd */
548 				elsiocb->drvrTimeout = cmd;
549 
550 				lpfc_sli4_resume_rpi(ndlp,
551 					lpfc_mbx_cmpl_resume_rpi, elsiocb);
552 				goto out;
553 			}
554 		}
555 
556 		if (cmd == ELS_CMD_ADISC) {
557 			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
558 		} else {
559 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
560 				ndlp, NULL);
561 		}
562 out:
563 		/* If we are authenticated, move to the proper state */
564 		if (ndlp->nlp_type & NLP_FCP_TARGET)
565 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
566 		else
567 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
568 
569 		return 1;
570 	}
	/* Reject this request because of invalid parameters */
572 	stat.un.b.lsRjtRsvd0 = 0;
573 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
574 	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
575 	stat.un.b.vendorUnique = 0;
576 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
577 
578 	/* 1 sec timeout */
579 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
580 
581 	spin_lock_irq(shost->host_lock);
582 	ndlp->nlp_flag |= NLP_DELAY_TMO;
583 	spin_unlock_irq(shost->host_lock);
584 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
585 	ndlp->nlp_prev_state = ndlp->nlp_state;
586 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
587 	return 0;
588 }
589 
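/* Process an incoming LOGO or PRLO: ACC the request, move the node to NPR
 * state and arrange for a delayed PLOGI or FDISC retry where appropriate.
 */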
590 static int
591 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
592 	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
593 {
594 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
595 	struct lpfc_hba    *phba = vport->phba;
596 	struct lpfc_vport **vports;
	int i, active_vlink_present = 0;
598 
599 	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
600 	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
601 	 * PLOGIs during LOGO storms from a device.
602 	 */
603 	spin_lock_irq(shost->host_lock);
604 	ndlp->nlp_flag |= NLP_LOGO_ACC;
605 	spin_unlock_irq(shost->host_lock);
606 	if (els_cmd == ELS_CMD_PRLO)
607 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
608 	else
609 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
610 	if (ndlp->nlp_DID == Fabric_DID) {
611 		if (vport->port_state <= LPFC_FDISC)
612 			goto out;
613 		lpfc_linkdown_port(vport);
614 		spin_lock_irq(shost->host_lock);
615 		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
616 		spin_unlock_irq(shost->host_lock);
617 		vports = lpfc_create_vport_work_array(phba);
618 		if (vports) {
619 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
620 					i++) {
621 				if ((!(vports[i]->fc_flag &
622 					FC_VPORT_LOGO_RCVD)) &&
623 					(vports[i]->port_state > LPFC_FDISC)) {
624 					active_vlink_present = 1;
625 					break;
626 				}
627 			}
628 			lpfc_destroy_vport_work_array(phba, vports);
629 		}
630 
631 		if (active_vlink_present) {
632 			/*
633 			 * If there are other active VLinks present,
634 			 * re-instantiate the Vlink using FDISC.
635 			 */
636 			mod_timer(&ndlp->nlp_delayfunc,
637 				  jiffies + msecs_to_jiffies(1000));
638 			spin_lock_irq(shost->host_lock);
639 			ndlp->nlp_flag |= NLP_DELAY_TMO;
640 			spin_unlock_irq(shost->host_lock);
641 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
642 			vport->port_state = LPFC_FDISC;
643 		} else {
644 			spin_lock_irq(shost->host_lock);
645 			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
646 			spin_unlock_irq(shost->host_lock);
647 			lpfc_retry_pport_discovery(phba);
648 		}
649 	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
650 		((ndlp->nlp_type & NLP_FCP_TARGET) ||
651 		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
652 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
653 		/* Only try to re-login if this is NOT a Fabric Node */
654 		mod_timer(&ndlp->nlp_delayfunc,
655 			  jiffies + msecs_to_jiffies(1000 * 1));
656 		spin_lock_irq(shost->host_lock);
657 		ndlp->nlp_flag |= NLP_DELAY_TMO;
658 		spin_unlock_irq(shost->host_lock);
659 
660 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
661 	}
662 out:
663 	ndlp->nlp_prev_state = ndlp->nlp_state;
664 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
665 
666 	spin_lock_irq(shost->host_lock);
667 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
668 	spin_unlock_irq(shost->host_lock);
669 	/* The driver has to wait until the ACC completes before it continues
670 	 * processing the LOGO.  The action will resume in
671 	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
672 	 * unreg_login, the driver waits so the ACC does not get aborted.
673 	 */
674 	return 0;
675 }
676 
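/* Update the node's FCP initiator/target/retry capabilities from an
 * incoming PRLI and propagate any role change to the FC transport rport.
 */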
677 static void
678 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
679 	      struct lpfc_iocbq *cmdiocb)
680 {
681 	struct lpfc_dmabuf *pcmd;
682 	uint32_t *lp;
683 	PRLI *npr;
684 	struct fc_rport *rport = ndlp->rport;
685 	u32 roles;
686 
687 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
688 	lp = (uint32_t *) pcmd->virt;
689 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
690 
691 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
692 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
693 	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
694 	if (npr->prliType == PRLI_FCP_TYPE) {
695 		if (npr->initiatorFunc)
696 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
697 		if (npr->targetFunc) {
698 			ndlp->nlp_type |= NLP_FCP_TARGET;
699 			if (npr->writeXferRdyDis)
700 				ndlp->nlp_flag |= NLP_FIRSTBURST;
701 		}
702 		if (npr->Retry)
703 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
704 	}
705 	if (rport) {
706 		/* We need to update the rport role values */
707 		roles = FC_RPORT_ROLE_UNKNOWN;
708 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
709 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
710 		if (ndlp->nlp_type & NLP_FCP_TARGET)
711 			roles |= FC_RPORT_ROLE_FCP_TARGET;
712 
713 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
714 			"rport rolechg:   role:x%x did:x%x flg:x%x",
715 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
716 
717 		fc_remote_port_rolechg(rport, roles);
718 	}
719 }
720 
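/* Decide whether this node should be re-authenticated with ADISC on the
 * next discovery cycle.  Returns 1 and sets NLP_NPR_ADISC if so; otherwise
 * clears the flag (unregistering any registered RPI) and returns 0.
 */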
721 static uint32_t
722 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
723 {
724 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
725 
726 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
727 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
728 		return 0;
729 	}
730 
731 	if (!(vport->fc_flag & FC_PT2PT)) {
732 		/* Check config parameter use-adisc or FCP-2 */
733 		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
734 		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
735 		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
736 			spin_lock_irq(shost->host_lock);
737 			ndlp->nlp_flag |= NLP_NPR_ADISC;
738 			spin_unlock_irq(shost->host_lock);
739 			return 1;
740 		}
741 	}
742 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
743 	lpfc_unreg_rpi(vport, ndlp);
744 	return 0;
745 }
746 
747 /**
748  * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
749  * @phba : Pointer to lpfc_hba structure.
750  * @vport: Pointer to lpfc_vport structure.
 * @rpi  : rpi to be released.
 *
 * This function will send an unreg_login mailbox command to the firmware
 * to release an rpi.
755  **/
756 void
757 lpfc_release_rpi(struct lpfc_hba *phba,
758 		struct lpfc_vport *vport,
759 		uint16_t rpi)
760 {
761 	LPFC_MBOXQ_t *pmb;
762 	int rc;
763 
764 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
765 			GFP_KERNEL);
766 	if (!pmb)
767 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			"2796 mailbox memory allocation failed\n");
769 	else {
770 		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
771 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
772 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
773 		if (rc == MBX_NOT_FINISHED)
774 			mempool_free(pmb, phba->mbox_mem_pool);
775 	}
776 }
777 
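/* Catch-all handler for events that are not legal in the current node
 * state.  Logs the illegal transition and leaves the state unchanged,
 * releasing the RPI if the event was a successful REG_LOGIN completion.
 */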
778 static uint32_t
779 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
780 		  void *arg, uint32_t evt)
781 {
782 	struct lpfc_hba *phba;
783 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
784 	MAILBOX_t *mb;
785 	uint16_t rpi;
786 
787 	phba = vport->phba;
788 	/* Release the RPI if reglogin completing */
789 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
790 		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
791 		(!pmb->u.mb.mbxStatus)) {
792 		mb = &pmb->u.mb;
793 		rpi = pmb->u.mb.un.varWords[0];
794 		lpfc_release_rpi(phba, vport, rpi);
795 	}
796 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
797 			 "0271 Illegal State Transition: node x%x "
798 			 "event x%x, state x%x Data: x%x x%x\n",
799 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
800 			 ndlp->nlp_flag);
801 	return ndlp->nlp_state;
802 }
803 
804 static uint32_t
805 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
806 		  void *arg, uint32_t evt)
807 {
808 	/* This transition is only legal if we previously
809 	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
810 	 * working on the same NPortID, do nothing for this thread
811 	 * to stop it.
812 	 */
813 	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
814 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
815 			 "0272 Illegal State Transition: node x%x "
816 			 "event x%x, state x%x Data: x%x x%x\n",
817 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
818 			 ndlp->nlp_flag);
819 	}
820 	return ndlp->nlp_state;
821 }
822 
823 /* Start of Discovery State Machine routines */
824 
825 static uint32_t
826 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
827 			   void *arg, uint32_t evt)
828 {
829 	struct lpfc_iocbq *cmdiocb;
830 
831 	cmdiocb = (struct lpfc_iocbq *) arg;
832 
833 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
834 		return ndlp->nlp_state;
835 	}
836 	return NLP_STE_FREED_NODE;
837 }
838 
839 static uint32_t
840 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
841 			 void *arg, uint32_t evt)
842 {
843 	lpfc_issue_els_logo(vport, ndlp, 0);
844 	return ndlp->nlp_state;
845 }
846 
847 static uint32_t
848 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
849 			  void *arg, uint32_t evt)
850 {
851 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
852 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
853 
854 	spin_lock_irq(shost->host_lock);
855 	ndlp->nlp_flag |= NLP_LOGO_ACC;
856 	spin_unlock_irq(shost->host_lock);
857 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
858 
859 	return ndlp->nlp_state;
860 }
861 
862 static uint32_t
863 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
864 			   void *arg, uint32_t evt)
865 {
866 	return NLP_STE_FREED_NODE;
867 }
868 
869 static uint32_t
870 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
871 			   void *arg, uint32_t evt)
872 {
873 	return NLP_STE_FREED_NODE;
874 }
875 
876 static uint32_t
877 lpfc_device_recov_unused_node(struct lpfc_vport *vport,
878 			struct lpfc_nodelist *ndlp,
879 			   void *arg, uint32_t evt)
880 {
881 	return ndlp->nlp_state;
882 }
883 
884 static uint32_t
885 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
886 			   void *arg, uint32_t evt)
887 {
888 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
889 	struct lpfc_hba   *phba = vport->phba;
890 	struct lpfc_iocbq *cmdiocb = arg;
891 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
892 	uint32_t *lp = (uint32_t *) pcmd->virt;
893 	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
894 	struct ls_rjt stat;
895 	int port_cmp;
896 
897 	memset(&stat, 0, sizeof (struct ls_rjt));
898 
899 	/* For a PLOGI, we only accept if our portname is less
900 	 * than the remote portname.
901 	 */
902 	phba->fc_stat.elsLogiCol++;
903 	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
904 			  sizeof(struct lpfc_name));
905 
906 	if (port_cmp >= 0) {
907 		/* Reject this request because the remote node will accept
908 		   ours */
909 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
910 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
911 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
912 			NULL);
913 	} else {
914 		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
915 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
916 		    (vport->num_disc_nodes)) {
917 			spin_lock_irq(shost->host_lock);
918 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
919 			spin_unlock_irq(shost->host_lock);
920 			/* Check if there are more PLOGIs to be sent */
921 			lpfc_more_plogi(vport);
922 			if (vport->num_disc_nodes == 0) {
923 				spin_lock_irq(shost->host_lock);
924 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
925 				spin_unlock_irq(shost->host_lock);
926 				lpfc_can_disctmo(vport);
927 				lpfc_end_rscn(vport);
928 			}
929 		}
930 	} /* If our portname was less */
931 
932 	return ndlp->nlp_state;
933 }
934 
935 static uint32_t
936 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
937 			  void *arg, uint32_t evt)
938 {
939 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
940 	struct ls_rjt     stat;
941 
942 	memset(&stat, 0, sizeof (struct ls_rjt));
943 	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
944 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
945 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
946 	return ndlp->nlp_state;
947 }
948 
949 static uint32_t
950 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
951 			  void *arg, uint32_t evt)
952 {
953 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
954 
	/* software abort outstanding PLOGI */
956 	lpfc_els_abort(vport->phba, ndlp);
957 
958 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
959 	return ndlp->nlp_state;
960 }
961 
962 static uint32_t
963 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
964 			 void *arg, uint32_t evt)
965 {
966 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
967 	struct lpfc_hba   *phba = vport->phba;
968 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
969 
970 	/* software abort outstanding PLOGI */
971 	lpfc_els_abort(phba, ndlp);
972 
973 	if (evt == NLP_EVT_RCV_LOGO) {
974 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
975 	} else {
976 		lpfc_issue_els_logo(vport, ndlp, 0);
977 	}
978 
	/* Put ndlp in NPR state; set plogi timer for 1 sec */
980 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
981 	spin_lock_irq(shost->host_lock);
982 	ndlp->nlp_flag |= NLP_DELAY_TMO;
983 	spin_unlock_irq(shost->host_lock);
984 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
985 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
986 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
987 
988 	return ndlp->nlp_state;
989 }
990 
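/* Handle completion of an outgoing PLOGI in PLOGI_ISSUE state: validate
 * the response service parameters and issue REG_LOGIN.  On failure the
 * node is parked in NPR state and marked for deferred removal.
 */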
991 static uint32_t
992 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
993 			    struct lpfc_nodelist *ndlp,
994 			    void *arg,
995 			    uint32_t evt)
996 {
997 	struct lpfc_hba    *phba = vport->phba;
998 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
999 	struct lpfc_iocbq  *cmdiocb, *rspiocb;
1000 	struct lpfc_dmabuf *pcmd, *prsp, *mp;
1001 	uint32_t *lp;
1002 	IOCB_t *irsp;
1003 	struct serv_parm *sp;
1004 	LPFC_MBOXQ_t *mbox;
1005 
1006 	cmdiocb = (struct lpfc_iocbq *) arg;
1007 	rspiocb = cmdiocb->context_un.rsp_iocb;
1008 
1009 	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1010 		/* Recovery from PLOGI collision logic */
1011 		return ndlp->nlp_state;
1012 	}
1013 
1014 	irsp = &rspiocb->iocb;
1015 
1016 	if (irsp->ulpStatus)
1017 		goto out;
1018 
1019 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1020 
1021 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1022 
1023 	lp = (uint32_t *) prsp->virt;
1024 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1025 
1026 	/* Some switches have FDMI servers returning 0 for WWN */
1027 	if ((ndlp->nlp_DID != FDMI_DID) &&
1028 		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
1029 		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1030 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1031 				 "0142 PLOGI RSP: Invalid WWN.\n");
1032 		goto out;
1033 	}
1034 	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1035 		goto out;
1036 	/* PLOGI chkparm OK */
1037 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1038 			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1039 			 ndlp->nlp_DID, ndlp->nlp_state,
1040 			 ndlp->nlp_flag, ndlp->nlp_rpi);
1041 	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1042 		ndlp->nlp_fcp_info |= CLASS2;
1043 	else
1044 		ndlp->nlp_fcp_info |= CLASS3;
1045 
1046 	ndlp->nlp_class_sup = 0;
1047 	if (sp->cls1.classValid)
1048 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
1049 	if (sp->cls2.classValid)
1050 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
1051 	if (sp->cls3.classValid)
1052 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
1053 	if (sp->cls4.classValid)
1054 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
1055 	ndlp->nlp_maxframe =
1056 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1057 
1058 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1059 	if (!mbox) {
1060 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1061 			"0133 PLOGI: no memory for reg_login "
1062 			"Data: x%x x%x x%x x%x\n",
1063 			ndlp->nlp_DID, ndlp->nlp_state,
1064 			ndlp->nlp_flag, ndlp->nlp_rpi);
1065 		goto out;
1066 	}
1067 
1068 	lpfc_unreg_rpi(vport, ndlp);
1069 
1070 	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
1071 			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1072 		switch (ndlp->nlp_DID) {
1073 		case NameServer_DID:
1074 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1075 			break;
1076 		case FDMI_DID:
1077 			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1078 			break;
1079 		default:
1080 			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1081 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1082 		}
1083 		mbox->context2 = lpfc_nlp_get(ndlp);
1084 		mbox->vport = vport;
1085 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1086 		    != MBX_NOT_FINISHED) {
1087 			lpfc_nlp_set_state(vport, ndlp,
1088 					   NLP_STE_REG_LOGIN_ISSUE);
1089 			return ndlp->nlp_state;
1090 		}
1091 		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1092 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1093 		/* decrement node reference count to the failed mbox
1094 		 * command
1095 		 */
1096 		lpfc_nlp_put(ndlp);
1097 		mp = (struct lpfc_dmabuf *) mbox->context1;
1098 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1099 		kfree(mp);
1100 		mempool_free(mbox, phba->mbox_mem_pool);
1101 
1102 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1103 				 "0134 PLOGI: cannot issue reg_login "
1104 				 "Data: x%x x%x x%x x%x\n",
1105 				 ndlp->nlp_DID, ndlp->nlp_state,
1106 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1107 	} else {
1108 		mempool_free(mbox, phba->mbox_mem_pool);
1109 
1110 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1111 				 "0135 PLOGI: cannot format reg_login "
1112 				 "Data: x%x x%x x%x x%x\n",
1113 				 ndlp->nlp_DID, ndlp->nlp_state,
1114 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1115 	}
1116 
1117 
1118 out:
1119 	if (ndlp->nlp_DID == NameServer_DID) {
1120 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1121 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1122 				 "0261 Cannot Register NameServer login\n");
1123 	}
1124 
1125 	/*
1126 	** In case the node reference counter does not go to zero, ensure that
1127 	** the stale state for the node is not processed.
1128 	*/
1129 
1130 	ndlp->nlp_prev_state = ndlp->nlp_state;
1131 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1132 	spin_lock_irq(shost->host_lock);
1133 	ndlp->nlp_flag |= NLP_DEFER_RM;
1134 	spin_unlock_irq(shost->host_lock);
1135 	return NLP_STE_FREED_NODE;
1136 }
1137 
1138 static uint32_t
1139 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1140 			   void *arg, uint32_t evt)
1141 {
1142 	return ndlp->nlp_state;
1143 }
1144 
1145 static uint32_t
1146 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1147 	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1148 {
1149 	struct lpfc_hba *phba;
1150 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1151 	MAILBOX_t *mb = &pmb->u.mb;
1152 	uint16_t rpi;
1153 
1154 	phba = vport->phba;
1155 	/* Release the RPI */
1156 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
1157 		!mb->mbxStatus) {
1158 		rpi = pmb->u.mb.un.varWords[0];
1159 		lpfc_release_rpi(phba, vport, rpi);
1160 	}
1161 	return ndlp->nlp_state;
1162 }
1163 
1164 static uint32_t
1165 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1166 			   void *arg, uint32_t evt)
1167 {
1168 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1169 
1170 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1171 		spin_lock_irq(shost->host_lock);
1172 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1173 		spin_unlock_irq(shost->host_lock);
1174 		return ndlp->nlp_state;
1175 	} else {
1176 		/* software abort outstanding PLOGI */
1177 		lpfc_els_abort(vport->phba, ndlp);
1178 
1179 		lpfc_drop_node(vport, ndlp);
1180 		return NLP_STE_FREED_NODE;
1181 	}
1182 }
1183 
1184 static uint32_t
1185 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1186 			      struct lpfc_nodelist *ndlp,
1187 			      void *arg,
1188 			      uint32_t evt)
1189 {
1190 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1191 	struct lpfc_hba  *phba = vport->phba;
1192 
1193 	/* Don't do anything that will mess up processing of the
1194 	 * previous RSCN.
1195 	 */
1196 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1197 		return ndlp->nlp_state;
1198 
1199 	/* software abort outstanding PLOGI */
1200 	lpfc_els_abort(phba, ndlp);
1201 
1202 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1203 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1204 	spin_lock_irq(shost->host_lock);
1205 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1206 	spin_unlock_irq(shost->host_lock);
1207 
1208 	return ndlp->nlp_state;
1209 }
1210 
1211 static uint32_t
1212 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1213 			   void *arg, uint32_t evt)
1214 {
1215 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1216 	struct lpfc_hba   *phba = vport->phba;
1217 	struct lpfc_iocbq *cmdiocb;
1218 
1219 	/* software abort outstanding ADISC */
1220 	lpfc_els_abort(phba, ndlp);
1221 
1222 	cmdiocb = (struct lpfc_iocbq *) arg;
1223 
1224 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1225 		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1226 			spin_lock_irq(shost->host_lock);
1227 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1228 			spin_unlock_irq(shost->host_lock);
1229 			if (vport->num_disc_nodes)
1230 				lpfc_more_adisc(vport);
1231 		}
1232 		return ndlp->nlp_state;
1233 	}
1234 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1235 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1236 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1237 
1238 	return ndlp->nlp_state;
1239 }
1240 
1241 static uint32_t
1242 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1243 			  void *arg, uint32_t evt)
1244 {
1245 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1246 
1247 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1248 	return ndlp->nlp_state;
1249 }
1250 
1251 static uint32_t
1252 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1253 			  void *arg, uint32_t evt)
1254 {
1255 	struct lpfc_hba *phba = vport->phba;
1256 	struct lpfc_iocbq *cmdiocb;
1257 
1258 	cmdiocb = (struct lpfc_iocbq *) arg;
1259 
1260 	/* software abort outstanding ADISC */
1261 	lpfc_els_abort(phba, ndlp);
1262 
1263 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1264 	return ndlp->nlp_state;
1265 }
1266 
1267 static uint32_t
1268 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1269 			    struct lpfc_nodelist *ndlp,
1270 			    void *arg, uint32_t evt)
1271 {
1272 	struct lpfc_iocbq *cmdiocb;
1273 
1274 	cmdiocb = (struct lpfc_iocbq *) arg;
1275 
1276 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1277 	return ndlp->nlp_state;
1278 }
1279 
1280 static uint32_t
1281 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1282 			  void *arg, uint32_t evt)
1283 {
1284 	struct lpfc_iocbq *cmdiocb;
1285 
1286 	cmdiocb = (struct lpfc_iocbq *) arg;
1287 
1288 	/* Treat like rcv logo */
1289 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1290 	return ndlp->nlp_state;
1291 }
1292 
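/* Handle completion of an outgoing ADISC: on success move the node to
 * MAPPED/UNMAPPED (resuming the RPI first on SLI4); on failure fall back
 * to NPR state with a delayed PLOGI retry.
 */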
1293 static uint32_t
1294 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1295 			    struct lpfc_nodelist *ndlp,
1296 			    void *arg, uint32_t evt)
1297 {
1298 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1299 	struct lpfc_hba   *phba = vport->phba;
1300 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1301 	IOCB_t *irsp;
1302 	ADISC *ap;
1303 	int rc;
1304 
1305 	cmdiocb = (struct lpfc_iocbq *) arg;
1306 	rspiocb = cmdiocb->context_un.rsp_iocb;
1307 
1308 	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1309 	irsp = &rspiocb->iocb;
1310 
1311 	if ((irsp->ulpStatus) ||
1312 	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1313 		/* 1 sec timeout */
1314 		mod_timer(&ndlp->nlp_delayfunc,
1315 			  jiffies + msecs_to_jiffies(1000));
1316 		spin_lock_irq(shost->host_lock);
1317 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1318 		spin_unlock_irq(shost->host_lock);
1319 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1320 
1321 		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1322 		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1323 
1324 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1325 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1326 		lpfc_unreg_rpi(vport, ndlp);
1327 		return ndlp->nlp_state;
1328 	}
1329 
1330 	if (phba->sli_rev == LPFC_SLI_REV4) {
1331 		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1332 		if (rc) {
1333 			/* Stay in state and retry. */
1334 			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1335 			return ndlp->nlp_state;
1336 		}
1337 	}
1338 
1339 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1340 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1341 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1342 	} else {
1343 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1344 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1345 	}
1346 
1347 	return ndlp->nlp_state;
1348 }
1349 
1350 static uint32_t
1351 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1352 			   void *arg, uint32_t evt)
1353 {
1354 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1355 
1356 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1357 		spin_lock_irq(shost->host_lock);
1358 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1359 		spin_unlock_irq(shost->host_lock);
1360 		return ndlp->nlp_state;
1361 	} else {
1362 		/* software abort outstanding ADISC */
1363 		lpfc_els_abort(vport->phba, ndlp);
1364 
1365 		lpfc_drop_node(vport, ndlp);
1366 		return NLP_STE_FREED_NODE;
1367 	}
1368 }
1369 
1370 static uint32_t
1371 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1372 			      struct lpfc_nodelist *ndlp,
1373 			      void *arg,
1374 			      uint32_t evt)
1375 {
1376 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1377 	struct lpfc_hba  *phba = vport->phba;
1378 
1379 	/* Don't do anything that will mess up processing of the
1380 	 * previous RSCN.
1381 	 */
1382 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1383 		return ndlp->nlp_state;
1384 
1385 	/* software abort outstanding ADISC */
1386 	lpfc_els_abort(phba, ndlp);
1387 
1388 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1389 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1390 	spin_lock_irq(shost->host_lock);
1391 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1392 	spin_unlock_irq(shost->host_lock);
1393 	lpfc_disc_set_adisc(vport, ndlp);
1394 	return ndlp->nlp_state;
1395 }
1396 
1397 static uint32_t
1398 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1399 			      struct lpfc_nodelist *ndlp,
1400 			      void *arg,
1401 			      uint32_t evt)
1402 {
1403 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1404 
1405 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1406 	return ndlp->nlp_state;
1407 }
1408 
1409 static uint32_t
1410 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1411 			     struct lpfc_nodelist *ndlp,
1412 			     void *arg,
1413 			     uint32_t evt)
1414 {
1415 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1416 
1417 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1418 	return ndlp->nlp_state;
1419 }
1420 
1421 static uint32_t
1422 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1423 			     struct lpfc_nodelist *ndlp,
1424 			     void *arg,
1425 			     uint32_t evt)
1426 {
1427 	struct lpfc_hba   *phba = vport->phba;
1428 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1429 	LPFC_MBOXQ_t	  *mb;
1430 	LPFC_MBOXQ_t	  *nextmb;
1431 	struct lpfc_dmabuf *mp;
1432 
1433 	cmdiocb = (struct lpfc_iocbq *) arg;
1434 
1435 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1436 	if ((mb = phba->sli.mbox_active)) {
1437 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1438 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1439 			lpfc_nlp_put(ndlp);
1440 			mb->context2 = NULL;
1441 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1442 		}
1443 	}
1444 
1445 	spin_lock_irq(&phba->hbalock);
1446 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1447 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1448 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1449 			mp = (struct lpfc_dmabuf *) (mb->context1);
1450 			if (mp) {
1451 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1452 				kfree(mp);
1453 			}
1454 			lpfc_nlp_put(ndlp);
1455 			list_del(&mb->list);
1456 			phba->sli.mboxq_cnt--;
1457 			mempool_free(mb, phba->mbox_mem_pool);
1458 		}
1459 	}
1460 	spin_unlock_irq(&phba->hbalock);
1461 
1462 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1463 	return ndlp->nlp_state;
1464 }
1465 
1466 static uint32_t
1467 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1468 			       struct lpfc_nodelist *ndlp,
1469 			       void *arg,
1470 			       uint32_t evt)
1471 {
1472 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1473 
1474 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1475 	return ndlp->nlp_state;
1476 }
1477 
1478 static uint32_t
1479 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1480 			     struct lpfc_nodelist *ndlp,
1481 			     void *arg,
1482 			     uint32_t evt)
1483 {
1484 	struct lpfc_iocbq *cmdiocb;
1485 
1486 	cmdiocb = (struct lpfc_iocbq *) arg;
1487 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1488 	return ndlp->nlp_state;
1489 }
1490 
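/* Handle REG_LOGIN mailbox completion: on success record the RPI and
 * either issue PRLI (non-fabric nodes) or go to UNMAPPED; on failure put
 * the node back in NPR state (retrying PLOGI unless the HBA is out of
 * RPI resources).
 */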
1491 static uint32_t
1492 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1493 				  struct lpfc_nodelist *ndlp,
1494 				  void *arg,
1495 				  uint32_t evt)
1496 {
1497 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1498 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1499 	MAILBOX_t *mb = &pmb->u.mb;
1500 	uint32_t did  = mb->un.varWords[1];
1501 
1502 	if (mb->mbxStatus) {
1503 		/* RegLogin failed */
1504 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1505 				"0246 RegLogin failed Data: x%x x%x x%x x%x "
1506 				 "x%x\n",
1507 				 did, mb->mbxStatus, vport->port_state,
1508 				 mb->un.varRegLogin.vpi,
1509 				 mb->un.varRegLogin.rpi);
1510 		/*
1511 		 * If RegLogin failed due to lack of HBA resources do not
1512 		 * retry discovery.
1513 		 */
1514 		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1515 			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1516 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1517 			return ndlp->nlp_state;
1518 		}
1519 
		/* Put ndlp in NPR state; set plogi timer for 1 sec */
1521 		mod_timer(&ndlp->nlp_delayfunc,
1522 			  jiffies + msecs_to_jiffies(1000 * 1));
1523 		spin_lock_irq(shost->host_lock);
1524 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1525 		spin_unlock_irq(shost->host_lock);
1526 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1527 
1528 		lpfc_issue_els_logo(vport, ndlp, 0);
1529 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1530 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1531 		return ndlp->nlp_state;
1532 	}
1533 
1534 	/* SLI4 ports have preallocated logical rpis. */
1535 	if (vport->phba->sli_rev < LPFC_SLI_REV4)
1536 		ndlp->nlp_rpi = mb->un.varWords[0];
1537 
1538 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1539 
1540 	/* Only if we are not a fabric nport do we issue PRLI */
1541 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1542 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1543 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1544 		lpfc_issue_els_prli(vport, ndlp, 0);
1545 	} else {
1546 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1547 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1548 	}
1549 	return ndlp->nlp_state;
1550 }
1551 
1552 static uint32_t
1553 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1554 			      struct lpfc_nodelist *ndlp,
1555 			      void *arg,
1556 			      uint32_t evt)
1557 {
1558 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1559 
1560 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1561 		spin_lock_irq(shost->host_lock);
1562 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1563 		spin_unlock_irq(shost->host_lock);
1564 		return ndlp->nlp_state;
1565 	} else {
1566 		lpfc_drop_node(vport, ndlp);
1567 		return NLP_STE_FREED_NODE;
1568 	}
1569 }
1570 
1571 static uint32_t
1572 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1573 				 struct lpfc_nodelist *ndlp,
1574 				 void *arg,
1575 				 uint32_t evt)
1576 {
1577 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1578 
1579 	/* Don't do anything that will mess up processing of the
1580 	 * previous RSCN.
1581 	 */
1582 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1583 		return ndlp->nlp_state;
1584 
1585 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1586 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1587 	spin_lock_irq(shost->host_lock);
1588 	ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1589 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1590 	spin_unlock_irq(shost->host_lock);
1591 	lpfc_disc_set_adisc(vport, ndlp);
1592 	return ndlp->nlp_state;
1593 }
1594 
1595 static uint32_t
1596 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1597 			  void *arg, uint32_t evt)
1598 {
1599 	struct lpfc_iocbq *cmdiocb;
1600 
1601 	cmdiocb = (struct lpfc_iocbq *) arg;
1602 
1603 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1604 	return ndlp->nlp_state;
1605 }
1606 
1607 static uint32_t
1608 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1609 			 void *arg, uint32_t evt)
1610 {
1611 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1612 
1613 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1614 	return ndlp->nlp_state;
1615 }
1616 
1617 static uint32_t
1618 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1619 			 void *arg, uint32_t evt)
1620 {
1621 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1622 
1623 	/* Software abort outstanding PRLI before sending acc */
1624 	lpfc_els_abort(vport->phba, ndlp);
1625 
1626 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1627 	return ndlp->nlp_state;
1628 }
1629 
1630 static uint32_t
1631 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1632 			   void *arg, uint32_t evt)
1633 {
1634 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1635 
1636 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1637 	return ndlp->nlp_state;
1638 }
1639 
/* This routine is invoked when we rcv a PRLO request from a nport
1641  * we are logged into.  We should send back a PRLO rsp setting the
1642  * appropriate bits.
1643  * NEXT STATE = PRLI_ISSUE
1644  */
1645 static uint32_t
1646 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1647 			 void *arg, uint32_t evt)
1648 {
1649 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1650 
1651 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1652 	return ndlp->nlp_state;
1653 }
1654 
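/* Handle completion of an outgoing PRLI: record the target/initiator
 * capabilities from the response and move the node to MAPPED or UNMAPPED.
 * NPIV ports with restrict_login enabled log out nodes that are not
 * FCP targets.
 */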
1655 static uint32_t
1656 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1657 			  void *arg, uint32_t evt)
1658 {
1659 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1660 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1661 	struct lpfc_hba   *phba = vport->phba;
1662 	IOCB_t *irsp;
1663 	PRLI *npr;
1664 
1665 	cmdiocb = (struct lpfc_iocbq *) arg;
1666 	rspiocb = cmdiocb->context_un.rsp_iocb;
1667 	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1668 
1669 	irsp = &rspiocb->iocb;
1670 	if (irsp->ulpStatus) {
1671 		if ((vport->port_type == LPFC_NPIV_PORT) &&
1672 		    vport->cfg_restrict_login) {
1673 			goto out;
1674 		}
1675 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1676 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1677 		return ndlp->nlp_state;
1678 	}
1679 
1680 	/* Check out PRLI rsp */
1681 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1682 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1683 	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
1684 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1685 	    (npr->prliType == PRLI_FCP_TYPE)) {
1686 		if (npr->initiatorFunc)
1687 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1688 		if (npr->targetFunc) {
1689 			ndlp->nlp_type |= NLP_FCP_TARGET;
1690 			if (npr->writeXferRdyDis)
1691 				ndlp->nlp_flag |= NLP_FIRSTBURST;
1692 		}
1693 		if (npr->Retry)
1694 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1695 	}
1696 	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1697 	    (vport->port_type == LPFC_NPIV_PORT) &&
1698 	     vport->cfg_restrict_login) {
1699 out:
1700 		spin_lock_irq(shost->host_lock);
1701 		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1702 		spin_unlock_irq(shost->host_lock);
1703 		lpfc_issue_els_logo(vport, ndlp, 0);
1704 
1705 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1706 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1707 		return ndlp->nlp_state;
1708 	}
1709 
1710 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1711 	if (ndlp->nlp_type & NLP_FCP_TARGET)
1712 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1713 	else
1714 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1715 	return ndlp->nlp_state;
1716 }
1717 
1718 /*! lpfc_device_rm_prli_issue
1719  *
1720  * \pre
1721  * \post
1722  * \param   vport
1723  * \param   ndlp
1724  * \param   arg
1725  * \param   evt
1726  * \return  uint32_t
1727  *
1728  * \b Description:
1729  *    This routine is invoked when we receive a request to remove an nport we
1730  *    are in the process of PRLIing.  If discovery is still pending for the
1731  *    node, mark it for removal once discovery completes; otherwise, software
1732  *    abort the outstanding PRLI and drop the node.
1733  *
1734  */
1735 
1736 static uint32_t
1737 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1738 			  void *arg, uint32_t evt)
1739 {
1740 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1741 
1742 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1743 		spin_lock_irq(shost->host_lock);
1744 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1745 		spin_unlock_irq(shost->host_lock);
1746 		return ndlp->nlp_state;
1747 	} else {
1748 		/* software abort outstanding PRLI */
1749 		lpfc_els_abort(vport->phba, ndlp);
1750 
1751 		lpfc_drop_node(vport, ndlp);
1752 		return NLP_STE_FREED_NODE;
1753 	}
1754 }
1755 
1756 
1757 /*! lpfc_device_recov_prli_issue
1758  *
1759  * \pre
1760  * \post
1761  * \param   vport
1762  * \param   ndlp
1763  * \param   arg
1764  * \param   evt
1765  * \return  uint32_t
1766  *
1767  * \b Description:
1768  *    This routine is invoked when the state of a device is unknown, such as
1769  *    during a link down.  Unless RSCN processing is deferred, we software
1770  *    abort the outstanding PRLI command and move the node to the NPR state
1771  *    so it can be recovered later.
1772  */
1773 static uint32_t
1774 lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1775 			     struct lpfc_nodelist *ndlp,
1776 			     void *arg,
1777 			     uint32_t evt)
1778 {
1779 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1780 	struct lpfc_hba  *phba = vport->phba;
1781 
1782 	/* Don't do anything that will mess up processing of the
1783 	 * previous RSCN.
1784 	 */
1785 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1786 		return ndlp->nlp_state;
1787 
1788 	/* software abort outstanding PRLI */
1789 	lpfc_els_abort(phba, ndlp);
1790 
1791 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1792 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1793 	spin_lock_irq(shost->host_lock);
1794 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1795 	spin_unlock_irq(shost->host_lock);
1796 	lpfc_disc_set_adisc(vport, ndlp);
1797 	return ndlp->nlp_state;
1798 }
1799 
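/* While a LOGO is outstanding (LOGO_ISSUE state), inbound PLOGI, PRLI,
 * ADISC/PDISC, and PRLO requests are rejected with LS_RJT (unable to
 * perform command request); an inbound LOGO is accepted, and the node is
 * handled when our own LOGO completes.
 */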
1800 static uint32_t
1801 lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1802 			  void *arg, uint32_t evt)
1803 {
1804 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1805 	struct ls_rjt     stat;
1806 
1807 	memset(&stat, 0, sizeof(struct ls_rjt));
1808 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1809 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1810 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1811 	return ndlp->nlp_state;
1812 }
1813 
1814 static uint32_t
1815 lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1816 			 void *arg, uint32_t evt)
1817 {
1818 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1819 	struct ls_rjt     stat;
1820 
1821 	memset(&stat, 0, sizeof(struct ls_rjt));
1822 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1823 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1824 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1825 	return ndlp->nlp_state;
1826 }
1827 
1828 static uint32_t
1829 lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1830 			 void *arg, uint32_t evt)
1831 {
1832 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1833 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1834 
1835 	spin_lock_irq(shost->host_lock);
1836 	ndlp->nlp_flag |= NLP_LOGO_ACC;
1837 	spin_unlock_irq(shost->host_lock);
1838 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1839 	return ndlp->nlp_state;
1840 }
1841 
1842 static uint32_t
1843 lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1844 			   void *arg, uint32_t evt)
1845 {
1846 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1847 	struct ls_rjt     stat;
1848 
1849 	memset(&stat, 0, sizeof(struct ls_rjt));
1850 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1851 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1852 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1853 	return ndlp->nlp_state;
1854 }
1855 
1856 static uint32_t
1857 lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1858 			 void *arg, uint32_t evt)
1859 {
1860 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1861 	struct ls_rjt     stat;
1862 
1863 	memset(&stat, 0, sizeof(struct ls_rjt));
1864 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1865 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1866 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1867 	return ndlp->nlp_state;
1868 }
1869 
1870 static uint32_t
1871 lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1872 			  void *arg, uint32_t evt)
1873 {
1874 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1875 
1876 	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
1877 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1878 	spin_lock_irq(shost->host_lock);
1879 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1880 	spin_unlock_irq(shost->host_lock);
1881 	lpfc_disc_set_adisc(vport, ndlp);
1882 	return ndlp->nlp_state;
1883 }
1884 
1885 static uint32_t
1886 lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1887 			  void *arg, uint32_t evt)
1888 {
1889 	/*
1890 	 * Take no action.  If a LOGO is outstanding, then possibly DevLoss has
1891 	 * timed out and is calling for Device Remove.  In this case, the LOGO
1892 	 * must be allowed to complete in state LOGO_ISSUE so that the rpi
1893 	 * and other NLP flags are correctly cleaned up.
1894 	 */
1895 	return ndlp->nlp_state;
1896 }
1897 
1898 static uint32_t
1899 lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
1900 			     struct lpfc_nodelist *ndlp,
1901 			     void *arg, uint32_t evt)
1902 {
1903 	/*
1904 	 * Device Recovery events have no meaning for a node with a LOGO
1905 	 * outstanding.  The LOGO has to complete first and handle the
1906 	 * node from that point.
1907 	 */
1908 	return ndlp->nlp_state;
1909 }
1910 
1911 static uint32_t
1912 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1913 			  void *arg, uint32_t evt)
1914 {
1915 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1916 
1917 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1918 	return ndlp->nlp_state;
1919 }
1920 
1921 static uint32_t
1922 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1923 			 void *arg, uint32_t evt)
1924 {
1925 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1926 
1927 	lpfc_rcv_prli(vport, ndlp, cmdiocb);
1928 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1929 	return ndlp->nlp_state;
1930 }
1931 
1932 static uint32_t
1933 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1934 			 void *arg, uint32_t evt)
1935 {
1936 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1937 
1938 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1939 	return ndlp->nlp_state;
1940 }
1941 
1942 static uint32_t
1943 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1944 			   void *arg, uint32_t evt)
1945 {
1946 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1947 
1948 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1949 	return ndlp->nlp_state;
1950 }
1951 
1952 static uint32_t
1953 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1954 			 void *arg, uint32_t evt)
1955 {
1956 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1957 
1958 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1959 	return ndlp->nlp_state;
1960 }
1961 
1962 static uint32_t
1963 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1964 			     struct lpfc_nodelist *ndlp,
1965 			     void *arg,
1966 			     uint32_t evt)
1967 {
1968 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1969 
1970 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1971 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1972 	spin_lock_irq(shost->host_lock);
1973 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1974 	spin_unlock_irq(shost->host_lock);
1975 	lpfc_disc_set_adisc(vport, ndlp);
1976 
1977 	return ndlp->nlp_state;
1978 }
1979 
1980 static uint32_t
1981 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1982 			   void *arg, uint32_t evt)
1983 {
1984 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1985 
1986 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1987 	return ndlp->nlp_state;
1988 }
1989 
1990 static uint32_t
1991 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1992 			  void *arg, uint32_t evt)
1993 {
1994 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1995 
1996 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1997 	return ndlp->nlp_state;
1998 }
1999 
2000 static uint32_t
2001 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2002 			  void *arg, uint32_t evt)
2003 {
2004 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2005 
2006 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2007 	return ndlp->nlp_state;
2008 }
2009 
2010 static uint32_t
2011 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
2012 			    struct lpfc_nodelist *ndlp,
2013 			    void *arg, uint32_t evt)
2014 {
2015 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2016 
2017 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2018 	return ndlp->nlp_state;
2019 }
2020 
2021 static uint32_t
2022 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2023 			  void *arg, uint32_t evt)
2024 {
2025 	struct lpfc_hba  *phba = vport->phba;
2026 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2027 
2028 	/* flush the target */
2029 	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2030 			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
2031 
2032 	/* Treat like rcv logo */
2033 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
2034 	return ndlp->nlp_state;
2035 }
2036 
2037 static uint32_t
2038 lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2039 			      struct lpfc_nodelist *ndlp,
2040 			      void *arg,
2041 			      uint32_t evt)
2042 {
2043 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2044 
2045 	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
2046 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2047 	spin_lock_irq(shost->host_lock);
2048 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2049 	spin_unlock_irq(shost->host_lock);
2050 	lpfc_disc_set_adisc(vport, ndlp);
2051 	return ndlp->nlp_state;
2052 }
2053 
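/* Handlers for nodes in the NPR (NPort Recovery) state.  An inbound ELS
 * from a node in recovery may restart discovery: depending on the node's
 * flags we accept or reject the request and, if no delayed retry or
 * discovery is already pending for the node, issue an ADISC or PLOGI to
 * bring it back.
 */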
2054 static uint32_t
2055 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2056 			void *arg, uint32_t evt)
2057 {
2058 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2059 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
2060 
2061 	/* Ignore PLOGI if we have an outstanding LOGO */
2062 	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
2063 		return ndlp->nlp_state;
2064 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
2065 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2066 		spin_lock_irq(shost->host_lock);
2067 		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
2068 		spin_unlock_irq(shost->host_lock);
2069 	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2070 		/* send PLOGI immediately, move to PLOGI issue state */
2071 		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2072 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2073 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2074 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2075 		}
2076 	}
2077 	return ndlp->nlp_state;
2078 }
2079 
2080 static uint32_t
2081 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2082 		       void *arg, uint32_t evt)
2083 {
2084 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2085 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2086 	struct ls_rjt     stat;
2087 
2088 	memset(&stat, 0, sizeof (struct ls_rjt));
2089 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2090 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2091 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2092 
2093 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2094 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2095 			spin_lock_irq(shost->host_lock);
2096 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2097 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2098 			spin_unlock_irq(shost->host_lock);
2099 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2100 			lpfc_issue_els_adisc(vport, ndlp, 0);
2101 		} else {
2102 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2103 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2104 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2105 		}
2106 	}
2107 	return ndlp->nlp_state;
2108 }
2109 
2110 static uint32_t
2111 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
2112 		       void *arg, uint32_t evt)
2113 {
2114 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2115 
2116 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2117 	return ndlp->nlp_state;
2118 }
2119 
2120 static uint32_t
2121 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2122 			 void *arg, uint32_t evt)
2123 {
2124 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2125 
2126 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2127 	/*
2128 	 * Do not start discovery if discovery is about to start
2129 	 * or discovery in progress for this node. Starting discovery
2130 	 * here will affect the counting of discovery threads.
2131 	 */
2132 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
2133 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2134 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2135 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2136 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2137 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2138 			lpfc_issue_els_adisc(vport, ndlp, 0);
2139 		} else {
2140 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2141 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2142 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2143 		}
2144 	}
2145 	return ndlp->nlp_state;
2146 }
2147 
2148 static uint32_t
2149 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2150 		       void *arg, uint32_t evt)
2151 {
2152 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2153 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2154 
2155 	spin_lock_irq(shost->host_lock);
2156 	ndlp->nlp_flag |= NLP_LOGO_ACC;
2157 	spin_unlock_irq(shost->host_lock);
2158 
2159 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2160 
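	/* If no delayed-retry timer is already armed, arm it for one second
	 * and remember PLOGI as the next ELS command so that login is
	 * retried after the PRLO accept is sent.
	 */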
2161 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2162 		mod_timer(&ndlp->nlp_delayfunc,
2163 			  jiffies + msecs_to_jiffies(1000 * 1));
2164 		spin_lock_irq(shost->host_lock);
2165 		ndlp->nlp_flag |= NLP_DELAY_TMO;
2166 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2167 		spin_unlock_irq(shost->host_lock);
2168 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
2169 	} else {
2170 		spin_lock_irq(shost->host_lock);
2171 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2172 		spin_unlock_irq(shost->host_lock);
2173 	}
2174 	return ndlp->nlp_state;
2175 }
2176 
2177 static uint32_t
2178 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2179 			 void *arg, uint32_t evt)
2180 {
2181 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2182 	IOCB_t *irsp;
2183 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2184 
2185 	cmdiocb = (struct lpfc_iocbq *) arg;
2186 	rspiocb = cmdiocb->context_un.rsp_iocb;
2187 
2188 	irsp = &rspiocb->iocb;
2189 	if (irsp->ulpStatus) {
2190 		spin_lock_irq(shost->host_lock);
2191 		ndlp->nlp_flag |= NLP_DEFER_RM;
2192 		spin_unlock_irq(shost->host_lock);
2193 		return NLP_STE_FREED_NODE;
2194 	}
2195 	return ndlp->nlp_state;
2196 }
2197 
2198 static uint32_t
2199 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2200 			void *arg, uint32_t evt)
2201 {
2202 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2203 	IOCB_t *irsp;
2204 
2205 	cmdiocb = (struct lpfc_iocbq *) arg;
2206 	rspiocb = cmdiocb->context_un.rsp_iocb;
2207 
2208 	irsp = &rspiocb->iocb;
2209 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2210 		lpfc_drop_node(vport, ndlp);
2211 		return NLP_STE_FREED_NODE;
2212 	}
2213 	return ndlp->nlp_state;
2214 }
2215 
2216 static uint32_t
2217 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2218 			void *arg, uint32_t evt)
2219 {
2220 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2221 
2222 	/* For the fabric port just clear the fc flags. */
2223 	if (ndlp->nlp_DID == Fabric_DID) {
2224 		spin_lock_irq(shost->host_lock);
2225 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2226 		spin_unlock_irq(shost->host_lock);
2227 	}
2228 	lpfc_unreg_rpi(vport, ndlp);
2229 	return ndlp->nlp_state;
2230 }
2231 
2232 static uint32_t
2233 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2234 			 void *arg, uint32_t evt)
2235 {
2236 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2237 	IOCB_t *irsp;
2238 
2239 	cmdiocb = (struct lpfc_iocbq *) arg;
2240 	rspiocb = cmdiocb->context_un.rsp_iocb;
2241 
2242 	irsp = &rspiocb->iocb;
2243 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2244 		lpfc_drop_node(vport, ndlp);
2245 		return NLP_STE_FREED_NODE;
2246 	}
2247 	return ndlp->nlp_state;
2248 }
2249 
2250 static uint32_t
2251 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2252 			    struct lpfc_nodelist *ndlp,
2253 			    void *arg, uint32_t evt)
2254 {
2255 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2256 	MAILBOX_t    *mb = &pmb->u.mb;
2257 
2258 	if (!mb->mbxStatus) {
2259 		/* SLI4 ports have preallocated logical rpis. */
2260 		if (vport->phba->sli_rev < LPFC_SLI_REV4)
2261 			ndlp->nlp_rpi = mb->un.varWords[0];
2262 		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2263 	} else {
2264 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2265 			lpfc_drop_node(vport, ndlp);
2266 			return NLP_STE_FREED_NODE;
2267 		}
2268 	}
2269 	return ndlp->nlp_state;
2270 }
2271 
2272 static uint32_t
2273 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2274 			void *arg, uint32_t evt)
2275 {
2276 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2277 
2278 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2279 		spin_lock_irq(shost->host_lock);
2280 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2281 		spin_unlock_irq(shost->host_lock);
2282 		return ndlp->nlp_state;
2283 	}
2284 	lpfc_drop_node(vport, ndlp);
2285 	return NLP_STE_FREED_NODE;
2286 }
2287 
2288 static uint32_t
2289 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2290 			   void *arg, uint32_t evt)
2291 {
2292 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2293 
2294 	/* Don't do anything that will mess up processing of the
2295 	 * previous RSCN.
2296 	 */
2297 	if (vport->fc_flag & FC_RSCN_DEFERRED)
2298 		return ndlp->nlp_state;
2299 
2300 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
2301 	spin_lock_irq(shost->host_lock);
2302 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2303 	spin_unlock_irq(shost->host_lock);
2304 	return ndlp->nlp_state;
2305 }
2306 
2307 
2308 /* This next section defines the NPort Discovery State Machine */
2309 
2310 /* There are 4 different doubly linked lists nodelist entries can reside on.
2311  * The plogi list and adisc list are used when Link Up discovery or RSCN
2312  * processing is needed. Each list holds the nodes that we will send PLOGI
2313  * or ADISC on. These lists will keep track of what nodes will be affected
2314  * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
2315  * The unmapped_list will contain all nodes that we have successfully logged
2316  * into at the Fibre Channel level. The mapped_list will contain all nodes
2317  * that are mapped FCP targets.
2318  */
2319 /*
2320  * The bind list is a list of undiscovered (potentially non-existent) nodes
2321  * that we have saved binding information on. This information is used when
2322  * nodes transition from the unmapped to the mapped list.
2323  */
2324 /* For UNUSED_NODE state, the node has just been allocated.
2325  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2326  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2327  * and put on the unmapped list. For ADISC processing, the node is taken off
2328  * the ADISC list and placed on either the mapped or unmapped list (depending
2329  * on its previous state). Once on the unmapped list, a PRLI is issued and the
2330  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2331  * changed to UNMAPPED_NODE. If the completion indicates a mapped
2332  * node, the node is taken off the unmapped list. The binding list is checked
2333  * for a valid binding, or a binding is automatically assigned. If binding
2334  * assignment is unsuccessful, the node is left on the unmapped list. If
2335  * binding assignment is successful, the associated binding list entry (if
2336  * any) is removed, and the node is placed on the mapped list.
2337  */
2338 /*
2339  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2340  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2341  * expire, all affected nodes will receive a DEVICE_RM event.
2342  */
2343 /*
2344  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2345  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2346  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2347  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2348  * we will first process the ADISC list.  32 entries are processed initially and
2349  * ADISC is initiated for each one.  Completions / Events for each node are
2350  * funneled through the state machine.  As each node finishes ADISC processing,
2351  * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2352  * waiting, and the ADISC list count is identically 0, then we are done. For
2353  * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2354  * can issue a CLEAR_LA and re-enable Link Events. Next we will process the
2355  * PLOGI list.  32 entries are processed initially and PLOGI is initiated for
2356  * each one.  Completions / Events for each node are funneled through the
2357  * state machine.  As each node finishes PLOGI processing, it starts PLOGI for
2358  * any nodes waiting for PLOGI processing. If no nodes are waiting, and the
2359  * PLOGI list count is identically 0, then we are done. We have now completed
2360  * discovery / RSCN handling. Upon completion, ALL nodes should be on either
2361  * the mapped or unmapped lists.
2362  */
2363 
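/*
 * The table below is the state machine's dispatch table.  It is laid out as
 * NLP_STE_MAX_STATE groups of NLP_EVT_MAX_EVENT entries, one group per state
 * in event order, so the action routine for a given (state, event) pair
 * lives at index (state * NLP_EVT_MAX_EVENT) + event; see
 * lpfc_disc_state_machine() below.
 */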
2364 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2365      (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2366 	/* Action routine                  Event       Current State  */
2367 	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
2368 	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
2369 	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
2370 	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
2371 	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
2372 	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
2373 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2374 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2375 	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
2376 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2377 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2378 	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
2379 	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
2380 
2381 	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
2382 	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
2383 	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
2384 	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
2385 	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
2386 	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
2387 	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
2388 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2389 	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
2390 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2391 	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2392 	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
2393 	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
2394 
2395 	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
2396 	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
2397 	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
2398 	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
2399 	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
2400 	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
2401 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2402 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2403 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2404 	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
2405 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2406 	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
2407 	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
2408 
2409 	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
2410 	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
2411 	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
2412 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
2413 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
2414 	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
2415 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2416 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2417 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2418 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2419 	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
2420 	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
2421 	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2422 
2423 	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
2424 	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
2425 	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
2426 	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
2427 	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
2428 	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
2429 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2430 	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
2431 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2432 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2433 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2434 	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2435 	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2436 
2437 	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
2438 	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
2439 	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
2440 	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
2441 	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
2442 	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
2443 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2444 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2445 	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
2446 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2447 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2448 	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
2449 	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */
2450 
2451 	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2452 	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2453 	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2454 	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2455 	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2456 	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2457 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2458 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2459 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2460 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2461 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2462 	lpfc_disc_illegal,		/* DEVICE_RM       */
2463 	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2464 
2465 	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2466 	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2467 	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2468 	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2469 	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2470 	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2471 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2472 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2473 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2474 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2475 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2476 	lpfc_disc_illegal,		/* DEVICE_RM       */
2477 	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2478 
2479 	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2480 	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2481 	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2482 	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2483 	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2484 	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2485 	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2486 	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2487 	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2488 	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2489 	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2490 	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2491 	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2492 };
2493 
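/*
 * lpfc_disc_state_machine - dispatch a discovery event for a node
 *
 * Looks up the action routine for the node's current state and the given
 * event in lpfc_disc_action[], invokes it, and logs the state transition.
 * A reference on ndlp is taken across the call so the node cannot be freed
 * while the action routine runs.  A purely illustrative call from an ELS
 * completion path might look like the following, where arg is the object
 * that triggered the event (the command iocb for ELS events, the mailbox
 * for a REG_LOGIN completion):
 *
 *	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PRLI);
 */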
2494 int
2495 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2496 			void *arg, uint32_t evt)
2497 {
2498 	uint32_t cur_state, rc;
2499 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2500 			 uint32_t);
2501 	uint32_t got_ndlp = 0;
2502 
2503 	if (lpfc_nlp_get(ndlp))
2504 		got_ndlp = 1;
2505 
2506 	cur_state = ndlp->nlp_state;
2507 
2508 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2509 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2510 			 "0211 DSM in event x%x on NPort x%x in "
2511 			 "state %d Data: x%x\n",
2512 			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2513 
2514 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2515 		 "DSM in:          evt:%d ste:%d did:x%x",
2516 		evt, cur_state, ndlp->nlp_DID);
2517 
2518 	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2519 	rc = (func) (vport, ndlp, arg, evt);
2520 
2521 	/* DSM out state <rc> on NPort <nlp_DID> */
2522 	if (got_ndlp) {
2523 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2524 			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2525 			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2526 
2527 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2528 			"DSM out:         ste:%d did:x%x flg:x%x",
2529 			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2530 		/* Decrement the ndlp reference count held for this function */
2531 		lpfc_nlp_put(ndlp);
2532 	} else {
2533 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2534 			"0213 DSM out state %d on NPort free\n", rc);
2535 
2536 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2537 			"DSM out:         ste:%d did:x%x flg:x%x",
2538 			rc, 0, 0);
2539 	}
2540 
2541 	return rc;
2542 }
2543