1  /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
30 
31 #include "lpfc_hw.h"
32 #include "lpfc_sli.h"
33 #include "lpfc_disc.h"
34 #include "lpfc_scsi.h"
35 #include "lpfc.h"
36 #include "lpfc_logmsg.h"
37 #include "lpfc_crtn.h"
38 #include "lpfc_vport.h"
39 #include "lpfc_debugfs.h"
40 
41 
42 /* Called to verify a rcv'ed ADISC was intended for us. */
43 static int
44 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
45 		 struct lpfc_name *nn, struct lpfc_name *pn)
46 {
	/* Check that the ADISC rsp WWNN / WWPN matches our internal node
	 * table entry for that node.
	 */
50 	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
51 		return 0;
52 
53 	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
54 		return 0;
55 
56 	/* we match, return success */
57 	return 1;
58 }
59 
60 int
61 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
62 		 struct serv_parm * sp, uint32_t class)
63 {
64 	volatile struct serv_parm *hsp = &vport->fc_sparam;
65 	uint16_t hsp_value, ssp_value = 0;
66 
67 	/*
68 	 * The receive data field size and buffer-to-buffer receive data field
69 	 * size entries are 16 bits but are represented as two 8-bit fields in
70 	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
72 	 * correcting the byte values.
73 	 */
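	/*
	 * Illustrative example (values are made up, not taken from any
	 * particular fabric): rcvDataSizeMsb = 0x08 and rcvDataSizeLsb = 0x00
	 * reconstruct to 0x0800, i.e. a 2048-byte receive data field size.
	 * The comparisons below clamp the remote port's value to our own
	 * advertised limit whenever it is larger.
	 */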
74 	if (sp->cls1.classValid) {
75 		hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
76 				hsp->cls1.rcvDataSizeLsb;
77 		ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
78 				sp->cls1.rcvDataSizeLsb;
79 		if (!ssp_value)
80 			goto bad_service_param;
81 		if (ssp_value > hsp_value) {
82 			sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
83 			sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
84 		}
85 	} else if (class == CLASS1) {
86 		goto bad_service_param;
87 	}
88 
89 	if (sp->cls2.classValid) {
90 		hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
91 				hsp->cls2.rcvDataSizeLsb;
92 		ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
93 				sp->cls2.rcvDataSizeLsb;
94 		if (!ssp_value)
95 			goto bad_service_param;
96 		if (ssp_value > hsp_value) {
97 			sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
98 			sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
99 		}
100 	} else if (class == CLASS2) {
101 		goto bad_service_param;
102 	}
103 
104 	if (sp->cls3.classValid) {
105 		hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
106 				hsp->cls3.rcvDataSizeLsb;
107 		ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
108 				sp->cls3.rcvDataSizeLsb;
109 		if (!ssp_value)
110 			goto bad_service_param;
111 		if (ssp_value > hsp_value) {
112 			sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
113 			sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
114 		}
115 	} else if (class == CLASS3) {
116 		goto bad_service_param;
117 	}
118 
119 	/*
120 	 * Preserve the upper four bits of the MSB from the PLOGI response.
121 	 * These bits contain the Buffer-to-Buffer State Change Number
122 	 * from the target and need to be passed to the FW.
123 	 */
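	/*
	 * Example (illustrative values only): if the remote bbRcvSizeMsb is
	 * 0xA9 and ours is 0x08, the merge below yields 0xA8: the remote's
	 * BB_SC_N nibble (0xA) is preserved while the size bits are clamped
	 * to our buffer-to-buffer receive size.
	 */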
124 	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
125 	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
126 	if (ssp_value > hsp_value) {
127 		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
128 		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
129 				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
130 	}
131 
132 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
133 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
134 	return 1;
135 bad_service_param:
136 	lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
137 			"%d (%d):0207 Device %x "
138 			"(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
139 			"invalid service parameters.  Ignoring device.\n",
140 			vport->phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
141 			sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
142 			sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
143 			sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
144 			sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
145 	return 0;
146 }
147 
148 static void *
149 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
150 			struct lpfc_iocbq *rspiocb)
151 {
152 	struct lpfc_dmabuf *pcmd, *prsp;
153 	uint32_t *lp;
154 	void     *ptr = NULL;
155 	IOCB_t   *irsp;
156 
157 	irsp = &rspiocb->iocb;
158 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
159 
	/* For lpfc_els_abort, context2 could be zeroed to delay
	 * freeing the associated memory until after the ABTS completes.
	 */
163 	if (pcmd) {
164 		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
165 				       list);
166 		if (prsp) {
167 			lp = (uint32_t *) prsp->virt;
168 			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
169 		}
170 	} else {
171 		/* Force ulpStatus error since we are returning NULL ptr */
172 		if (!(irsp->ulpStatus)) {
173 			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
174 			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
175 		}
176 		ptr = NULL;
177 	}
178 	return ptr;
179 }
180 
181 
182 /*
183  * Free resources / clean up outstanding I/Os
184  * associated with a LPFC_NODELIST entry. This
185  * routine effectively results in a "software abort".
186  */
187 int
188 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
189 {
190 	LIST_HEAD(completions);
191 	struct lpfc_sli  *psli = &phba->sli;
192 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
193 	struct lpfc_iocbq *iocb, *next_iocb;
194 	IOCB_t *cmd;
195 
196 	/* Abort outstanding I/O on NPort <nlp_DID> */
197 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
198 			"%d (%d):0205 Abort outstanding I/O on NPort x%x "
199 			"Data: x%x x%x x%x\n",
200 			phba->brd_no, ndlp->vport->vpi, ndlp->nlp_DID,
201 			ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
202 
203 	lpfc_fabric_abort_nport(ndlp);
204 
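	/*
	 * Two queues are walked below: iocbs still on the txq have not been
	 * handed to the HBA, so they are simply moved to a local list and
	 * completed with a local-reject error; iocbs on the txcmplq are
	 * already outstanding in the firmware and must instead be aborted
	 * via lpfc_sli_issue_abort_iotag().
	 */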
205 	/* First check the txq */
206 	spin_lock_irq(&phba->hbalock);
207 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
208 		/* Check to see if iocb matches the nport we are looking for */
209 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so dequeue it and complete it with an error */
211 			list_move_tail(&iocb->list, &completions);
212 			pring->txq_cnt--;
213 		}
214 	}
215 
216 	/* Next check the txcmplq */
217 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
218 		/* Check to see if iocb matches the nport we are looking for */
219 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
220 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
221 		}
222 	}
223 	spin_unlock_irq(&phba->hbalock);
224 
225 	while (!list_empty(&completions)) {
226 		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
227 		cmd = &iocb->iocb;
228 		list_del_init(&iocb->list);
229 
230 		if (!iocb->iocb_cmpl)
231 			lpfc_sli_release_iocbq(phba, iocb);
232 		else {
233 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
234 			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
235 			(iocb->iocb_cmpl) (phba, iocb, iocb);
236 		}
237 	}
238 
239 	/* If we are delaying issuing an ELS command, cancel it */
240 	if (ndlp->nlp_flag & NLP_DELAY_TMO)
241 		lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
242 	return 0;
243 }
244 
245 static int
246 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
247 	       struct lpfc_iocbq *cmdiocb)
248 {
249 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
250 	struct lpfc_hba    *phba = vport->phba;
251 	struct lpfc_dmabuf *pcmd;
252 	uint32_t *lp;
253 	IOCB_t *icmd;
254 	struct serv_parm *sp;
255 	LPFC_MBOXQ_t *mbox;
256 	struct ls_rjt stat;
257 	int rc;
258 
259 	memset(&stat, 0, sizeof (struct ls_rjt));
260 	if (vport->port_state <= LPFC_FLOGI) {
261 		/* Before responding to PLOGI, check for pt2pt mode.
262 		 * If we are pt2pt, with an outstanding FLOGI, abort
263 		 * the FLOGI and resend it first.
264 		 */
265 		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
268 				/* If the other side is supposed to initiate
269 				 * the PLOGI anyway, just ACC it now and
270 				 * move on with discovery.
271 				 */
272 				phba->fc_edtov = FF_DEF_EDTOV;
273 				phba->fc_ratov = FF_DEF_RATOV;
274 				/* Start discovery - this should just do
275 				   CLEAR_LA */
276 				lpfc_disc_start(vport);
277 			} else
278 				lpfc_initial_flogi(vport);
279 		} else {
280 			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
281 			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
282 			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
283 					    ndlp, NULL);
284 			return 0;
285 		}
286 	}
287 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
288 	lp = (uint32_t *) pcmd->virt;
289 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
	if (lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0) {
		/* Reject this request because of invalid parameters */
292 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
293 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
294 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
295 			NULL);
296 		return 0;
297 	}
298 	icmd = &cmdiocb->iocb;
299 
300 	/* PLOGI chkparm OK */
301 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
302 			"%d (%d):0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
303 			phba->brd_no, vport->vpi,
304 			ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
305 			ndlp->nlp_rpi);
306 
307 	if (phba->cfg_fcp_class == 2 && sp->cls2.classValid)
308 		ndlp->nlp_fcp_info |= CLASS2;
309 	else
310 		ndlp->nlp_fcp_info |= CLASS3;
311 
312 	ndlp->nlp_class_sup = 0;
313 	if (sp->cls1.classValid)
314 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
315 	if (sp->cls2.classValid)
316 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
317 	if (sp->cls3.classValid)
318 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
319 	if (sp->cls4.classValid)
320 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
321 	ndlp->nlp_maxframe =
322 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
323 
324 	/* no need to reg_login if we are already in one of these states */
325 	switch (ndlp->nlp_state) {
326 	case  NLP_STE_NPR_NODE:
327 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
328 			break;
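		/* Fall through: an NPR node still marked for ADISC is
		 * handled like the already-logged-in states below.
		 */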
329 	case  NLP_STE_REG_LOGIN_ISSUE:
330 	case  NLP_STE_PRLI_ISSUE:
331 	case  NLP_STE_UNMAPPED_NODE:
332 	case  NLP_STE_MAPPED_NODE:
333 		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL, 0);
334 		return 1;
335 	}
336 
337 	if ((vport->fc_flag & FC_PT2PT) &&
338 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
339 		/* rcv'ed PLOGI decides what our NPortId will be */
340 		vport->fc_myDID = icmd->un.rcvels.parmRo;
341 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
342 		if (mbox == NULL)
343 			goto out;
344 		lpfc_config_link(phba, mbox);
345 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
346 		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox,
					 (MBX_NOWAIT | MBX_STOP_IOCB));
349 		if (rc == MBX_NOT_FINISHED) {
350 			mempool_free(mbox, phba->mbox_mem_pool);
351 			goto out;
352 		}
353 
354 		lpfc_can_disctmo(vport);
355 	}
356 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
357 	if (!mbox)
358 		goto out;
359 
360 	rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
361 			    (uint8_t *) sp, mbox, 0);
362 	if (rc) {
363 		mempool_free(mbox, phba->mbox_mem_pool);
364 		goto out;
365 	}
366 
367 	/* ACC PLOGI rsp command needs to execute first,
368 	 * queue this mbox command to be processed later.
369 	 */
370 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
371 	/*
372 	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
373 	 * command issued in lpfc_cmpl_els_acc().
374 	 */
375 	mbox->vport = vport;
376 	spin_lock_irq(shost->host_lock);
377 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
378 	spin_unlock_irq(shost->host_lock);
379 
380 	/*
381 	 * If there is an outstanding PLOGI issued, abort it before
382 	 * sending ACC rsp for received PLOGI. If pending plogi
383 	 * is not canceled here, the plogi will be rejected by
384 	 * remote port and will be retried. On a configuration with
385 	 * single discovery thread, this will cause a huge delay in
386 	 * discovery. Also this will cause multiple state machines
387 	 * running in parallel for this node.
388 	 */
389 	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
390 		/* software abort outstanding PLOGI */
391 		lpfc_els_abort(phba, ndlp);
392 	}
393 
	if (vport->port_type == LPFC_NPIV_PORT &&
	    phba->cfg_vport_restrict_login) {
397 		/* In order to preserve RPIs, we want to cleanup
398 		 * the default RPI the firmware created to rcv
399 		 * this ELS request. The only way to do this is
400 		 * to register, then unregister the RPI.
401 		 */
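		/*
		 * NLP_RM_DFLT_RPI marks the node for that cleanup, and the
		 * reg_login mailbox prepared above is passed to
		 * lpfc_els_rsp_reject() so the register-then-unregister
		 * sequence can be driven from the LS_RJT completion.
		 */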
402 		spin_lock_irq(shost->host_lock);
403 		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
404 		spin_unlock_irq(shost->host_lock);
405 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
406 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
407 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
408 			ndlp, mbox);
409 		return 1;
410 	}
411 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox, 0);
412 	return 1;
413 
414 out:
415 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
416 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
417 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
418 	return 0;
419 }
420 
421 static int
422 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
423 		struct lpfc_iocbq *cmdiocb)
424 {
425 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
426 	struct lpfc_dmabuf *pcmd;
427 	struct serv_parm   *sp;
428 	struct lpfc_name   *pnn, *ppn;
429 	struct ls_rjt stat;
430 	ADISC *ap;
431 	IOCB_t *icmd;
432 	uint32_t *lp;
433 	uint32_t cmd;
434 
435 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
436 	lp = (uint32_t *) pcmd->virt;
437 
438 	cmd = *lp++;
439 	if (cmd == ELS_CMD_ADISC) {
440 		ap = (ADISC *) lp;
441 		pnn = (struct lpfc_name *) & ap->nodeName;
442 		ppn = (struct lpfc_name *) & ap->portName;
443 	} else {
444 		sp = (struct serv_parm *) lp;
445 		pnn = (struct lpfc_name *) & sp->nodeName;
446 		ppn = (struct lpfc_name *) & sp->portName;
447 	}
448 
449 	icmd = &cmdiocb->iocb;
450 	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
451 		if (cmd == ELS_CMD_ADISC) {
452 			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
453 		} else {
454 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
455 					 NULL, 0);
456 		}
457 		return 1;
458 	}
	/* Reject this request because of invalid parameters */
460 	stat.un.b.lsRjtRsvd0 = 0;
461 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
462 	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
463 	stat.un.b.vendorUnique = 0;
464 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
465 
466 	/* 1 sec timeout */
467 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
468 
469 	spin_lock_irq(shost->host_lock);
470 	ndlp->nlp_flag |= NLP_DELAY_TMO;
471 	spin_unlock_irq(shost->host_lock);
472 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
473 	ndlp->nlp_prev_state = ndlp->nlp_state;
474 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
475 	return 0;
476 }
477 
478 static int
479 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
480 	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
481 {
482 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
483 
	/* Put ndlp in NPR state with a 1 sec timeout for PLOGI, then ACC the LOGO */
485 	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
486 	 * PLOGIs during LOGO storms from a device.
487 	 */
488 	spin_lock_irq(shost->host_lock);
489 	ndlp->nlp_flag |= NLP_LOGO_ACC;
490 	spin_unlock_irq(shost->host_lock);
491 	if (els_cmd == ELS_CMD_PRLO)
492 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
493 	else
494 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
495 
496 	if (!(ndlp->nlp_type & NLP_FABRIC) ||
497 	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
498 		/* Only try to re-login if this is NOT a Fabric Node */
499 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
500 		spin_lock_irq(shost->host_lock);
501 		ndlp->nlp_flag |= NLP_DELAY_TMO;
502 		spin_unlock_irq(shost->host_lock);
503 
504 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
505 		ndlp->nlp_prev_state = ndlp->nlp_state;
506 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
507 	} else {
508 		ndlp->nlp_prev_state = ndlp->nlp_state;
509 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
510 	}
511 
512 	spin_lock_irq(shost->host_lock);
513 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
514 	spin_unlock_irq(shost->host_lock);
515 	/* The driver has to wait until the ACC completes before it continues
516 	 * processing the LOGO.  The action will resume in
517 	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
518 	 * unreg_login, the driver waits so the ACC does not get aborted.
519 	 */
520 	return 0;
521 }
522 
523 static void
524 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
525 	      struct lpfc_iocbq *cmdiocb)
526 {
527 	struct lpfc_dmabuf *pcmd;
528 	uint32_t *lp;
529 	PRLI *npr;
530 	struct fc_rport *rport = ndlp->rport;
531 	u32 roles;
532 
533 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
534 	lp = (uint32_t *) pcmd->virt;
535 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
536 
537 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
538 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
539 	if (npr->prliType == PRLI_FCP_TYPE) {
540 		if (npr->initiatorFunc)
541 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
542 		if (npr->targetFunc)
543 			ndlp->nlp_type |= NLP_FCP_TARGET;
544 		if (npr->Retry)
545 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
546 	}
547 	if (rport) {
548 		/* We need to update the rport role values */
549 		roles = FC_RPORT_ROLE_UNKNOWN;
550 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
551 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
552 		if (ndlp->nlp_type & NLP_FCP_TARGET)
553 			roles |= FC_RPORT_ROLE_FCP_TARGET;
554 
555 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
556 			"rport rolechg:   role:x%x did:x%x flg:x%x",
557 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
558 
559 		fc_remote_port_rolechg(rport, roles);
560 	}
561 }
562 
563 static uint32_t
564 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
565 {
566 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
567 	struct lpfc_hba  *phba = vport->phba;
568 
569 	/* Check config parameter use-adisc or FCP-2 */
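	/* ADISC revalidates an existing login without a new PLOGI, so the
	 * RPI is kept; otherwise the RPI is unregistered below and the node
	 * will have to PLOGI again.
	 */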
570 	if ((phba->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
571 	    ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
572 		spin_lock_irq(shost->host_lock);
573 		ndlp->nlp_flag |= NLP_NPR_ADISC;
574 		spin_unlock_irq(shost->host_lock);
575 		return 1;
576 	}
577 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
578 	lpfc_unreg_rpi(vport, ndlp);
579 	return 0;
580 }
581 
582 static uint32_t
583 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
584 		  void *arg, uint32_t evt)
585 {
586 	lpfc_printf_log(vport->phba, KERN_ERR, LOG_DISCOVERY,
587 			"%d (%d):0253 Illegal State Transition: node x%x "
588 			"event x%x, state x%x Data: x%x x%x\n",
589 			vport->phba->brd_no, vport->vpi,
590 			ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
591 			ndlp->nlp_flag);
592 	return ndlp->nlp_state;
593 }
594 
595 /* Start of Discovery State Machine routines */
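/* Each handler below returns the node's (possibly new) state, or
 * NLP_STE_FREED_NODE when the node has been dropped and must no longer
 * be referenced by the caller.
 */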
596 
597 static uint32_t
598 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
599 			   void *arg, uint32_t evt)
600 {
601 	struct lpfc_iocbq *cmdiocb;
602 
603 	cmdiocb = (struct lpfc_iocbq *) arg;
604 
605 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
606 		ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
607 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
608 		return ndlp->nlp_state;
609 	}
610 	lpfc_drop_node(vport, ndlp);
611 	return NLP_STE_FREED_NODE;
612 }
613 
614 static uint32_t
615 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
616 			 void *arg, uint32_t evt)
617 {
618 	lpfc_issue_els_logo(vport, ndlp, 0);
619 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
620 	return ndlp->nlp_state;
621 }
622 
623 static uint32_t
624 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
625 			  void *arg, uint32_t evt)
626 {
627 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
628 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
629 
630 	spin_lock_irq(shost->host_lock);
631 	ndlp->nlp_flag |= NLP_LOGO_ACC;
632 	spin_unlock_irq(shost->host_lock);
633 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
634 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
635 
636 	return ndlp->nlp_state;
637 }
638 
639 static uint32_t
640 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
641 			   void *arg, uint32_t evt)
642 {
643 	lpfc_drop_node(vport, ndlp);
644 	return NLP_STE_FREED_NODE;
645 }
646 
647 static uint32_t
648 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
649 			   void *arg, uint32_t evt)
650 {
651 	lpfc_drop_node(vport, ndlp);
652 	return NLP_STE_FREED_NODE;
653 }
654 
655 static uint32_t
656 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
657 			   void *arg, uint32_t evt)
658 {
659 	struct lpfc_hba   *phba = vport->phba;
660 	struct lpfc_iocbq *cmdiocb = arg;
661 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
662 	uint32_t *lp = (uint32_t *) pcmd->virt;
663 	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
664 	struct ls_rjt stat;
665 	int port_cmp;
666 
667 	memset(&stat, 0, sizeof (struct ls_rjt));
668 
669 	/* For a PLOGI, we only accept if our portname is less
670 	 * than the remote portname.
671 	 */
672 	phba->fc_stat.elsLogiCol++;
673 	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
674 			  sizeof(struct lpfc_name));
675 
676 	if (port_cmp >= 0) {
677 		/* Reject this request because the remote node will accept
678 		   ours */
679 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
680 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
681 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
682 			NULL);
683 	} else {
684 		lpfc_rcv_plogi(vport, ndlp, cmdiocb);
685 	} /* If our portname was less */
686 
687 	return ndlp->nlp_state;
688 }
689 
690 static uint32_t
691 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
692 			  void *arg, uint32_t evt)
693 {
694 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
695 	struct ls_rjt     stat;
696 
697 	memset(&stat, 0, sizeof (struct ls_rjt));
698 	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
699 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
700 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
701 	return ndlp->nlp_state;
702 }
703 
704 static uint32_t
705 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
706 			  void *arg, uint32_t evt)
707 {
708 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
709 
	/* software abort outstanding PLOGI */
711 	lpfc_els_abort(vport->phba, ndlp);
712 
713 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
714 	return ndlp->nlp_state;
715 }
716 
717 static uint32_t
718 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
719 			 void *arg, uint32_t evt)
720 {
721 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
722 	struct lpfc_hba   *phba = vport->phba;
723 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
724 
725 	/* software abort outstanding PLOGI */
726 	lpfc_els_abort(phba, ndlp);
727 
728 	if (evt == NLP_EVT_RCV_LOGO) {
729 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
730 	} else {
731 		lpfc_issue_els_logo(vport, ndlp, 0);
732 	}
733 
	/* Put ndlp in NPR state; set PLOGI timer for 1 sec */
735 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
736 	spin_lock_irq(shost->host_lock);
737 	ndlp->nlp_flag |= NLP_DELAY_TMO;
738 	spin_unlock_irq(shost->host_lock);
739 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
740 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
741 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
742 
743 	return ndlp->nlp_state;
744 }
745 
746 static uint32_t
747 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
748 			    struct lpfc_nodelist *ndlp,
749 			    void *arg,
750 			    uint32_t evt)
751 {
752 	struct lpfc_hba    *phba = vport->phba;
753 	struct lpfc_iocbq  *cmdiocb, *rspiocb;
754 	struct lpfc_dmabuf *pcmd, *prsp, *mp;
755 	uint32_t *lp;
756 	IOCB_t *irsp;
757 	struct serv_parm *sp;
758 	LPFC_MBOXQ_t *mbox;
759 
760 	cmdiocb = (struct lpfc_iocbq *) arg;
761 	rspiocb = cmdiocb->context_un.rsp_iocb;
762 
763 	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
764 		/* Recovery from PLOGI collision logic */
765 		return ndlp->nlp_state;
766 	}
767 
768 	irsp = &rspiocb->iocb;
769 
770 	if (irsp->ulpStatus)
771 		goto out;
772 
773 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
774 
775 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
776 
777 	lp = (uint32_t *) prsp->virt;
778 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
779 	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
780 		goto out;
781 
782 	/* PLOGI chkparm OK */
783 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
784 			"%d (%d):0121 PLOGI chkparm OK "
785 			"Data: x%x x%x x%x x%x\n",
786 			phba->brd_no, vport->vpi,
787 			ndlp->nlp_DID, ndlp->nlp_state,
788 			ndlp->nlp_flag, ndlp->nlp_rpi);
789 
790 	if (phba->cfg_fcp_class == 2 && (sp->cls2.classValid))
791 		ndlp->nlp_fcp_info |= CLASS2;
792 	else
793 		ndlp->nlp_fcp_info |= CLASS3;
794 
795 	ndlp->nlp_class_sup = 0;
796 	if (sp->cls1.classValid)
797 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
798 	if (sp->cls2.classValid)
799 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
800 	if (sp->cls3.classValid)
801 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
802 	if (sp->cls4.classValid)
803 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
804 	ndlp->nlp_maxframe =
805 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
806 
807 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
808 	if (!mbox) {
809 		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
810 			"%d (%d):0133 PLOGI: no memory for reg_login "
811 			"Data: x%x x%x x%x x%x\n",
812 			phba->brd_no, vport->vpi,
813 			ndlp->nlp_DID, ndlp->nlp_state,
814 			ndlp->nlp_flag, ndlp->nlp_rpi);
815 		goto out;
816 	}
817 
818 	lpfc_unreg_rpi(vport, ndlp);
819 
820 	if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
821 			   (uint8_t *) sp, mbox, 0) == 0) {
822 		switch (ndlp->nlp_DID) {
823 		case NameServer_DID:
824 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
825 			break;
826 		case FDMI_DID:
827 			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
828 			break;
829 		default:
830 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
831 		}
832 		mbox->context2 = lpfc_nlp_get(ndlp);
833 		mbox->vport = vport;
834 		if (lpfc_sli_issue_mbox(phba, mbox,
835 					(MBX_NOWAIT | MBX_STOP_IOCB))
836 		    != MBX_NOT_FINISHED) {
837 			lpfc_nlp_set_state(vport, ndlp,
838 					   NLP_STE_REG_LOGIN_ISSUE);
839 			return ndlp->nlp_state;
840 		}
841 		lpfc_nlp_put(ndlp);
842 		mp = (struct lpfc_dmabuf *) mbox->context1;
843 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
844 		kfree(mp);
845 		mempool_free(mbox, phba->mbox_mem_pool);
846 
847 		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
848 			"%d (%d):0134 PLOGI: cannot issue reg_login "
849 			"Data: x%x x%x x%x x%x\n",
850 			phba->brd_no, vport->vpi,
851 			ndlp->nlp_DID, ndlp->nlp_state,
852 			ndlp->nlp_flag, ndlp->nlp_rpi);
853 	} else {
854 		mempool_free(mbox, phba->mbox_mem_pool);
855 
856 		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
857 			"%d (%d):0135 PLOGI: cannot format reg_login "
858 			"Data: x%x x%x x%x x%x\n",
859 			phba->brd_no, vport->vpi,
860 			ndlp->nlp_DID, ndlp->nlp_state,
861 			ndlp->nlp_flag, ndlp->nlp_rpi);
862 	}
863 
864 
865 out:
866 	if (ndlp->nlp_DID == NameServer_DID) {
867 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
868 		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
869 			"%d (%d):0261 Cannot Register NameServer login\n",
870 			phba->brd_no, vport->vpi);
871 	}
872 
873 	/* Free this node since the driver cannot login or has the wrong
874 	   sparm */
875 	lpfc_drop_node(vport, ndlp);
876 	return NLP_STE_FREED_NODE;
877 }
878 
879 static uint32_t
880 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
881 			   void *arg, uint32_t evt)
882 {
883 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
884 
885 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
886 		spin_lock_irq(shost->host_lock);
887 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
888 		spin_unlock_irq(shost->host_lock);
889 		return ndlp->nlp_state;
890 	} else {
891 		/* software abort outstanding PLOGI */
892 		lpfc_els_abort(vport->phba, ndlp);
893 
894 		lpfc_drop_node(vport, ndlp);
895 		return NLP_STE_FREED_NODE;
896 	}
897 }
898 
899 static uint32_t
900 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
901 			      struct lpfc_nodelist *ndlp,
902 			      void *arg,
903 			      uint32_t evt)
904 {
905 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
906 	struct lpfc_hba  *phba = vport->phba;
907 
908 	/* Don't do anything that will mess up processing of the
909 	 * previous RSCN.
910 	 */
911 	if (vport->fc_flag & FC_RSCN_DEFERRED)
912 		return ndlp->nlp_state;
913 
914 	/* software abort outstanding PLOGI */
915 	lpfc_els_abort(phba, ndlp);
916 
917 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
918 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
919 	spin_lock_irq(shost->host_lock);
920 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
921 	spin_unlock_irq(shost->host_lock);
922 
923 	return ndlp->nlp_state;
924 }
925 
926 static uint32_t
927 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
928 			   void *arg, uint32_t evt)
929 {
930 	struct lpfc_hba   *phba = vport->phba;
931 	struct lpfc_iocbq *cmdiocb;
932 
933 	/* software abort outstanding ADISC */
934 	lpfc_els_abort(phba, ndlp);
935 
936 	cmdiocb = (struct lpfc_iocbq *) arg;
937 
938 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb))
939 		return ndlp->nlp_state;
940 
941 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
942 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
943 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
944 
945 	return ndlp->nlp_state;
946 }
947 
948 static uint32_t
949 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
950 			  void *arg, uint32_t evt)
951 {
952 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
953 
954 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
955 	return ndlp->nlp_state;
956 }
957 
958 static uint32_t
959 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
960 			  void *arg, uint32_t evt)
961 {
962 	struct lpfc_hba *phba = vport->phba;
963 	struct lpfc_iocbq *cmdiocb;
964 
965 	cmdiocb = (struct lpfc_iocbq *) arg;
966 
967 	/* software abort outstanding ADISC */
968 	lpfc_els_abort(phba, ndlp);
969 
970 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
971 	return ndlp->nlp_state;
972 }
973 
974 static uint32_t
975 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
976 			    struct lpfc_nodelist *ndlp,
977 			    void *arg, uint32_t evt)
978 {
979 	struct lpfc_iocbq *cmdiocb;
980 
981 	cmdiocb = (struct lpfc_iocbq *) arg;
982 
983 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
984 	return ndlp->nlp_state;
985 }
986 
987 static uint32_t
988 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
989 			  void *arg, uint32_t evt)
990 {
991 	struct lpfc_iocbq *cmdiocb;
992 
993 	cmdiocb = (struct lpfc_iocbq *) arg;
994 
995 	/* Treat like rcv logo */
996 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
997 	return ndlp->nlp_state;
998 }
999 
1000 static uint32_t
1001 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1002 			    struct lpfc_nodelist *ndlp,
1003 			    void *arg, uint32_t evt)
1004 {
1005 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1006 	struct lpfc_hba   *phba = vport->phba;
1007 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1008 	IOCB_t *irsp;
1009 	ADISC *ap;
1010 
1011 	cmdiocb = (struct lpfc_iocbq *) arg;
1012 	rspiocb = cmdiocb->context_un.rsp_iocb;
1013 
1014 	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1015 	irsp = &rspiocb->iocb;
1016 
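	/* lpfc_check_elscmpl_iocb() may return NULL (e.g. for an aborted
	 * ELS); in that case it has forced ulpStatus to a local-reject
	 * error, so the check below short-circuits before ap is
	 * dereferenced.
	 */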
1017 	if ((irsp->ulpStatus) ||
1018 	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1019 		/* 1 sec timeout */
1020 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1021 		spin_lock_irq(shost->host_lock);
1022 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1023 		spin_unlock_irq(shost->host_lock);
1024 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1025 
1026 		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1027 		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1028 
1029 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1030 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1031 		lpfc_unreg_rpi(vport, ndlp);
1032 		return ndlp->nlp_state;
1033 	}
1034 
1035 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1036 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1037 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1038 	} else {
1039 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1040 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1041 	}
1042 	return ndlp->nlp_state;
1043 }
1044 
1045 static uint32_t
1046 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1047 			   void *arg, uint32_t evt)
1048 {
1049 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1050 
1051 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1052 		spin_lock_irq(shost->host_lock);
1053 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1054 		spin_unlock_irq(shost->host_lock);
1055 		return ndlp->nlp_state;
1056 	} else {
1057 		/* software abort outstanding ADISC */
1058 		lpfc_els_abort(vport->phba, ndlp);
1059 
1060 		lpfc_drop_node(vport, ndlp);
1061 		return NLP_STE_FREED_NODE;
1062 	}
1063 }
1064 
1065 static uint32_t
1066 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1067 			      struct lpfc_nodelist *ndlp,
1068 			      void *arg,
1069 			      uint32_t evt)
1070 {
1071 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1072 	struct lpfc_hba  *phba = vport->phba;
1073 
1074 	/* Don't do anything that will mess up processing of the
1075 	 * previous RSCN.
1076 	 */
1077 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1078 		return ndlp->nlp_state;
1079 
1080 	/* software abort outstanding ADISC */
1081 	lpfc_els_abort(phba, ndlp);
1082 
1083 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1084 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1085 	spin_lock_irq(shost->host_lock);
1086 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1087 	spin_unlock_irq(shost->host_lock);
1088 	lpfc_disc_set_adisc(vport, ndlp);
1089 	return ndlp->nlp_state;
1090 }
1091 
1092 static uint32_t
1093 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1094 			      struct lpfc_nodelist *ndlp,
1095 			      void *arg,
1096 			      uint32_t evt)
1097 {
1098 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1099 
1100 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1101 	return ndlp->nlp_state;
1102 }
1103 
1104 static uint32_t
1105 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1106 			     struct lpfc_nodelist *ndlp,
1107 			     void *arg,
1108 			     uint32_t evt)
1109 {
1110 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1111 
1112 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1113 	return ndlp->nlp_state;
1114 }
1115 
1116 static uint32_t
1117 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1118 			     struct lpfc_nodelist *ndlp,
1119 			     void *arg,
1120 			     uint32_t evt)
1121 {
1122 	struct lpfc_hba   *phba = vport->phba;
1123 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1124 	LPFC_MBOXQ_t	  *mb;
1125 	LPFC_MBOXQ_t	  *nextmb;
1126 	struct lpfc_dmabuf *mp;
1127 
1130 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1131 	if ((mb = phba->sli.mbox_active)) {
1132 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1133 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1134 			lpfc_nlp_put(ndlp);
1135 			mb->context2 = NULL;
1136 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1137 		}
1138 	}
1139 
1140 	spin_lock_irq(&phba->hbalock);
1141 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1142 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1143 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1144 			mp = (struct lpfc_dmabuf *) (mb->context1);
1145 			if (mp) {
1146 				lpfc_mbuf_free(phba, mp->virt, mp->phys);
1147 				kfree(mp);
1148 			}
1149 			lpfc_nlp_put(ndlp);
1150 			list_del(&mb->list);
1151 			mempool_free(mb, phba->mbox_mem_pool);
1152 		}
1153 	}
1154 	spin_unlock_irq(&phba->hbalock);
1155 
1156 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1157 	return ndlp->nlp_state;
1158 }
1159 
1160 static uint32_t
1161 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1162 			       struct lpfc_nodelist *ndlp,
1163 			       void *arg,
1164 			       uint32_t evt)
1165 {
1166 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1167 
1168 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1169 	return ndlp->nlp_state;
1170 }
1171 
1172 static uint32_t
1173 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1174 			     struct lpfc_nodelist *ndlp,
1175 			     void *arg,
1176 			     uint32_t evt)
1177 {
1178 	struct lpfc_iocbq *cmdiocb;
1179 
1180 	cmdiocb = (struct lpfc_iocbq *) arg;
1181 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1182 	return ndlp->nlp_state;
1183 }
1184 
1185 static uint32_t
1186 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1187 				  struct lpfc_nodelist *ndlp,
1188 				  void *arg,
1189 				  uint32_t evt)
1190 {
1191 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1192 	struct lpfc_hba  *phba = vport->phba;
1193 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1194 	MAILBOX_t *mb = &pmb->mb;
1195 	uint32_t did  = mb->un.varWords[1];
1196 
1197 	if (mb->mbxStatus) {
1198 		/* RegLogin failed */
1199 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
1200 				"%d (%d):0246 RegLogin failed Data: x%x x%x "
1201 				"x%x\n",
1202 				phba->brd_no, vport->vpi,
1203 				did, mb->mbxStatus, vport->port_state);
1204 
1205 		/*
1206 		 * If RegLogin failed due to lack of HBA resources do not
1207 		 * retry discovery.
1208 		 */
1209 		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1210 			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
1211 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1212 			return ndlp->nlp_state;
1213 		}
1214 
		/* Put ndlp in NPR state; set PLOGI timer for 1 sec */
1216 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1217 		spin_lock_irq(shost->host_lock);
1218 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1219 		spin_unlock_irq(shost->host_lock);
1220 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1221 
1222 		lpfc_issue_els_logo(vport, ndlp, 0);
1223 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1224 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1225 		return ndlp->nlp_state;
1226 	}
1227 
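	/* REG_LOGIN succeeded; the completed mailbox carries the new RPI in
	 * its first word.
	 */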
1228 	ndlp->nlp_rpi = mb->un.varWords[0];
1229 
1230 	/* Only if we are not a fabric nport do we issue PRLI */
1231 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1232 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1233 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1234 		lpfc_issue_els_prli(vport, ndlp, 0);
1235 	} else {
1236 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1237 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1238 	}
1239 	return ndlp->nlp_state;
1240 }
1241 
1242 static uint32_t
1243 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1244 			      struct lpfc_nodelist *ndlp,
1245 			      void *arg,
1246 			      uint32_t evt)
1247 {
1248 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1249 
1250 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1251 		spin_lock_irq(shost->host_lock);
1252 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1253 		spin_unlock_irq(shost->host_lock);
1254 		return ndlp->nlp_state;
1255 	} else {
1256 		lpfc_drop_node(vport, ndlp);
1257 		return NLP_STE_FREED_NODE;
1258 	}
1259 }
1260 
1261 static uint32_t
1262 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1263 				 struct lpfc_nodelist *ndlp,
1264 				 void *arg,
1265 				 uint32_t evt)
1266 {
1267 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1268 
1269 	/* Don't do anything that will mess up processing of the
1270 	 * previous RSCN.
1271 	 */
1272 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1273 		return ndlp->nlp_state;
1274 
1275 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1276 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1277 	spin_lock_irq(shost->host_lock);
1278 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1279 	spin_unlock_irq(shost->host_lock);
1280 	lpfc_disc_set_adisc(vport, ndlp);
1281 	return ndlp->nlp_state;
1282 }
1283 
1284 static uint32_t
1285 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1286 			  void *arg, uint32_t evt)
1287 {
1288 	struct lpfc_iocbq *cmdiocb;
1289 
1290 	cmdiocb = (struct lpfc_iocbq *) arg;
1291 
1292 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1293 	return ndlp->nlp_state;
1294 }
1295 
1296 static uint32_t
1297 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1298 			 void *arg, uint32_t evt)
1299 {
1300 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1301 
1302 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1303 	return ndlp->nlp_state;
1304 }
1305 
1306 static uint32_t
1307 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1308 			 void *arg, uint32_t evt)
1309 {
1310 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1311 
1312 	/* Software abort outstanding PRLI before sending acc */
1313 	lpfc_els_abort(vport->phba, ndlp);
1314 
1315 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1316 	return ndlp->nlp_state;
1317 }
1318 
1319 static uint32_t
1320 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1321 			   void *arg, uint32_t evt)
1322 {
1323 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1324 
1325 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1326 	return ndlp->nlp_state;
1327 }
1328 
/* This routine is invoked when we receive a PRLO request from an nport
1330  * we are logged into.  We should send back a PRLO rsp setting the
1331  * appropriate bits.
1332  * NEXT STATE = PRLI_ISSUE
1333  */
1334 static uint32_t
1335 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1336 			 void *arg, uint32_t evt)
1337 {
1338 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1339 
1340 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1341 	return ndlp->nlp_state;
1342 }
1343 
1344 static uint32_t
1345 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1346 			  void *arg, uint32_t evt)
1347 {
1348 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1349 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1350 	struct lpfc_hba   *phba = vport->phba;
1351 	IOCB_t *irsp;
1352 	PRLI *npr;
1353 
1354 	cmdiocb = (struct lpfc_iocbq *) arg;
1355 	rspiocb = cmdiocb->context_un.rsp_iocb;
1356 	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1357 
1358 	irsp = &rspiocb->iocb;
1359 	if (irsp->ulpStatus) {
1360 		if ((vport->port_type == LPFC_NPIV_PORT) &&
1361 			phba->cfg_vport_restrict_login) {
1362 			goto out;
1363 		}
1364 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1365 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1366 		return ndlp->nlp_state;
1367 	}
1368 
1369 	/* Check out PRLI rsp */
1370 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1371 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1372 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1373 	    (npr->prliType == PRLI_FCP_TYPE)) {
1374 		if (npr->initiatorFunc)
1375 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1376 		if (npr->targetFunc)
1377 			ndlp->nlp_type |= NLP_FCP_TARGET;
1378 		if (npr->Retry)
1379 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1380 	}
1381 	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1382 	    (vport->port_type == LPFC_NPIV_PORT) &&
1383 	     phba->cfg_vport_restrict_login) {
1384 out:
1385 		spin_lock_irq(shost->host_lock);
1386 		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1387 		spin_unlock_irq(shost->host_lock);
1388 		lpfc_issue_els_logo(vport, ndlp, 0);
1389 
1390 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1391 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
1392 		return ndlp->nlp_state;
1393 	}
1394 
1395 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1396 	if (ndlp->nlp_type & NLP_FCP_TARGET)
1397 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1398 	else
1399 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1400 	return ndlp->nlp_state;
1401 }
1402 
1403 /*! lpfc_device_rm_prli_issue
1404  *
1405  * \pre
1406  * \post
1407  * \param   phba
1408  * \param   ndlp
1409  * \param   arg
1410  * \param   evt
1411  * \return  uint32_t
1412  *
1413  * \b Description:
 *    This routine is invoked when we receive a request to remove an nport we
 *    are in the process of PRLIing. We should software abort the outstanding
 *    PRLI, unreg login, and send a logout. We will change the node state to
 *    UNUSED_NODE and put it on the plogi list so it can be freed when the
 *    LOGO completes.
1418  *
1419  */
1420 
1421 static uint32_t
1422 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1423 			  void *arg, uint32_t evt)
1424 {
1425 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1426 
1427 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1428 		spin_lock_irq(shost->host_lock);
1429 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1430 		spin_unlock_irq(shost->host_lock);
1431 		return ndlp->nlp_state;
1432 	} else {
		/* software abort outstanding PRLI */
1434 		lpfc_els_abort(vport->phba, ndlp);
1435 
1436 		lpfc_drop_node(vport, ndlp);
1437 		return NLP_STE_FREED_NODE;
1438 	}
1439 }
1440 
1441 
1442 /*! lpfc_device_recov_prli_issue
1443  *
1444  * \pre
1445  * \post
1446  * \param   phba
1447  * \param   ndlp
1448  * \param   arg
1449  * \param   evt
1450  * \return  uint32_t
1451  *
1452  * \b Description:
 *    The routine is invoked when the state of a device is unknown, like
1454  *    during a link down. We should remove the nodelist entry from the
1455  *    unmapped list, issue a UNREG_LOGIN, do a software abort of the
1456  *    outstanding PRLI command, then free the node entry.
1457  */
1458 static uint32_t
1459 lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1460 			     struct lpfc_nodelist *ndlp,
1461 			     void *arg,
1462 			     uint32_t evt)
1463 {
1464 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1465 	struct lpfc_hba  *phba = vport->phba;
1466 
1467 	/* Don't do anything that will mess up processing of the
1468 	 * previous RSCN.
1469 	 */
1470 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1471 		return ndlp->nlp_state;
1472 
1473 	/* software abort outstanding PRLI */
1474 	lpfc_els_abort(phba, ndlp);
1475 
1476 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1477 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1478 	spin_lock_irq(shost->host_lock);
1479 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1480 	spin_unlock_irq(shost->host_lock);
1481 	lpfc_disc_set_adisc(vport, ndlp);
1482 	return ndlp->nlp_state;
1483 }
1484 
1485 static uint32_t
1486 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1487 			  void *arg, uint32_t evt)
1488 {
1489 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1490 
1491 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1492 	return ndlp->nlp_state;
1493 }
1494 
1495 static uint32_t
1496 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1497 			 void *arg, uint32_t evt)
1498 {
1499 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1500 
1501 	lpfc_rcv_prli(vport, ndlp, cmdiocb);
1502 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1503 	return ndlp->nlp_state;
1504 }
1505 
1506 static uint32_t
1507 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1508 			 void *arg, uint32_t evt)
1509 {
1510 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1511 
1512 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1513 	return ndlp->nlp_state;
1514 }
1515 
1516 static uint32_t
1517 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1518 			   void *arg, uint32_t evt)
1519 {
1520 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1521 
1522 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1523 	return ndlp->nlp_state;
1524 }
1525 
1526 static uint32_t
1527 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1528 			 void *arg, uint32_t evt)
1529 {
1530 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1531 
1532 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0);
1533 	return ndlp->nlp_state;
1534 }
1535 
1536 static uint32_t
1537 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1538 			     struct lpfc_nodelist *ndlp,
1539 			     void *arg,
1540 			     uint32_t evt)
1541 {
1542 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1543 
1544 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1545 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1546 	spin_lock_irq(shost->host_lock);
1547 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1548 	spin_unlock_irq(shost->host_lock);
1549 	lpfc_disc_set_adisc(vport, ndlp);
1550 
1551 	return ndlp->nlp_state;
1552 }
1553 
1554 static uint32_t
1555 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1556 			   void *arg, uint32_t evt)
1557 {
1558 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1559 
1560 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1561 	return ndlp->nlp_state;
1562 }
1563 
1564 static uint32_t
1565 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1566 			  void *arg, uint32_t evt)
1567 {
1568 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1569 
1570 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1571 	return ndlp->nlp_state;
1572 }
1573 
1574 static uint32_t
1575 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1576 			  void *arg, uint32_t evt)
1577 {
1578 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1579 
1580 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1581 	return ndlp->nlp_state;
1582 }
1583 
1584 static uint32_t
1585 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
1586 			    struct lpfc_nodelist *ndlp,
1587 			    void *arg, uint32_t evt)
1588 {
1589 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1590 
1591 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1592 	return ndlp->nlp_state;
1593 }
1594 
1595 static uint32_t
1596 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1597 			  void *arg, uint32_t evt)
1598 {
1599 	struct lpfc_hba  *phba = vport->phba;
1600 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1601 
1602 	/* flush the target */
1603 	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
1604 			    ndlp->nlp_sid, 0, 0, LPFC_CTX_TGT);
1605 
1606 	/* Treat like rcv logo */
1607 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1608 	return ndlp->nlp_state;
1609 }
1610 
1611 static uint32_t
1612 lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
1613 			      struct lpfc_nodelist *ndlp,
1614 			      void *arg,
1615 			      uint32_t evt)
1616 {
1617 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1618 
1619 	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
1620 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1621 	spin_lock_irq(shost->host_lock);
1622 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1623 	spin_unlock_irq(shost->host_lock);
1624 	lpfc_disc_set_adisc(vport, ndlp);
1625 	return ndlp->nlp_state;
1626 }
1627 
1628 static uint32_t
1629 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1630 			void *arg, uint32_t evt)
1631 {
1632 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1633 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
1634 
1635 	/* Ignore PLOGI if we have an outstanding LOGO */
1636 	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) {
1637 		return ndlp->nlp_state;
1638 	}
1639 
1640 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1641 		spin_lock_irq(shost->host_lock);
1642 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1643 		spin_unlock_irq(shost->host_lock);
1644 		return ndlp->nlp_state;
1645 	}
1646 
1647 	/* send PLOGI immediately, move to PLOGI issue state */
1648 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1649 		ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1650 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1651 		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1652 	}
1653 
1654 	return ndlp->nlp_state;
1655 }
1656 
1657 static uint32_t
1658 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1659 		       void *arg, uint32_t evt)
1660 {
1661 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1662 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1663 	struct ls_rjt     stat;
1664 
1665 	memset(&stat, 0, sizeof (struct ls_rjt));
1666 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1667 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1668 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1669 
1670 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1671 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1672 			spin_lock_irq(shost->host_lock);
1673 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1674 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1675 			spin_unlock_irq(shost->host_lock);
1676 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1677 			lpfc_issue_els_adisc(vport, ndlp, 0);
1678 		} else {
1679 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1680 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1681 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1682 		}
1683 	}
1684 	return ndlp->nlp_state;
1685 }
1686 
1687 static uint32_t
1688 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
1689 		       void *arg, uint32_t evt)
1690 {
1691 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1692 
1693 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1694 	return ndlp->nlp_state;
1695 }
1696 
1697 static uint32_t
1698 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1699 			 void *arg, uint32_t evt)
1700 {
1701 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1702 
1703 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1704 
	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery is already in progress for this node. Starting
	 * discovery here would affect the counting of discovery threads.
	 */
1710 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1711 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1712 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1713 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1714 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1715 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1716 			lpfc_issue_els_adisc(vport, ndlp, 0);
1717 		} else {
1718 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1719 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1720 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1721 		}
1722 	}
1723 	return ndlp->nlp_state;
1724 }
1725 
1726 static uint32_t
1727 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1728 		       void *arg, uint32_t evt)
1729 {
1730 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1731 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1732 
1733 	spin_lock_irq(shost->host_lock);
1734 	ndlp->nlp_flag |= NLP_LOGO_ACC;
1735 	spin_unlock_irq(shost->host_lock);
1736 
1737 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0);
1738 
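	/* If no delayed retry is already pending, arm the 1 second delay
	 * timer and note that a PLOGI should be issued when it expires.
	 */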
1739 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
1740 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1741 		spin_lock_irq(shost->host_lock);
1742 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1743 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1744 		spin_unlock_irq(shost->host_lock);
1745 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1746 	} else {
1747 		spin_lock_irq(shost->host_lock);
1748 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1749 		spin_unlock_irq(shost->host_lock);
1750 	}
1751 	return ndlp->nlp_state;
1752 }
1753 
1754 static uint32_t
1755 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1756 			 void *arg, uint32_t evt)
1757 {
1758 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1759 	IOCB_t *irsp;
1760 
1761 	cmdiocb = (struct lpfc_iocbq *) arg;
1762 	rspiocb = cmdiocb->context_un.rsp_iocb;
1763 
1764 	irsp = &rspiocb->iocb;
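	/* If the PLOGI completed in error while in NPR state, drop the node. */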
1765 	if (irsp->ulpStatus) {
1766 		lpfc_drop_node(vport, ndlp);
1767 		return NLP_STE_FREED_NODE;
1768 	}
1769 	return ndlp->nlp_state;
1770 }
1771 
1772 static uint32_t
1773 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1774 			void *arg, uint32_t evt)
1775 {
1776 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1777 	IOCB_t *irsp;
1778 
1779 	cmdiocb = (struct lpfc_iocbq *) arg;
1780 	rspiocb = cmdiocb->context_un.rsp_iocb;
1781 
1782 	irsp = &rspiocb->iocb;
1783 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1784 		lpfc_drop_node(vport, ndlp);
1785 		return NLP_STE_FREED_NODE;
1786 	}
1787 	return ndlp->nlp_state;
1788 }
1789 
1790 static uint32_t
1791 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1792 			void *arg, uint32_t evt)
1793 {
1794 	lpfc_unreg_rpi(vport, ndlp);
	/* Unregister the RPI for this node and stay in the current state. */
1796 	return ndlp->nlp_state;
1797 }
1798 
1799 static uint32_t
1800 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1801 			 void *arg, uint32_t evt)
1802 {
1803 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1804 	IOCB_t *irsp;
1805 
1806 	cmdiocb = (struct lpfc_iocbq *) arg;
1807 	rspiocb = cmdiocb->context_un.rsp_iocb;
1808 
1809 	irsp = &rspiocb->iocb;
1810 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1811 		lpfc_drop_node(vport, ndlp);
1812 		return NLP_STE_FREED_NODE;
1813 	}
1814 	return ndlp->nlp_state;
1815 }
1816 
1817 static uint32_t
1818 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1819 			    struct lpfc_nodelist *ndlp,
1820 			    void *arg, uint32_t evt)
1821 {
1822 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1823 	MAILBOX_t    *mb = &pmb->mb;
1824 
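	/* On success, record the RPI assigned by the REG_LOGIN mailbox;
	 * on failure, drop the node only if it was already marked for
	 * removal.
	 */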
	if (!mb->mbxStatus) {
		ndlp->nlp_rpi = mb->un.varWords[0];
	} else if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
1833 	return ndlp->nlp_state;
1834 }
1835 
1836 static uint32_t
1837 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1838 			void *arg, uint32_t evt)
1839 {
1840 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1841 
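	/* If this node is still marked for discovery, defer the removal
	 * by flagging it NLP_NODEV_REMOVE and keeping the current state.
	 */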
1842 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1843 		spin_lock_irq(shost->host_lock);
1844 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1845 		spin_unlock_irq(shost->host_lock);
1846 		return ndlp->nlp_state;
1847 	}
1848 	lpfc_drop_node(vport, ndlp);
1849 	return NLP_STE_FREED_NODE;
1850 }
1851 
1852 static uint32_t
1853 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1854 			   void *arg, uint32_t evt)
1855 {
1856 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1857 
1858 	/* Don't do anything that will mess up processing of the
1859 	 * previous RSCN.
1860 	 */
1861 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1862 		return ndlp->nlp_state;
1863 
1864 	spin_lock_irq(shost->host_lock);
1865 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1866 	spin_unlock_irq(shost->host_lock);
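	/* Cancel any pending delayed ELS retry for this node. */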
	if (ndlp->nlp_flag & NLP_DELAY_TMO)
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
1870 	return ndlp->nlp_state;
1871 }
1872 
1873 
1874 /* This next section defines the NPort Discovery State Machine */
1875 
/* There are 4 different doubly linked lists that nodelist entries can reside
 * on. The plogi list and adisc list are used when Link Up discovery or RSCN
 * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists keep track of which nodes will be affected
 * by an RSCN or a Link Up (typically, all nodes are affected on Link Up).
 * The unmapped_list contains all nodes that we have successfully logged
 * into at the Fibre Channel level. The mapped_list contains all nodes
 * that are mapped FCP targets.
 */
1885 /*
1886  * The bind list is a list of undiscovered (potentially non-existent) nodes
1887  * that we have saved binding information on. This information is used when
1888  * nodes transition from the unmapped to the mapped list.
1889  */
/* For UNUSED_NODE state, the node has just been allocated.
1891  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
1892  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
1893  * and put on the unmapped list. For ADISC processing, the node is taken off
1894  * the ADISC list and placed on either the mapped or unmapped list (depending
1895  * on its previous state). Once on the unmapped list, a PRLI is issued and the
1896  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
1897  * changed to UNMAPPED_NODE. If the completion indicates a mapped
1898  * node, the node is taken off the unmapped list. The binding list is checked
1899  * for a valid binding, or a binding is automatically assigned. If binding
1900  * assignment is unsuccessful, the node is left on the unmapped list. If
1901  * binding assignment is successful, the associated binding list entry (if
1902  * any) is removed, and the node is placed on the mapped list.
1903  */
1904 /*
 * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
 * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss
 * timers expire, all affected nodes will receive a DEVICE_RM event.
1908  */
1909 /*
1910  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
1911  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
1912  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
1913  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
 * we will first process the ADISC list.  32 entries are processed initially and
 * ADISC is initiated for each one.  Completions / Events for each node are
 * funneled through the state machine.  As each node finishes ADISC processing,
 * it starts ADISC for any nodes waiting for ADISC processing. If no nodes are
 * waiting, and the ADISC list count is identically 0, then we are done. For
 * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
 * can issue a CLEAR_LA and re-enable Link Events. Next we will process the
 * PLOGI list.  32 entries are processed initially and PLOGI is initiated for
 * each one.  Completions / Events for each node are funneled through the state
 * machine.  As each node finishes PLOGI processing, it starts PLOGI for any
 * nodes waiting for PLOGI processing. If no nodes are waiting, and the PLOGI
 * list count is identically 0, then we are done. We have now completed
 * discovery / RSCN
1926  * handling. Upon completion, ALL nodes should be on either the mapped or
1927  * unmapped lists.
1928  */
1929 
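/* The discovery state machine is table driven.  The flat array below holds
 * NLP_STE_MAX_STATE groups of NLP_EVT_MAX_EVENT action routines, one group
 * per node state, so the routine for a given (state, event) pair lives at
 * index (state * NLP_EVT_MAX_EVENT) + event.
 */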
1930 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
1931      (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
1932 	/* Action routine                  Event       Current State  */
1933 	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
1934 	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
1935 	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
1936 	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
1937 	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
1938 	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
1939 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
1940 	lpfc_disc_illegal,		/* CMPL_PRLI       */
1941 	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
1942 	lpfc_disc_illegal,		/* CMPL_ADISC      */
1943 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
1944 	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
1945 	lpfc_disc_illegal,		/* DEVICE_RECOVERY */
1946 
1947 	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
1948 	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
1949 	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
1950 	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
1951 	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
1952 	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
1953 	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
1954 	lpfc_disc_illegal,		/* CMPL_PRLI       */
1955 	lpfc_disc_illegal,		/* CMPL_LOGO       */
1956 	lpfc_disc_illegal,		/* CMPL_ADISC      */
1957 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
1958 	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
1959 	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
1960 
1961 	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
1962 	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
1963 	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
1964 	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
1965 	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
1966 	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
1967 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
1968 	lpfc_disc_illegal,		/* CMPL_PRLI       */
1969 	lpfc_disc_illegal,		/* CMPL_LOGO       */
1970 	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
1971 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
1972 	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
1973 	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
1974 
1975 	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
1977 	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
1978 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
1979 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
1980 	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
1981 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
1982 	lpfc_disc_illegal,		/* CMPL_PRLI       */
1983 	lpfc_disc_illegal,		/* CMPL_LOGO       */
1984 	lpfc_disc_illegal,		/* CMPL_ADISC      */
1985 	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
1986 	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
1987 	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
1988 
1989 	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
1990 	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
1991 	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
1992 	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
1993 	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
1994 	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
1995 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
1996 	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
1997 	lpfc_disc_illegal,		/* CMPL_LOGO       */
1998 	lpfc_disc_illegal,		/* CMPL_ADISC      */
1999 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2000 	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2001 	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2002 
2003 	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2004 	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2005 	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2006 	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2007 	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2008 	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2009 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2010 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2011 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2012 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2013 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2014 	lpfc_disc_illegal,		/* DEVICE_RM       */
2015 	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2016 
2017 	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2018 	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2019 	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2020 	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2021 	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2022 	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2023 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2024 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2025 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2026 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2027 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2028 	lpfc_disc_illegal,		/* DEVICE_RM       */
2029 	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2030 
2031 	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2032 	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2033 	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2034 	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2035 	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2036 	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2037 	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2038 	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2039 	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2040 	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2041 	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2042 	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2043 	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2044 };
2045 
2046 int
2047 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2048 			void *arg, uint32_t evt)
2049 {
2050 	struct lpfc_hba  *phba = vport->phba;
2051 	uint32_t cur_state, rc;
2052 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2053 			 uint32_t);
2054 
2055 	lpfc_nlp_get(ndlp);
2056 	cur_state = ndlp->nlp_state;
2057 
2058 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2059 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2060 			"%d (%d):0211 DSM in event x%x on NPort x%x in "
2061 			"state %d Data: x%x\n",
2062 			phba->brd_no, vport->vpi,
2063 			evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2064 
2065 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2066 		 "DSM in:          evt:%d ste:%d did:x%x",
2067 		evt, cur_state, ndlp->nlp_DID);
2068 
2069 	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2070 	rc = (func) (vport, ndlp, arg, evt);
2071 
2072 	/* DSM out state <rc> on NPort <nlp_DID> */
2073 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
2074 			"%d (%d):0212 DSM out state %d on NPort x%x "
2075 			"Data: x%x\n",
2076 			phba->brd_no, vport->vpi,
2077 			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2078 
2079 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2080 		 "DSM out:         ste:%d did:x%x flg:x%x",
2081 		rc, ndlp->nlp_DID, ndlp->nlp_flag);
2082 
2083 	lpfc_nlp_put(ndlp);
2084 
2085 	return rc;
2086 }
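
/* Illustrative sketch only, not part of the driver: it shows the calling
 * convention a caller such as the unsolicited ELS receive path might use to
 * feed an event into the state machine.  The event code NLP_EVT_RCV_PLOGI is
 * a real event defined in lpfc_disc.h; the wrapper function itself is
 * hypothetical.
 */
#if 0
static void
lpfc_example_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       struct lpfc_iocbq *cmdiocb)
{
	/* The action routine selected by (ndlp->nlp_state, event) handles
	 * the received PLOGI and returns the node's next state.
	 */
	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
}
#endif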
2087