xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_nportdisc.c (revision b802fb99ae964681d1754428f67970911e0476e9)
1  /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44 
45 
46 /* Called to verify a rcv'ed ADISC was intended for us. */
47 static int
48 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49 		 struct lpfc_name *nn, struct lpfc_name *pn)
50 {
51 	/* First, we MUST have a RPI registered */
52 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
53 		return 0;
54 
55 	/* Compare the ADISC rsp WWNN / WWPN against our internal node
56 	 * table entry for that node.
57 	 */
58 	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
59 		return 0;
60 
61 	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
62 		return 0;
63 
64 	/* we match, return success */
65 	return 1;
66 }
67 
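/* Validate the service parameters in a PLOGI/FLOGI payload against our
 * own. Per-class receive data field sizes larger than ours are clamped,
 * and a request for a class the remote does not support is rejected.
 * On success the remote node and port names are cached in the ndlp.
 */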
68 int
69 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
70 		 struct serv_parm *sp, uint32_t class, int flogi)
71 {
72 	volatile struct serv_parm *hsp = &vport->fc_sparam;
73 	uint16_t hsp_value, ssp_value = 0;
74 
75 	/*
76 	 * The receive data field size and buffer-to-buffer receive data field
77 	 * size entries are 16 bits but are represented as two 8-bit fields in
78 	 * the driver data structure to account for rsvd bits and other control
79 	 * bits.  Reconstruct and compare the fields as a 16-bit values before
80 	 * bits.  Reconstruct and compare the fields as 16-bit values before
81 	 */
82 	if (sp->cls1.classValid) {
83 		if (!flogi) {
84 			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
85 				     hsp->cls1.rcvDataSizeLsb);
86 			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
87 				     sp->cls1.rcvDataSizeLsb);
88 			if (!ssp_value)
89 				goto bad_service_param;
90 			if (ssp_value > hsp_value) {
91 				sp->cls1.rcvDataSizeLsb =
92 					hsp->cls1.rcvDataSizeLsb;
93 				sp->cls1.rcvDataSizeMsb =
94 					hsp->cls1.rcvDataSizeMsb;
95 			}
96 		}
97 	} else if (class == CLASS1)
98 		goto bad_service_param;
99 	if (sp->cls2.classValid) {
100 		if (!flogi) {
101 			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
102 				     hsp->cls2.rcvDataSizeLsb);
103 			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
104 				     sp->cls2.rcvDataSizeLsb);
105 			if (!ssp_value)
106 				goto bad_service_param;
107 			if (ssp_value > hsp_value) {
108 				sp->cls2.rcvDataSizeLsb =
109 					hsp->cls2.rcvDataSizeLsb;
110 				sp->cls2.rcvDataSizeMsb =
111 					hsp->cls2.rcvDataSizeMsb;
112 			}
113 		}
114 	} else if (class == CLASS2)
115 		goto bad_service_param;
116 	if (sp->cls3.classValid) {
117 		if (!flogi) {
118 			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
119 				     hsp->cls3.rcvDataSizeLsb);
120 			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
121 				     sp->cls3.rcvDataSizeLsb);
122 			if (!ssp_value)
123 				goto bad_service_param;
124 			if (ssp_value > hsp_value) {
125 				sp->cls3.rcvDataSizeLsb =
126 					hsp->cls3.rcvDataSizeLsb;
127 				sp->cls3.rcvDataSizeMsb =
128 					hsp->cls3.rcvDataSizeMsb;
129 			}
130 		}
131 	} else if (class == CLASS3)
132 		goto bad_service_param;
133 
134 	/*
135 	 * Preserve the upper four bits of the MSB from the PLOGI response.
136 	 * These bits contain the Buffer-to-Buffer State Change Number
137 	 * from the target and need to be passed to the FW.
138 	 */
139 	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
140 	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
141 	if (ssp_value > hsp_value) {
142 		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
143 		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
144 				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
145 	}
146 
147 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
148 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
149 	return 1;
150 bad_service_param:
151 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
152 			 "0207 Device %x "
153 			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
154 			 "invalid service parameters.  Ignoring device.\n",
155 			 ndlp->nlp_DID,
156 			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
157 			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
158 			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
159 			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
160 	return 0;
161 }
162 
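/* Return a pointer to the ELS response payload (past the command word)
 * for a completed ELS command. If the command buffer was already freed
 * (e.g. by lpfc_els_abort), force a LOCAL_REJECT/SLI_ABORTED status on
 * the response and return NULL.
 */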
163 static void *
164 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
165 			struct lpfc_iocbq *rspiocb)
166 {
167 	struct lpfc_dmabuf *pcmd, *prsp;
168 	uint32_t *lp;
169 	void     *ptr = NULL;
170 	IOCB_t   *irsp;
171 
172 	irsp = &rspiocb->iocb;
173 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
174 
175 	/* For lpfc_els_abort, context2 could be zero'ed to delay
176 	 * freeing associated memory till after ABTS completes.
177 	 */
178 	if (pcmd) {
179 		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
180 				       list);
181 		if (prsp) {
182 			lp = (uint32_t *) prsp->virt;
183 			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
184 		}
185 	} else {
186 		/* Force ulpStatus error since we are returning NULL ptr */
187 		if (!(irsp->ulpStatus)) {
188 			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
189 			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
190 		}
191 		ptr = NULL;
192 	}
193 	return ptr;
194 }
195 
196 
197 
198 /*
199  * Free resources / clean up outstanding I/Os
200  * associated with a LPFC_NODELIST entry. This
201  * routine effectively results in a "software abort".
202  */
203 int
204 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
205 {
206 	LIST_HEAD(abort_list);
207 	struct lpfc_sli  *psli = &phba->sli;
208 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
209 	struct lpfc_iocbq *iocb, *next_iocb;
210 
211 	/* Abort outstanding I/O on NPort <nlp_DID> */
212 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
213 			 "2819 Abort outstanding I/O on NPort x%x "
214 			 "Data: x%x x%x x%x\n",
215 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
216 			 ndlp->nlp_rpi);
217 	/* Clean up all fabric IOs first.*/
218 	lpfc_fabric_abort_nport(ndlp);
219 
220 	/*
221 	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list
222 	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the
223 	 * txcmplq so that the abort operation completes them successfully.
224 	 */
225 	spin_lock_irq(&phba->hbalock);
226 	if (phba->sli_rev == LPFC_SLI_REV4)
227 		spin_lock(&pring->ring_lock);
228 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
229 		/* Add to abort_list on NDLP match. */
230 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
231 			list_add_tail(&iocb->dlist, &abort_list);
232 	}
233 	if (phba->sli_rev == LPFC_SLI_REV4)
234 		spin_unlock(&pring->ring_lock);
235 	spin_unlock_irq(&phba->hbalock);
236 
237 	/* Abort the targeted IOs and remove them from the abort list. */
238 	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {
239 		spin_lock_irq(&phba->hbalock);
240 		list_del_init(&iocb->dlist);
241 		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
242 		spin_unlock_irq(&phba->hbalock);
243 	}
244 
245 	INIT_LIST_HEAD(&abort_list);
246 
247 	/* Now process the txq */
248 	spin_lock_irq(&phba->hbalock);
249 	if (phba->sli_rev == LPFC_SLI_REV4)
250 		spin_lock(&pring->ring_lock);
251 
252 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
253 		/* Check to see if iocb matches the nport we are looking for */
254 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
255 			list_del_init(&iocb->list);
256 			list_add_tail(&iocb->list, &abort_list);
257 		}
258 	}
259 
260 	if (phba->sli_rev == LPFC_SLI_REV4)
261 		spin_unlock(&pring->ring_lock);
262 	spin_unlock_irq(&phba->hbalock);
263 
264 	/* Cancel all the IOCBs from the completions list */
265 	lpfc_sli_cancel_iocbs(phba, &abort_list,
266 			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
267 
268 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
269 	return 0;
270 }
271 
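/* Handle a received PLOGI: validate the service parameters, perform an
 * implicit logout if the node is already logged in, resolve pt2pt
 * addressing and timeout values, then register the RPI and send the ACC
 * (or an LS_RJT for restricted NPIV logins) with the mailbox queued
 * behind it.
 */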
272 static int
273 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
274 	       struct lpfc_iocbq *cmdiocb)
275 {
276 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
277 	struct lpfc_hba    *phba = vport->phba;
278 	struct lpfc_dmabuf *pcmd;
279 	uint64_t nlp_portwwn = 0;
280 	uint32_t *lp;
281 	IOCB_t *icmd;
282 	struct serv_parm *sp;
283 	uint32_t ed_tov;
284 	LPFC_MBOXQ_t *mbox;
285 	struct ls_rjt stat;
286 	int rc;
287 
288 	memset(&stat, 0, sizeof (struct ls_rjt));
289 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
290 	lp = (uint32_t *) pcmd->virt;
291 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
292 	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
293 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
294 				 "0140 PLOGI Reject: invalid pname\n");
295 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
296 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
297 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
298 			NULL);
299 		return 0;
300 	}
301 	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
302 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
303 				 "0141 PLOGI Reject: invalid nname\n");
304 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
305 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
306 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
307 			NULL);
308 		return 0;
309 	}
310 
311 	nlp_portwwn = wwn_to_u64(ndlp->nlp_portname.u.wwn);
312 	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0)) {
313 		/* Reject this request because of invalid parameters */
314 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
315 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
316 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
317 			NULL);
318 		return 0;
319 	}
320 	icmd = &cmdiocb->iocb;
321 
322 	/* PLOGI chkparm OK */
323 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
324 			 "0114 PLOGI chkparm OK Data: x%x x%x x%x "
325 			 "x%x x%x x%x\n",
326 			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
327 			 ndlp->nlp_rpi, vport->port_state,
328 			 vport->fc_flag);
329 
330 	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
331 		ndlp->nlp_fcp_info |= CLASS2;
332 	else
333 		ndlp->nlp_fcp_info |= CLASS3;
334 
335 	ndlp->nlp_class_sup = 0;
336 	if (sp->cls1.classValid)
337 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
338 	if (sp->cls2.classValid)
339 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
340 	if (sp->cls3.classValid)
341 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
342 	if (sp->cls4.classValid)
343 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
344 	ndlp->nlp_maxframe =
345 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
346 
347 	/* if already logged in, do implicit logout */
348 	switch (ndlp->nlp_state) {
349 	case  NLP_STE_NPR_NODE:
350 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
351 			break;
352 	case  NLP_STE_REG_LOGIN_ISSUE:
353 	case  NLP_STE_PRLI_ISSUE:
354 	case  NLP_STE_UNMAPPED_NODE:
355 	case  NLP_STE_MAPPED_NODE:
356 		/* lpfc_plogi_confirm_nport skips fabric did, handle it here */
357 		if (!(ndlp->nlp_type & NLP_FABRIC)) {
358 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
359 					 ndlp, NULL);
360 			return 1;
361 		}
362 		if (nlp_portwwn != 0 &&
363 		    nlp_portwwn != wwn_to_u64(sp->portName.u.wwn))
364 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
365 					 "0143 PLOGI recv'd from DID: x%x "
366 					 "WWPN changed: old %llx new %llx\n",
367 					 ndlp->nlp_DID,
368 					 (unsigned long long)nlp_portwwn,
369 					 (unsigned long long)
370 					 wwn_to_u64(sp->portName.u.wwn));
371 
372 		ndlp->nlp_prev_state = ndlp->nlp_state;
373 		/* rport needs to be unregistered first */
374 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
375 		break;
376 	}
377 
378 	/* Check for Nport to NPort pt2pt protocol */
379 	if ((vport->fc_flag & FC_PT2PT) &&
380 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
381 		/* rcv'ed PLOGI decides what our NPortId will be */
382 		vport->fc_myDID = icmd->un.rcvels.parmRo;
383 
384 		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
385 		if (sp->cmn.edtovResolution) {
386 			/* E_D_TOV ticks are in nanoseconds */
387 			ed_tov = (phba->fc_edtov + 999999) / 1000000;
388 		}
389 
390 		/*
391 		 * For pt-to-pt, use the larger EDTOV
392 		 * RATOV = 2 * EDTOV
393 		 */
394 		if (ed_tov > phba->fc_edtov)
395 			phba->fc_edtov = ed_tov;
396 		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
397 
398 		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
399 
400 		/* Issue config_link / reg_vfi to account for updated TOV's */
401 
402 		if (phba->sli_rev == LPFC_SLI_REV4)
403 			lpfc_issue_reg_vfi(vport);
404 		else {
405 			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
406 			if (mbox == NULL)
407 				goto out;
408 			lpfc_config_link(phba, mbox);
409 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
410 			mbox->vport = vport;
411 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
412 			if (rc == MBX_NOT_FINISHED) {
413 				mempool_free(mbox, phba->mbox_mem_pool);
414 				goto out;
415 			}
416 		}
417 
418 		lpfc_can_disctmo(vport);
419 	}
420 
421 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
422 	if (!mbox)
423 		goto out;
424 
425 	/* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
426 	if (phba->sli_rev == LPFC_SLI_REV4)
427 		lpfc_unreg_rpi(vport, ndlp);
428 
429 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
430 			    (uint8_t *) sp, mbox, ndlp->nlp_rpi);
431 	if (rc) {
432 		mempool_free(mbox, phba->mbox_mem_pool);
433 		goto out;
434 	}
435 
436 	/* ACC PLOGI rsp command needs to execute first,
437 	 * queue this mbox command to be processed later.
438 	 */
439 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
440 	/*
441 	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
442 	 * command issued in lpfc_cmpl_els_acc().
443 	 */
444 	mbox->vport = vport;
445 	spin_lock_irq(shost->host_lock);
446 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
447 	spin_unlock_irq(shost->host_lock);
448 
449 	/*
450 	 * If there is an outstanding PLOGI issued, abort it before
451 	 * sending ACC rsp for received PLOGI. If pending plogi
452 	 * is not canceled here, the plogi will be rejected by
453 	 * remote port and will be retried. On a configuration with
454 	 * single discovery thread, this will cause a huge delay in
455 	 * discovery. Also this will cause multiple state machines
456 	 * running in parallel for this node.
457 	 */
458 	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
459 		/* software abort outstanding PLOGI */
460 		lpfc_els_abort(phba, ndlp);
461 	}
462 
463 	if ((vport->port_type == LPFC_NPIV_PORT &&
464 	     vport->cfg_restrict_login)) {
465 
466 		/* In order to preserve RPIs, we want to cleanup
467 		 * the default RPI the firmware created to rcv
468 		 * this ELS request. The only way to do this is
469 		 * to register, then unregister the RPI.
470 		 */
471 		spin_lock_irq(shost->host_lock);
472 		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
473 		spin_unlock_irq(shost->host_lock);
474 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
475 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
476 		rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
477 			ndlp, mbox);
478 		if (rc)
479 			mempool_free(mbox, phba->mbox_mem_pool);
480 		return 1;
481 	}
482 	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
483 	if (rc)
484 		mempool_free(mbox, phba->mbox_mem_pool);
485 	return 1;
486 out:
487 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
488 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
489 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
490 	return 0;
491 }
492 
493 /**
494  * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
495  * @phba: pointer to lpfc hba data structure.
496  * @mboxq: pointer to mailbox object
497  *
498  * This routine is invoked to issue a completion to a rcv'ed
499  * ADISC or PDISC after the paused RPI has been resumed.
500  **/
501 static void
502 lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
503 {
504 	struct lpfc_vport *vport;
505 	struct lpfc_iocbq *elsiocb;
506 	struct lpfc_nodelist *ndlp;
507 	uint32_t cmd;
508 
509 	elsiocb = (struct lpfc_iocbq *)mboxq->context1;
510 	ndlp = (struct lpfc_nodelist *) mboxq->context2;
511 	vport = mboxq->vport;
512 	cmd = elsiocb->drvrTimeout;
513 
514 	if (cmd == ELS_CMD_ADISC) {
515 		lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
516 	} else {
517 		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
518 			ndlp, NULL);
519 	}
520 	kfree(elsiocb);
521 	mempool_free(mboxq, phba->mbox_mem_pool);
522 }
523 
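/* Handle a received ADISC or PDISC. If the payload matches our cached
 * node info the request is ACC'd (on SLI4 the RPI is resumed first);
 * otherwise it is rejected and the node is put back in NPR state with a
 * delayed PLOGI retry.
 */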
524 static int
525 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
526 		struct lpfc_iocbq *cmdiocb)
527 {
528 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
529 	struct lpfc_iocbq  *elsiocb;
530 	struct lpfc_dmabuf *pcmd;
531 	struct serv_parm   *sp;
532 	struct lpfc_name   *pnn, *ppn;
533 	struct ls_rjt stat;
534 	ADISC *ap;
535 	IOCB_t *icmd;
536 	uint32_t *lp;
537 	uint32_t cmd;
538 
539 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
540 	lp = (uint32_t *) pcmd->virt;
541 
542 	cmd = *lp++;
543 	if (cmd == ELS_CMD_ADISC) {
544 		ap = (ADISC *) lp;
545 		pnn = (struct lpfc_name *) & ap->nodeName;
546 		ppn = (struct lpfc_name *) & ap->portName;
547 	} else {
548 		sp = (struct serv_parm *) lp;
549 		pnn = (struct lpfc_name *) & sp->nodeName;
550 		ppn = (struct lpfc_name *) & sp->portName;
551 	}
552 
553 	icmd = &cmdiocb->iocb;
554 	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
555 
556 		/*
557 		 * As soon as we send ACC, the remote NPort can
558 		 * start sending us data. Thus, for SLI4 we must
559 		 * resume the RPI before the ACC goes out.
560 		 */
561 		if (vport->phba->sli_rev == LPFC_SLI_REV4) {
562 			elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
563 				GFP_KERNEL);
564 			if (elsiocb) {
565 
566 				/* Save info from cmd IOCB used in rsp */
567 				memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
568 					sizeof(struct lpfc_iocbq));
569 
570 				/* Save the ELS cmd */
571 				elsiocb->drvrTimeout = cmd;
572 
573 				lpfc_sli4_resume_rpi(ndlp,
574 					lpfc_mbx_cmpl_resume_rpi, elsiocb);
575 				goto out;
576 			}
577 		}
578 
579 		if (cmd == ELS_CMD_ADISC) {
580 			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
581 		} else {
582 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
583 				ndlp, NULL);
584 		}
585 out:
586 		/* If we are authenticated, move to the proper state */
587 		if (ndlp->nlp_type & NLP_FCP_TARGET)
588 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
589 		else
590 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
591 
592 		return 1;
593 	}
594 	/* Reject this request because of invalid parameters */
595 	stat.un.b.lsRjtRsvd0 = 0;
596 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
597 	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
598 	stat.un.b.vendorUnique = 0;
599 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
600 
601 	/* 1 sec timeout */
602 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
603 
604 	spin_lock_irq(shost->host_lock);
605 	ndlp->nlp_flag |= NLP_DELAY_TMO;
606 	spin_unlock_irq(shost->host_lock);
607 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
608 	ndlp->nlp_prev_state = ndlp->nlp_state;
609 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
610 	return 0;
611 }
612 
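/* Handle a received LOGO or PRLO. The first LOGO is ACC'd, the node is
 * put in NPR state, and recovery is scheduled: FDISC/port rediscovery
 * for a fabric DID, or a delayed PLOGI retry for FCP targets.
 */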
613 static int
614 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
615 	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
616 {
617 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
618 	struct lpfc_hba    *phba = vport->phba;
619 	struct lpfc_vport **vports;
620 	int i, active_vlink_present = 0;
621 
622 	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
623 	/* Only call LOGO ACC for the first LOGO; this avoids sending unnecessary
624 	 * PLOGIs during LOGO storms from a device.
625 	 */
626 	spin_lock_irq(shost->host_lock);
627 	ndlp->nlp_flag |= NLP_LOGO_ACC;
628 	spin_unlock_irq(shost->host_lock);
629 	if (els_cmd == ELS_CMD_PRLO)
630 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
631 	else
632 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
633 	if (ndlp->nlp_DID == Fabric_DID) {
634 		if (vport->port_state <= LPFC_FDISC)
635 			goto out;
636 		lpfc_linkdown_port(vport);
637 		spin_lock_irq(shost->host_lock);
638 		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
639 		spin_unlock_irq(shost->host_lock);
640 		vports = lpfc_create_vport_work_array(phba);
641 		if (vports) {
642 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
643 					i++) {
644 				if ((!(vports[i]->fc_flag &
645 					FC_VPORT_LOGO_RCVD)) &&
646 					(vports[i]->port_state > LPFC_FDISC)) {
647 					active_vlink_present = 1;
648 					break;
649 				}
650 			}
651 			lpfc_destroy_vport_work_array(phba, vports);
652 		}
653 
654 		/*
655 		 * Don't re-instantiate if vport is marked for deletion.
656 		 * If we are here first then vport_delete is going to wait
657 		 * for discovery to complete.
658 		 */
659 		if (!(vport->load_flag & FC_UNLOADING) &&
660 					active_vlink_present) {
661 			/*
662 			 * If there are other active VLinks present,
663 			 * re-instantiate the Vlink using FDISC.
664 			 */
665 			mod_timer(&ndlp->nlp_delayfunc,
666 				  jiffies + msecs_to_jiffies(1000));
667 			spin_lock_irq(shost->host_lock);
668 			ndlp->nlp_flag |= NLP_DELAY_TMO;
669 			spin_unlock_irq(shost->host_lock);
670 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
671 			vport->port_state = LPFC_FDISC;
672 		} else {
673 			spin_lock_irq(shost->host_lock);
674 			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
675 			spin_unlock_irq(shost->host_lock);
676 			lpfc_retry_pport_discovery(phba);
677 		}
678 	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
679 		((ndlp->nlp_type & NLP_FCP_TARGET) ||
680 		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
681 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
682 		/* Only try to re-login if this is NOT a Fabric Node */
683 		mod_timer(&ndlp->nlp_delayfunc,
684 			  jiffies + msecs_to_jiffies(1000 * 1));
685 		spin_lock_irq(shost->host_lock);
686 		ndlp->nlp_flag |= NLP_DELAY_TMO;
687 		spin_unlock_irq(shost->host_lock);
688 
689 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
690 	}
691 out:
692 	ndlp->nlp_prev_state = ndlp->nlp_state;
693 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
694 
695 	spin_lock_irq(shost->host_lock);
696 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
697 	spin_unlock_irq(shost->host_lock);
698 	/* The driver has to wait until the ACC completes before it continues
699 	 * processing the LOGO.  The action will resume in
700 	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
701 	 * unreg_login, the driver waits so the ACC does not get aborted.
702 	 */
703 	return 0;
704 }
705 
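/* Parse a received PRLI payload, update the node's FCP initiator/target
 * roles and FCP-2 / first-burst capabilities, and propagate any role
 * change to the fc_rport.
 */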
706 static void
707 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
708 	      struct lpfc_iocbq *cmdiocb)
709 {
710 	struct lpfc_dmabuf *pcmd;
711 	uint32_t *lp;
712 	PRLI *npr;
713 	struct fc_rport *rport = ndlp->rport;
714 	u32 roles;
715 
716 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
717 	lp = (uint32_t *) pcmd->virt;
718 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
719 
720 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
721 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
722 	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
723 	if (npr->prliType == PRLI_FCP_TYPE) {
724 		if (npr->initiatorFunc)
725 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
726 		if (npr->targetFunc) {
727 			ndlp->nlp_type |= NLP_FCP_TARGET;
728 			if (npr->writeXferRdyDis)
729 				ndlp->nlp_flag |= NLP_FIRSTBURST;
730 		}
731 		if (npr->Retry)
732 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
733 	}
734 	if (rport) {
735 		/* We need to update the rport role values */
736 		roles = FC_RPORT_ROLE_UNKNOWN;
737 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
738 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
739 		if (ndlp->nlp_type & NLP_FCP_TARGET)
740 			roles |= FC_RPORT_ROLE_FCP_TARGET;
741 
742 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
743 			"rport rolechg:   role:x%x did:x%x flg:x%x",
744 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
745 
746 		fc_remote_port_rolechg(rport, roles);
747 	}
748 }
749 
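/* Decide whether this node can be re-discovered with ADISC. ADISC is used
 * only when the RPI is still registered and, on a non-pt2pt link, either
 * cfg_use_adisc is set during RSCN processing or the node is an FCP-2
 * target; otherwise the RPI is unregistered so rediscovery uses PLOGI.
 */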
750 static uint32_t
751 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
752 {
753 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
754 
755 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
756 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
757 		return 0;
758 	}
759 
760 	if (!(vport->fc_flag & FC_PT2PT)) {
761 		/* Check config parameter use-adisc or FCP-2 */
762 		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
763 		    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
764 		     (ndlp->nlp_type & NLP_FCP_TARGET))) {
765 			spin_lock_irq(shost->host_lock);
766 			ndlp->nlp_flag |= NLP_NPR_ADISC;
767 			spin_unlock_irq(shost->host_lock);
768 			return 1;
769 		}
770 	}
771 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
772 	lpfc_unreg_rpi(vport, ndlp);
773 	return 0;
774 }
775 
776 /**
777  * lpfc_release_rpi - Release a RPI by issuing unreg_login mailbox cmd.
778  * @phba : Pointer to lpfc_hba structure.
779  * @vport: Pointer to lpfc_vport structure.
780  * @rpi  : rpi to be released.
781  *
782  * This function sends an unreg_login mailbox command to the firmware
783  * to release an rpi.
784  **/
785 void
786 lpfc_release_rpi(struct lpfc_hba *phba,
787 		struct lpfc_vport *vport,
788 		uint16_t rpi)
789 {
790 	LPFC_MBOXQ_t *pmb;
791 	int rc;
792 
793 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
794 			GFP_KERNEL);
795 	if (!pmb)
796 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
797 			"2796 mailbox memory allocation failed \n");
798 	else {
799 		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
800 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
801 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
802 		if (rc == MBX_NOT_FINISHED)
803 			mempool_free(pmb, phba->mbox_mem_pool);
804 	}
805 }
806 
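/* Common handler for events that are illegal in the current node state:
 * free the RPI from a completing REG_LOGIN, log the bogus transition,
 * and leave the node state unchanged.
 */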
807 static uint32_t
808 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
809 		  void *arg, uint32_t evt)
810 {
811 	struct lpfc_hba *phba;
812 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
813 	uint16_t rpi;
814 
815 	phba = vport->phba;
816 	/* Release the RPI if a reglogin is completing */
817 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
818 		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
819 		(!pmb->u.mb.mbxStatus)) {
820 		rpi = pmb->u.mb.un.varWords[0];
821 		lpfc_release_rpi(phba, vport, rpi);
822 	}
823 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
824 			 "0271 Illegal State Transition: node x%x "
825 			 "event x%x, state x%x Data: x%x x%x\n",
826 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
827 			 ndlp->nlp_flag);
828 	return ndlp->nlp_state;
829 }
830 
831 static uint32_t
832 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
833 		  void *arg, uint32_t evt)
834 {
835 	/* This transition is only legal if we previously
836 	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
837 	 * working on the same NPortID, do nothing on this thread,
838 	 * which stops it.
839 	 */
840 	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
841 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
842 			 "0272 Illegal State Transition: node x%x "
843 			 "event x%x, state x%x Data: x%x x%x\n",
844 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
845 			 ndlp->nlp_flag);
846 	}
847 	return ndlp->nlp_state;
848 }
849 
850 /* Start of Discovery State Machine routines */
851 
852 static uint32_t
853 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
854 			   void *arg, uint32_t evt)
855 {
856 	struct lpfc_iocbq *cmdiocb;
857 
858 	cmdiocb = (struct lpfc_iocbq *) arg;
859 
860 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
861 		return ndlp->nlp_state;
862 	}
863 	return NLP_STE_FREED_NODE;
864 }
865 
866 static uint32_t
867 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
868 			 void *arg, uint32_t evt)
869 {
870 	lpfc_issue_els_logo(vport, ndlp, 0);
871 	return ndlp->nlp_state;
872 }
873 
874 static uint32_t
875 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
876 			  void *arg, uint32_t evt)
877 {
878 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
879 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
880 
881 	spin_lock_irq(shost->host_lock);
882 	ndlp->nlp_flag |= NLP_LOGO_ACC;
883 	spin_unlock_irq(shost->host_lock);
884 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
885 
886 	return ndlp->nlp_state;
887 }
888 
889 static uint32_t
890 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
891 			   void *arg, uint32_t evt)
892 {
893 	return NLP_STE_FREED_NODE;
894 }
895 
896 static uint32_t
897 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
898 			   void *arg, uint32_t evt)
899 {
900 	return NLP_STE_FREED_NODE;
901 }
902 
903 static uint32_t
904 lpfc_device_recov_unused_node(struct lpfc_vport *vport,
905 			struct lpfc_nodelist *ndlp,
906 			   void *arg, uint32_t evt)
907 {
908 	return ndlp->nlp_state;
909 }
910 
911 static uint32_t
912 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
913 			   void *arg, uint32_t evt)
914 {
915 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
916 	struct lpfc_hba   *phba = vport->phba;
917 	struct lpfc_iocbq *cmdiocb = arg;
918 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
919 	uint32_t *lp = (uint32_t *) pcmd->virt;
920 	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
921 	struct ls_rjt stat;
922 	int port_cmp;
923 
924 	memset(&stat, 0, sizeof (struct ls_rjt));
925 
926 	/* For a PLOGI, we only accept if our portname is less
927 	 * than the remote portname.
928 	 */
929 	phba->fc_stat.elsLogiCol++;
930 	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
931 			  sizeof(struct lpfc_name));
932 
933 	if (port_cmp >= 0) {
934 		/* Reject this request because the remote node will accept
935 		   ours */
936 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
937 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
938 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
939 			NULL);
940 	} else {
941 		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
942 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
943 		    (vport->num_disc_nodes)) {
944 			spin_lock_irq(shost->host_lock);
945 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
946 			spin_unlock_irq(shost->host_lock);
947 			/* Check if there are more PLOGIs to be sent */
948 			lpfc_more_plogi(vport);
949 			if (vport->num_disc_nodes == 0) {
950 				spin_lock_irq(shost->host_lock);
951 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
952 				spin_unlock_irq(shost->host_lock);
953 				lpfc_can_disctmo(vport);
954 				lpfc_end_rscn(vport);
955 			}
956 		}
957 	} /* If our portname was less */
958 
959 	return ndlp->nlp_state;
960 }
961 
962 static uint32_t
963 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
964 			  void *arg, uint32_t evt)
965 {
966 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
967 	struct ls_rjt     stat;
968 
969 	memset(&stat, 0, sizeof (struct ls_rjt));
970 	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
971 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
972 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
973 	return ndlp->nlp_state;
974 }
975 
976 static uint32_t
977 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
978 			  void *arg, uint32_t evt)
979 {
980 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
981 
982 	/* software abort outstanding PLOGI */
983 	lpfc_els_abort(vport->phba, ndlp);
984 
985 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
986 	return ndlp->nlp_state;
987 }
988 
989 static uint32_t
990 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
991 			 void *arg, uint32_t evt)
992 {
993 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
994 	struct lpfc_hba   *phba = vport->phba;
995 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
996 
997 	/* software abort outstanding PLOGI */
998 	lpfc_els_abort(phba, ndlp);
999 
1000 	if (evt == NLP_EVT_RCV_LOGO) {
1001 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1002 	} else {
1003 		lpfc_issue_els_logo(vport, ndlp, 0);
1004 	}
1005 
1006 	/* Put ndlp in NPR state and set plogi timer for 1 sec */
1007 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
1008 	spin_lock_irq(shost->host_lock);
1009 	ndlp->nlp_flag |= NLP_DELAY_TMO;
1010 	spin_unlock_irq(shost->host_lock);
1011 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1012 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1013 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1014 
1015 	return ndlp->nlp_state;
1016 }
1017 
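/* Handle completion of a PLOGI we issued. On success the service
 * parameters are validated and cached, pt2pt timeout values are updated,
 * and a REG_LOGIN mailbox is issued (NameServer and FDMI logins get their
 * own mailbox completion handlers). On any failure the node drops to NPR
 * and is marked for deferred removal.
 */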
1018 static uint32_t
1019 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
1020 			    struct lpfc_nodelist *ndlp,
1021 			    void *arg,
1022 			    uint32_t evt)
1023 {
1024 	struct lpfc_hba    *phba = vport->phba;
1025 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1026 	struct lpfc_iocbq  *cmdiocb, *rspiocb;
1027 	struct lpfc_dmabuf *pcmd, *prsp, *mp;
1028 	uint32_t *lp;
1029 	IOCB_t *irsp;
1030 	struct serv_parm *sp;
1031 	uint32_t ed_tov;
1032 	LPFC_MBOXQ_t *mbox;
1033 	int rc;
1034 
1035 	cmdiocb = (struct lpfc_iocbq *) arg;
1036 	rspiocb = cmdiocb->context_un.rsp_iocb;
1037 
1038 	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
1039 		/* Recovery from PLOGI collision logic */
1040 		return ndlp->nlp_state;
1041 	}
1042 
1043 	irsp = &rspiocb->iocb;
1044 
1045 	if (irsp->ulpStatus)
1046 		goto out;
1047 
1048 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
1049 
1050 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1051 	if (!prsp)
1052 		goto out;
1053 
1054 	lp = (uint32_t *) prsp->virt;
1055 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
1056 
1057 	/* Some switches have FDMI servers returning 0 for WWN */
1058 	if ((ndlp->nlp_DID != FDMI_DID) &&
1059 		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
1060 		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
1061 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1062 				 "0142 PLOGI RSP: Invalid WWN.\n");
1063 		goto out;
1064 	}
1065 	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
1066 		goto out;
1067 	/* PLOGI chkparm OK */
1068 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1069 			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
1070 			 ndlp->nlp_DID, ndlp->nlp_state,
1071 			 ndlp->nlp_flag, ndlp->nlp_rpi);
1072 	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
1073 		ndlp->nlp_fcp_info |= CLASS2;
1074 	else
1075 		ndlp->nlp_fcp_info |= CLASS3;
1076 
1077 	ndlp->nlp_class_sup = 0;
1078 	if (sp->cls1.classValid)
1079 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
1080 	if (sp->cls2.classValid)
1081 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
1082 	if (sp->cls3.classValid)
1083 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
1084 	if (sp->cls4.classValid)
1085 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
1086 	ndlp->nlp_maxframe =
1087 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
1088 
1089 	if ((vport->fc_flag & FC_PT2PT) &&
1090 	    (vport->fc_flag & FC_PT2PT_PLOGI)) {
1091 		ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
1092 		if (sp->cmn.edtovResolution) {
1093 			/* E_D_TOV ticks are in nanoseconds */
1094 			ed_tov = (phba->fc_edtov + 999999) / 1000000;
1095 		}
1096 
1097 		/*
1098 		 * Use the larger EDTOV
1099 		 * RATOV = 2 * EDTOV for pt-to-pt
1100 		 */
1101 		if (ed_tov > phba->fc_edtov)
1102 			phba->fc_edtov = ed_tov;
1103 		phba->fc_ratov = (2 * phba->fc_edtov) / 1000;
1104 
1105 		memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
1106 
1107 		/* Issue config_link / reg_vfi to account for updated TOV's */
1108 		if (phba->sli_rev == LPFC_SLI_REV4) {
1109 			lpfc_issue_reg_vfi(vport);
1110 		} else {
1111 			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1112 			if (!mbox) {
1113 				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1114 						 "0133 PLOGI: no memory "
1115 						 "for config_link "
1116 						 "Data: x%x x%x x%x x%x\n",
1117 						 ndlp->nlp_DID, ndlp->nlp_state,
1118 						 ndlp->nlp_flag, ndlp->nlp_rpi);
1119 				goto out;
1120 			}
1121 
1122 			lpfc_config_link(phba, mbox);
1123 
1124 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1125 			mbox->vport = vport;
1126 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
1127 			if (rc == MBX_NOT_FINISHED) {
1128 				mempool_free(mbox, phba->mbox_mem_pool);
1129 				goto out;
1130 			}
1131 		}
1132 	}
1133 
1134 	lpfc_unreg_rpi(vport, ndlp);
1135 
1136 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1137 	if (!mbox) {
1138 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1139 				 "0018 PLOGI: no memory for reg_login "
1140 				 "Data: x%x x%x x%x x%x\n",
1141 				 ndlp->nlp_DID, ndlp->nlp_state,
1142 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1143 		goto out;
1144 	}
1145 
1146 	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
1147 			 (uint8_t *) sp, mbox, ndlp->nlp_rpi) == 0) {
1148 		switch (ndlp->nlp_DID) {
1149 		case NameServer_DID:
1150 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
1151 			break;
1152 		case FDMI_DID:
1153 			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
1154 			break;
1155 		default:
1156 			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
1157 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
1158 		}
1159 		mbox->context2 = lpfc_nlp_get(ndlp);
1160 		mbox->vport = vport;
1161 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
1162 		    != MBX_NOT_FINISHED) {
1163 			lpfc_nlp_set_state(vport, ndlp,
1164 					   NLP_STE_REG_LOGIN_ISSUE);
1165 			return ndlp->nlp_state;
1166 		}
1167 		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
1168 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
1169 		/* decrement the node reference count held for the failed mbox
1170 		 * command
1171 		 */
1172 		lpfc_nlp_put(ndlp);
1173 		mp = (struct lpfc_dmabuf *) mbox->context1;
1174 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
1175 		kfree(mp);
1176 		mempool_free(mbox, phba->mbox_mem_pool);
1177 
1178 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1179 				 "0134 PLOGI: cannot issue reg_login "
1180 				 "Data: x%x x%x x%x x%x\n",
1181 				 ndlp->nlp_DID, ndlp->nlp_state,
1182 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1183 	} else {
1184 		mempool_free(mbox, phba->mbox_mem_pool);
1185 
1186 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1187 				 "0135 PLOGI: cannot format reg_login "
1188 				 "Data: x%x x%x x%x x%x\n",
1189 				 ndlp->nlp_DID, ndlp->nlp_state,
1190 				 ndlp->nlp_flag, ndlp->nlp_rpi);
1191 	}
1192 
1193 
1194 out:
1195 	if (ndlp->nlp_DID == NameServer_DID) {
1196 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1197 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1198 				 "0261 Cannot Register NameServer login\n");
1199 	}
1200 
1201 	/*
1202 	** In case the node reference counter does not go to zero, ensure that
1203 	** the stale state for the node is not processed.
1204 	*/
1205 
1206 	ndlp->nlp_prev_state = ndlp->nlp_state;
1207 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1208 	spin_lock_irq(shost->host_lock);
1209 	ndlp->nlp_flag |= NLP_DEFER_RM;
1210 	spin_unlock_irq(shost->host_lock);
1211 	return NLP_STE_FREED_NODE;
1212 }
1213 
1214 static uint32_t
1215 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1216 			   void *arg, uint32_t evt)
1217 {
1218 	return ndlp->nlp_state;
1219 }
1220 
1221 static uint32_t
1222 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1223 	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1224 {
1225 	struct lpfc_hba *phba;
1226 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1227 	MAILBOX_t *mb = &pmb->u.mb;
1228 	uint16_t rpi;
1229 
1230 	phba = vport->phba;
1231 	/* Release the RPI */
1232 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
1233 		!mb->mbxStatus) {
1234 		rpi = pmb->u.mb.un.varWords[0];
1235 		lpfc_release_rpi(phba, vport, rpi);
1236 	}
1237 	return ndlp->nlp_state;
1238 }
1239 
1240 static uint32_t
1241 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1242 			   void *arg, uint32_t evt)
1243 {
1244 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1245 
1246 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1247 		spin_lock_irq(shost->host_lock);
1248 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1249 		spin_unlock_irq(shost->host_lock);
1250 		return ndlp->nlp_state;
1251 	} else {
1252 		/* software abort outstanding PLOGI */
1253 		lpfc_els_abort(vport->phba, ndlp);
1254 
1255 		lpfc_drop_node(vport, ndlp);
1256 		return NLP_STE_FREED_NODE;
1257 	}
1258 }
1259 
1260 static uint32_t
1261 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1262 			      struct lpfc_nodelist *ndlp,
1263 			      void *arg,
1264 			      uint32_t evt)
1265 {
1266 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1267 	struct lpfc_hba  *phba = vport->phba;
1268 
1269 	/* Don't do anything that will mess up processing of the
1270 	 * previous RSCN.
1271 	 */
1272 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1273 		return ndlp->nlp_state;
1274 
1275 	/* software abort outstanding PLOGI */
1276 	lpfc_els_abort(phba, ndlp);
1277 
1278 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1279 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1280 	spin_lock_irq(shost->host_lock);
1281 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1282 	spin_unlock_irq(shost->host_lock);
1283 
1284 	return ndlp->nlp_state;
1285 }
1286 
1287 static uint32_t
1288 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1289 			   void *arg, uint32_t evt)
1290 {
1291 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1292 	struct lpfc_hba   *phba = vport->phba;
1293 	struct lpfc_iocbq *cmdiocb;
1294 
1295 	/* software abort outstanding ADISC */
1296 	lpfc_els_abort(phba, ndlp);
1297 
1298 	cmdiocb = (struct lpfc_iocbq *) arg;
1299 
1300 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1301 		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1302 			spin_lock_irq(shost->host_lock);
1303 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1304 			spin_unlock_irq(shost->host_lock);
1305 			if (vport->num_disc_nodes)
1306 				lpfc_more_adisc(vport);
1307 		}
1308 		return ndlp->nlp_state;
1309 	}
1310 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1311 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1312 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1313 
1314 	return ndlp->nlp_state;
1315 }
1316 
1317 static uint32_t
1318 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1319 			  void *arg, uint32_t evt)
1320 {
1321 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1322 
1323 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1324 	return ndlp->nlp_state;
1325 }
1326 
1327 static uint32_t
1328 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1329 			  void *arg, uint32_t evt)
1330 {
1331 	struct lpfc_hba *phba = vport->phba;
1332 	struct lpfc_iocbq *cmdiocb;
1333 
1334 	cmdiocb = (struct lpfc_iocbq *) arg;
1335 
1336 	/* software abort outstanding ADISC */
1337 	lpfc_els_abort(phba, ndlp);
1338 
1339 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1340 	return ndlp->nlp_state;
1341 }
1342 
1343 static uint32_t
1344 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1345 			    struct lpfc_nodelist *ndlp,
1346 			    void *arg, uint32_t evt)
1347 {
1348 	struct lpfc_iocbq *cmdiocb;
1349 
1350 	cmdiocb = (struct lpfc_iocbq *) arg;
1351 
1352 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1353 	return ndlp->nlp_state;
1354 }
1355 
1356 static uint32_t
1357 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1358 			  void *arg, uint32_t evt)
1359 {
1360 	struct lpfc_iocbq *cmdiocb;
1361 
1362 	cmdiocb = (struct lpfc_iocbq *) arg;
1363 
1364 	/* Treat like rcv logo */
1365 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1366 	return ndlp->nlp_state;
1367 }
1368 
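/* Handle completion of an ADISC we issued. A failed or mismatched response
 * drops the node to NPR with a delayed PLOGI retry; a good response moves
 * the node to MAPPED or UNMAPPED (resuming the RPI first on SLI4 ports).
 */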
1369 static uint32_t
1370 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1371 			    struct lpfc_nodelist *ndlp,
1372 			    void *arg, uint32_t evt)
1373 {
1374 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1375 	struct lpfc_hba   *phba = vport->phba;
1376 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1377 	IOCB_t *irsp;
1378 	ADISC *ap;
1379 	int rc;
1380 
1381 	cmdiocb = (struct lpfc_iocbq *) arg;
1382 	rspiocb = cmdiocb->context_un.rsp_iocb;
1383 
1384 	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1385 	irsp = &rspiocb->iocb;
1386 
1387 	if ((irsp->ulpStatus) ||
1388 	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1389 		/* 1 sec timeout */
1390 		mod_timer(&ndlp->nlp_delayfunc,
1391 			  jiffies + msecs_to_jiffies(1000));
1392 		spin_lock_irq(shost->host_lock);
1393 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1394 		spin_unlock_irq(shost->host_lock);
1395 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1396 
1397 		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1398 		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1399 
1400 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1401 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1402 		lpfc_unreg_rpi(vport, ndlp);
1403 		return ndlp->nlp_state;
1404 	}
1405 
1406 	if (phba->sli_rev == LPFC_SLI_REV4) {
1407 		rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1408 		if (rc) {
1409 			/* Stay in state and retry. */
1410 			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1411 			return ndlp->nlp_state;
1412 		}
1413 	}
1414 
1415 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1416 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1417 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1418 	} else {
1419 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1420 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1421 	}
1422 
1423 	return ndlp->nlp_state;
1424 }
1425 
1426 static uint32_t
1427 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1428 			   void *arg, uint32_t evt)
1429 {
1430 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1431 
1432 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1433 		spin_lock_irq(shost->host_lock);
1434 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1435 		spin_unlock_irq(shost->host_lock);
1436 		return ndlp->nlp_state;
1437 	} else {
1438 		/* software abort outstanding ADISC */
1439 		lpfc_els_abort(vport->phba, ndlp);
1440 
1441 		lpfc_drop_node(vport, ndlp);
1442 		return NLP_STE_FREED_NODE;
1443 	}
1444 }
1445 
1446 static uint32_t
1447 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1448 			      struct lpfc_nodelist *ndlp,
1449 			      void *arg,
1450 			      uint32_t evt)
1451 {
1452 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1453 	struct lpfc_hba  *phba = vport->phba;
1454 
1455 	/* Don't do anything that will mess up processing of the
1456 	 * previous RSCN.
1457 	 */
1458 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1459 		return ndlp->nlp_state;
1460 
1461 	/* software abort outstanding ADISC */
1462 	lpfc_els_abort(phba, ndlp);
1463 
1464 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1465 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1466 	spin_lock_irq(shost->host_lock);
1467 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1468 	spin_unlock_irq(shost->host_lock);
1469 	lpfc_disc_set_adisc(vport, ndlp);
1470 	return ndlp->nlp_state;
1471 }
1472 
1473 static uint32_t
1474 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1475 			      struct lpfc_nodelist *ndlp,
1476 			      void *arg,
1477 			      uint32_t evt)
1478 {
1479 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1480 
1481 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1482 	return ndlp->nlp_state;
1483 }
1484 
1485 static uint32_t
1486 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1487 			     struct lpfc_nodelist *ndlp,
1488 			     void *arg,
1489 			     uint32_t evt)
1490 {
1491 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1492 
1493 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1494 	return ndlp->nlp_state;
1495 }
1496 
1497 static uint32_t
1498 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1499 			     struct lpfc_nodelist *ndlp,
1500 			     void *arg,
1501 			     uint32_t evt)
1502 {
1503 	struct lpfc_hba   *phba = vport->phba;
1504 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1505 	LPFC_MBOXQ_t	  *mb;
1506 	LPFC_MBOXQ_t	  *nextmb;
1507 	struct lpfc_dmabuf *mp;
1508 
1509 	cmdiocb = (struct lpfc_iocbq *) arg;
1510 
1511 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1512 	if ((mb = phba->sli.mbox_active)) {
1513 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1514 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1515 			lpfc_nlp_put(ndlp);
1516 			mb->context2 = NULL;
1517 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1518 		}
1519 	}
1520 
1521 	spin_lock_irq(&phba->hbalock);
1522 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1523 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1524 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1525 			mp = (struct lpfc_dmabuf *) (mb->context1);
1526 			if (mp) {
1527 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1528 				kfree(mp);
1529 			}
1530 			lpfc_nlp_put(ndlp);
1531 			list_del(&mb->list);
1532 			phba->sli.mboxq_cnt--;
1533 			mempool_free(mb, phba->mbox_mem_pool);
1534 		}
1535 	}
1536 	spin_unlock_irq(&phba->hbalock);
1537 
1538 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1539 	return ndlp->nlp_state;
1540 }
1541 
1542 static uint32_t
1543 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1544 			       struct lpfc_nodelist *ndlp,
1545 			       void *arg,
1546 			       uint32_t evt)
1547 {
1548 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1549 
1550 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1551 	return ndlp->nlp_state;
1552 }
1553 
1554 static uint32_t
1555 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1556 			     struct lpfc_nodelist *ndlp,
1557 			     void *arg,
1558 			     uint32_t evt)
1559 {
1560 	struct lpfc_iocbq *cmdiocb;
1561 
1562 	cmdiocb = (struct lpfc_iocbq *) arg;
1563 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1564 	return ndlp->nlp_state;
1565 }
1566 
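/* Handle completion of the REG_LOGIN mailbox command. On failure the node
 * returns to NPR (with a delayed PLOGI retry unless the RPI pool is
 * exhausted); on success the RPI is recorded and a PRLI is issued to
 * non-fabric nodes.
 */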
1567 static uint32_t
1568 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1569 				  struct lpfc_nodelist *ndlp,
1570 				  void *arg,
1571 				  uint32_t evt)
1572 {
1573 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1574 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1575 	MAILBOX_t *mb = &pmb->u.mb;
1576 	uint32_t did  = mb->un.varWords[1];
1577 
1578 	if (mb->mbxStatus) {
1579 		/* RegLogin failed */
1580 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1581 				"0246 RegLogin failed Data: x%x x%x x%x x%x "
1582 				 "x%x\n",
1583 				 did, mb->mbxStatus, vport->port_state,
1584 				 mb->un.varRegLogin.vpi,
1585 				 mb->un.varRegLogin.rpi);
1586 		/*
1587 		 * If RegLogin failed due to lack of HBA resources, do not
1588 		 * retry discovery.
1589 		 */
1590 		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1591 			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1592 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1593 			return ndlp->nlp_state;
1594 		}
1595 
1596 		/* Put ndlp in NPR state and set plogi timer for 1 sec */
1597 		mod_timer(&ndlp->nlp_delayfunc,
1598 			  jiffies + msecs_to_jiffies(1000 * 1));
1599 		spin_lock_irq(shost->host_lock);
1600 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1601 		spin_unlock_irq(shost->host_lock);
1602 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1603 
1604 		lpfc_issue_els_logo(vport, ndlp, 0);
1605 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1606 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1607 		return ndlp->nlp_state;
1608 	}
1609 
1610 	/* SLI4 ports have preallocated logical rpis. */
1611 	if (vport->phba->sli_rev < LPFC_SLI_REV4)
1612 		ndlp->nlp_rpi = mb->un.varWords[0];
1613 
1614 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1615 
1616 	/* Only if we are not a fabric nport do we issue PRLI */
1617 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1618 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1619 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1620 		lpfc_issue_els_prli(vport, ndlp, 0);
1621 	} else {
1622 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1623 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1624 	}
1625 	return ndlp->nlp_state;
1626 }
1627 
1628 static uint32_t
1629 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1630 			      struct lpfc_nodelist *ndlp,
1631 			      void *arg,
1632 			      uint32_t evt)
1633 {
1634 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1635 
1636 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1637 		spin_lock_irq(shost->host_lock);
1638 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1639 		spin_unlock_irq(shost->host_lock);
1640 		return ndlp->nlp_state;
1641 	} else {
1642 		lpfc_drop_node(vport, ndlp);
1643 		return NLP_STE_FREED_NODE;
1644 	}
1645 }
1646 
1647 static uint32_t
1648 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1649 				 struct lpfc_nodelist *ndlp,
1650 				 void *arg,
1651 				 uint32_t evt)
1652 {
1653 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1654 
1655 	/* Don't do anything that will mess up processing of the
1656 	 * previous RSCN.
1657 	 */
1658 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1659 		return ndlp->nlp_state;
1660 
1661 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1662 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1663 	spin_lock_irq(shost->host_lock);
1664 	ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
1665 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1666 	spin_unlock_irq(shost->host_lock);
1667 	lpfc_disc_set_adisc(vport, ndlp);
1668 	return ndlp->nlp_state;
1669 }
1670 
1671 static uint32_t
1672 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1673 			  void *arg, uint32_t evt)
1674 {
1675 	struct lpfc_iocbq *cmdiocb;
1676 
1677 	cmdiocb = (struct lpfc_iocbq *) arg;
1678 
1679 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1680 	return ndlp->nlp_state;
1681 }
1682 
1683 static uint32_t
1684 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1685 			 void *arg, uint32_t evt)
1686 {
1687 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1688 
1689 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1690 	return ndlp->nlp_state;
1691 }
1692 
1693 static uint32_t
1694 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1695 			 void *arg, uint32_t evt)
1696 {
1697 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1698 
1699 	/* Software abort outstanding PRLI before sending acc */
1700 	lpfc_els_abort(vport->phba, ndlp);
1701 
1702 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1703 	return ndlp->nlp_state;
1704 }
1705 
1706 static uint32_t
1707 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1708 			   void *arg, uint32_t evt)
1709 {
1710 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1711 
1712 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1713 	return ndlp->nlp_state;
1714 }
1715 
1716 /* This routine is invoked when we rcv a PRLO request from a nport
1717  * we are logged into.  We should send back a PRLO rsp setting the
1718  * appropriate bits.
1719  * NEXT STATE = PRLI_ISSUE
1720  */
1721 static uint32_t
1722 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1723 			 void *arg, uint32_t evt)
1724 {
1725 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1726 
1727 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1728 	return ndlp->nlp_state;
1729 }
1730 
1731 static uint32_t
1732 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1733 			  void *arg, uint32_t evt)
1734 {
1735 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1736 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1737 	struct lpfc_hba   *phba = vport->phba;
1738 	IOCB_t *irsp;
1739 	PRLI *npr;
1740 
1741 	cmdiocb = (struct lpfc_iocbq *) arg;
1742 	rspiocb = cmdiocb->context_un.rsp_iocb;
1743 	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1744 
1745 	irsp = &rspiocb->iocb;
1746 	if (irsp->ulpStatus) {
1747 		if ((vport->port_type == LPFC_NPIV_PORT) &&
1748 		    vport->cfg_restrict_login) {
1749 			goto out;
1750 		}
1751 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1752 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1753 		return ndlp->nlp_state;
1754 	}
1755 
1756 	/* Check out PRLI rsp */
1757 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1758 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1759 	ndlp->nlp_flag &= ~NLP_FIRSTBURST;
1760 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1761 	    (npr->prliType == PRLI_FCP_TYPE)) {
1762 		if (npr->initiatorFunc)
1763 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1764 		if (npr->targetFunc) {
1765 			ndlp->nlp_type |= NLP_FCP_TARGET;
1766 			if (npr->writeXferRdyDis)
1767 				ndlp->nlp_flag |= NLP_FIRSTBURST;
1768 		}
1769 		if (npr->Retry)
1770 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1771 	}
1772 	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1773 	    (vport->port_type == LPFC_NPIV_PORT) &&
1774 	     vport->cfg_restrict_login) {
1775 out:
1776 		spin_lock_irq(shost->host_lock);
1777 		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1778 		spin_unlock_irq(shost->host_lock);
1779 		lpfc_issue_els_logo(vport, ndlp, 0);
1780 
1781 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1782 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1783 		return ndlp->nlp_state;
1784 	}
1785 
1786 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1787 	if (ndlp->nlp_type & NLP_FCP_TARGET)
1788 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1789 	else
1790 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1791 	return ndlp->nlp_state;
1792 }
1793 
1794 /*! lpfc_device_rm_prli_issue
1795  *
1796  * \pre
1797  * \post
1798  * \param   vport
1799  * \param   ndlp
1800  * \param   arg
1801  * \param   evt
1802  * \return  uint32_t
1803  *
1804  * \b Description:
1805  *    This routine is invoked when we receive a request to remove an nport we
1806  *    are in the process of PRLIing. If the node is still marked for discovery
1807  *    (NLP_NPR_2B_DISC), removal is deferred by setting NLP_NODEV_REMOVE;
1808  *    otherwise the outstanding PRLI is software aborted and the node is dropped.
1809  *
1810  */
1811 
1812 static uint32_t
1813 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1814 			  void *arg, uint32_t evt)
1815 {
1816 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1817 
1818 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1819 		spin_lock_irq(shost->host_lock);
1820 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1821 		spin_unlock_irq(shost->host_lock);
1822 		return ndlp->nlp_state;
1823 	} else {
1824 		/* software abort outstanding PRLI */
1825 		lpfc_els_abort(vport->phba, ndlp);
1826 
1827 		lpfc_drop_node(vport, ndlp);
1828 		return NLP_STE_FREED_NODE;
1829 	}
1830 }
1831 
1832 
1833 /*! lpfc_device_recov_prli_issue
1834  *
1835  * \pre
1836  * \post
1837  * \param   vport
1838  * \param   ndlp
1839  * \param   arg
1840  * \param   evt
1841  * \return  uint32_t
1842  *
1843  * \b Description:
1844  *    This routine is invoked when the state of a device is unknown, such as
1845  *    during a link down. We software abort the outstanding PRLI, move the
1846  *    node to the NPR state, clear the NODEV_REMOVE and NPR_2B_DISC flags,
1847  *    and mark whether ADISC may be used to recover the login.
1848  */
1849 static uint32_t
1850 lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1851 			     struct lpfc_nodelist *ndlp,
1852 			     void *arg,
1853 			     uint32_t evt)
1854 {
1855 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1856 	struct lpfc_hba  *phba = vport->phba;
1857 
1858 	/* Don't do anything that will mess up processing of the
1859 	 * previous RSCN.
1860 	 */
1861 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1862 		return ndlp->nlp_state;
1863 
1864 	/* software abort outstanding PRLI */
1865 	lpfc_els_abort(phba, ndlp);
1866 
1867 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1868 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1869 	spin_lock_irq(shost->host_lock);
1870 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1871 	spin_unlock_irq(shost->host_lock);
1872 	lpfc_disc_set_adisc(vport, ndlp);
1873 	return ndlp->nlp_state;
1874 }
1875 
1876 static uint32_t
1877 lpfc_rcv_plogi_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1878 			  void *arg, uint32_t evt)
1879 {
1880 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1881 	struct ls_rjt     stat;
1882 
1883 	memset(&stat, 0, sizeof(struct ls_rjt));
1884 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1885 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1886 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1887 	return ndlp->nlp_state;
1888 }
1889 
1890 static uint32_t
1891 lpfc_rcv_prli_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1892 			 void *arg, uint32_t evt)
1893 {
1894 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1895 	struct ls_rjt     stat;
1896 
1897 	memset(&stat, 0, sizeof(struct ls_rjt));
1898 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1899 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1900 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1901 	return ndlp->nlp_state;
1902 }
1903 
1904 static uint32_t
1905 lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1906 			 void *arg, uint32_t evt)
1907 {
1908 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1909 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1910 
1911 	spin_lock_irq(shost->host_lock);
1912 	ndlp->nlp_flag |= NLP_LOGO_ACC;
1913 	spin_unlock_irq(shost->host_lock);
1914 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1915 	return ndlp->nlp_state;
1916 }
1917 
1918 static uint32_t
1919 lpfc_rcv_padisc_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1920 			   void *arg, uint32_t evt)
1921 {
1922 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1923 	struct ls_rjt     stat;
1924 
1925 	memset(&stat, 0, sizeof(struct ls_rjt));
1926 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1927 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1928 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1929 	return ndlp->nlp_state;
1930 }
1931 
1932 static uint32_t
1933 lpfc_rcv_prlo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1934 			 void *arg, uint32_t evt)
1935 {
1936 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg;
1937 	struct ls_rjt     stat;
1938 
1939 	memset(&stat, 0, sizeof(struct ls_rjt));
1940 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1941 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1942 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1943 	return ndlp->nlp_state;
1944 }
1945 
1946 static uint32_t
1947 lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1948 			  void *arg, uint32_t evt)
1949 {
1950 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1951 
1952 	ndlp->nlp_prev_state = NLP_STE_LOGO_ISSUE;
1953 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1954 	spin_lock_irq(shost->host_lock);
1955 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1956 	spin_unlock_irq(shost->host_lock);
1957 	lpfc_disc_set_adisc(vport, ndlp);
1958 	return ndlp->nlp_state;
1959 }
1960 
1961 static uint32_t
1962 lpfc_device_rm_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1963 			  void *arg, uint32_t evt)
1964 {
1965 	/*
1966 	 * Take no action.  If a LOGO is outstanding, then possibly DevLoss has
1967 	 * timed out and is calling for Device Remove.  In this case, the LOGO
1968 	 * must be allowed to complete in state LOGO_ISSUE so that the rpi
1969 	 * and other NLP flags are correctly cleaned up.
1970 	 */
1971 	return ndlp->nlp_state;
1972 }
1973 
1974 static uint32_t
1975 lpfc_device_recov_logo_issue(struct lpfc_vport *vport,
1976 			     struct lpfc_nodelist *ndlp,
1977 			     void *arg, uint32_t evt)
1978 {
1979 	/*
1980 	 * Device Recovery events have no meaning for a node with a LOGO
1981 	 * outstanding.  The LOGO has to complete first and handle the
1982 	 * node from that point.
1983 	 */
1984 	return ndlp->nlp_state;
1985 }
1986 
1987 static uint32_t
1988 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1989 			  void *arg, uint32_t evt)
1990 {
1991 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1992 
1993 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1994 	return ndlp->nlp_state;
1995 }
1996 
1997 static uint32_t
1998 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1999 			 void *arg, uint32_t evt)
2000 {
2001 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2002 
2003 	lpfc_rcv_prli(vport, ndlp, cmdiocb);
2004 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2005 	return ndlp->nlp_state;
2006 }
2007 
2008 static uint32_t
2009 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2010 			 void *arg, uint32_t evt)
2011 {
2012 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2013 
2014 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2015 	return ndlp->nlp_state;
2016 }
2017 
2018 static uint32_t
2019 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2020 			   void *arg, uint32_t evt)
2021 {
2022 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2023 
2024 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2025 	return ndlp->nlp_state;
2026 }
2027 
2028 static uint32_t
2029 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2030 			 void *arg, uint32_t evt)
2031 {
2032 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2033 
2034 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
2035 	return ndlp->nlp_state;
2036 }
2037 
2038 static uint32_t
2039 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
2040 			     struct lpfc_nodelist *ndlp,
2041 			     void *arg,
2042 			     uint32_t evt)
2043 {
2044 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2045 
2046 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
2047 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2048 	spin_lock_irq(shost->host_lock);
2049 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2050 	spin_unlock_irq(shost->host_lock);
2051 	lpfc_disc_set_adisc(vport, ndlp);
2052 
2053 	return ndlp->nlp_state;
2054 }
2055 
2056 static uint32_t
2057 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2058 			   void *arg, uint32_t evt)
2059 {
2060 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2061 
2062 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
2063 	return ndlp->nlp_state;
2064 }
2065 
2066 static uint32_t
2067 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2068 			  void *arg, uint32_t evt)
2069 {
2070 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2071 
2072 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
2073 	return ndlp->nlp_state;
2074 }
2075 
2076 static uint32_t
2077 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2078 			  void *arg, uint32_t evt)
2079 {
2080 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2081 
2082 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2083 	return ndlp->nlp_state;
2084 }
2085 
2086 static uint32_t
2087 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
2088 			    struct lpfc_nodelist *ndlp,
2089 			    void *arg, uint32_t evt)
2090 {
2091 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2092 
2093 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2094 	return ndlp->nlp_state;
2095 }
2096 
2097 static uint32_t
2098 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2099 			  void *arg, uint32_t evt)
2100 {
2101 	struct lpfc_hba  *phba = vport->phba;
2102 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2103 
2104 	/* flush the target */
2105 	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2106 			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
2107 
2108 	/* Treat like rcv logo */
2109 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
2110 	return ndlp->nlp_state;
2111 }
2112 
2113 static uint32_t
2114 lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
2115 			      struct lpfc_nodelist *ndlp,
2116 			      void *arg,
2117 			      uint32_t evt)
2118 {
2119 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2120 
2121 	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
2122 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2123 	spin_lock_irq(shost->host_lock);
2124 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2125 	spin_unlock_irq(shost->host_lock);
2126 	lpfc_disc_set_adisc(vport, ndlp);
2127 	return ndlp->nlp_state;
2128 }
2129 
2130 static uint32_t
2131 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2132 			void *arg, uint32_t evt)
2133 {
2134 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2135 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
2136 
2137 	/* Ignore PLOGI if we have an outstanding LOGO */
2138 	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
2139 		return ndlp->nlp_state;
2140 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
2141 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2142 		spin_lock_irq(shost->host_lock);
2143 		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
2144 		spin_unlock_irq(shost->host_lock);
2145 	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2146 		/* send PLOGI immediately, move to PLOGI issue state */
2147 		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2148 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2149 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2150 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2151 		}
2152 	}
2153 	return ndlp->nlp_state;
2154 }
2155 
2156 static uint32_t
2157 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2158 		       void *arg, uint32_t evt)
2159 {
2160 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2161 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2162 	struct ls_rjt     stat;
2163 
2164 	memset(&stat, 0, sizeof (struct ls_rjt));
2165 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
2166 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
2167 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
2168 
2169 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2170 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2171 			spin_lock_irq(shost->host_lock);
2172 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2173 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2174 			spin_unlock_irq(shost->host_lock);
2175 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2176 			lpfc_issue_els_adisc(vport, ndlp, 0);
2177 		} else {
2178 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2179 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2180 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2181 		}
2182 	}
2183 	return ndlp->nlp_state;
2184 }
2185 
2186 static uint32_t
2187 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
2188 		       void *arg, uint32_t evt)
2189 {
2190 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2191 
2192 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
2193 	return ndlp->nlp_state;
2194 }
2195 
2196 static uint32_t
2197 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2198 			 void *arg, uint32_t evt)
2199 {
2200 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2201 
2202 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
2203 	/*
2204 	 * Do not start discovery if discovery is about to start
2205 	 * or discovery is in progress for this node. Starting discovery
2206 	 * here will affect the counting of discovery threads.
2207 	 */
2208 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
2209 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
2210 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
2211 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2212 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2213 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2214 			lpfc_issue_els_adisc(vport, ndlp, 0);
2215 		} else {
2216 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
2217 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2218 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2219 		}
2220 	}
2221 	return ndlp->nlp_state;
2222 }
2223 
2224 static uint32_t
2225 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2226 		       void *arg, uint32_t evt)
2227 {
2228 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2229 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
2230 
2231 	spin_lock_irq(shost->host_lock);
2232 	ndlp->nlp_flag |= NLP_LOGO_ACC;
2233 	spin_unlock_irq(shost->host_lock);
2234 
2235 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
2236 
2237 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
2238 		mod_timer(&ndlp->nlp_delayfunc,
2239 			  jiffies + msecs_to_jiffies(1000 * 1));
2240 		spin_lock_irq(shost->host_lock);
2241 		ndlp->nlp_flag |= NLP_DELAY_TMO;
2242 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2243 		spin_unlock_irq(shost->host_lock);
2244 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
2245 	} else {
2246 		spin_lock_irq(shost->host_lock);
2247 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2248 		spin_unlock_irq(shost->host_lock);
2249 	}
2250 	return ndlp->nlp_state;
2251 }
2252 
2253 static uint32_t
2254 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2255 			 void *arg, uint32_t evt)
2256 {
2257 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2258 	IOCB_t *irsp;
2259 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2260 
2261 	cmdiocb = (struct lpfc_iocbq *) arg;
2262 	rspiocb = cmdiocb->context_un.rsp_iocb;
2263 
2264 	irsp = &rspiocb->iocb;
2265 	if (irsp->ulpStatus) {
2266 		spin_lock_irq(shost->host_lock);
2267 		ndlp->nlp_flag |= NLP_DEFER_RM;
2268 		spin_unlock_irq(shost->host_lock);
2269 		return NLP_STE_FREED_NODE;
2270 	}
2271 	return ndlp->nlp_state;
2272 }
2273 
2274 static uint32_t
2275 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2276 			void *arg, uint32_t evt)
2277 {
2278 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2279 	IOCB_t *irsp;
2280 
2281 	cmdiocb = (struct lpfc_iocbq *) arg;
2282 	rspiocb = cmdiocb->context_un.rsp_iocb;
2283 
2284 	irsp = &rspiocb->iocb;
2285 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2286 		lpfc_drop_node(vport, ndlp);
2287 		return NLP_STE_FREED_NODE;
2288 	}
2289 	return ndlp->nlp_state;
2290 }
2291 
2292 static uint32_t
2293 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2294 			void *arg, uint32_t evt)
2295 {
2296 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2297 
2298 	/* For the fabric port just clear the fc flags. */
2299 	if (ndlp->nlp_DID == Fabric_DID) {
2300 		spin_lock_irq(shost->host_lock);
2301 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
2302 		spin_unlock_irq(shost->host_lock);
2303 	}
2304 	lpfc_unreg_rpi(vport, ndlp);
2305 	return ndlp->nlp_state;
2306 }
2307 
2308 static uint32_t
2309 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2310 			 void *arg, uint32_t evt)
2311 {
2312 	struct lpfc_iocbq *cmdiocb, *rspiocb;
2313 	IOCB_t *irsp;
2314 
2315 	cmdiocb = (struct lpfc_iocbq *) arg;
2316 	rspiocb = cmdiocb->context_un.rsp_iocb;
2317 
2318 	irsp = &rspiocb->iocb;
2319 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
2320 		lpfc_drop_node(vport, ndlp);
2321 		return NLP_STE_FREED_NODE;
2322 	}
2323 	return ndlp->nlp_state;
2324 }
2325 
2326 static uint32_t
2327 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2328 			    struct lpfc_nodelist *ndlp,
2329 			    void *arg, uint32_t evt)
2330 {
2331 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2332 	MAILBOX_t    *mb = &pmb->u.mb;
2333 
2334 	if (!mb->mbxStatus) {
2335 		/* SLI4 ports have preallocated logical rpis. */
2336 		if (vport->phba->sli_rev < LPFC_SLI_REV4)
2337 			ndlp->nlp_rpi = mb->un.varWords[0];
2338 		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
2339 		if (ndlp->nlp_flag & NLP_LOGO_ACC) {
2340 			lpfc_unreg_rpi(vport, ndlp);
2341 		}
2342 	} else {
2343 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2344 			lpfc_drop_node(vport, ndlp);
2345 			return NLP_STE_FREED_NODE;
2346 		}
2347 	}
2348 	return ndlp->nlp_state;
2349 }
2350 
2351 static uint32_t
2352 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2353 			void *arg, uint32_t evt)
2354 {
2355 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2356 
2357 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2358 		spin_lock_irq(shost->host_lock);
2359 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2360 		spin_unlock_irq(shost->host_lock);
2361 		return ndlp->nlp_state;
2362 	}
2363 	lpfc_drop_node(vport, ndlp);
2364 	return NLP_STE_FREED_NODE;
2365 }
2366 
2367 static uint32_t
2368 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2369 			   void *arg, uint32_t evt)
2370 {
2371 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2372 
2373 	/* Don't do anything that will mess up processing of the
2374 	 * previous RSCN.
2375 	 */
2376 	if (vport->fc_flag & FC_RSCN_DEFERRED)
2377 		return ndlp->nlp_state;
2378 
2379 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
2380 	spin_lock_irq(shost->host_lock);
2381 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2382 	spin_unlock_irq(shost->host_lock);
2383 	return ndlp->nlp_state;
2384 }
2385 
2386 
2387 /* This next section defines the NPort Discovery State Machine */
2388 
2389 /* There are 4 different doubly linked lists that nodelist entries can reside on.
2390  * The plogi list and adisc list are used when Link Up discovery or RSCN
2391  * processing is needed. Each list holds the nodes that we will send PLOGI
2392  * or ADISC on. These lists will keep track of what nodes will be affected
2393  * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
2394  * The unmapped_list will contain all nodes that we have successfully logged
2395  * into at the Fibre Channel level. The mapped_list will contain all nodes
2396  * that are mapped FCP targets.
2397  */
2398 /*
2399  * The bind list is a list of undiscovered (potentially non-existent) nodes
2400  * that we have saved binding information on. This information is used when
2401  * nodes transition from the unmapped to the mapped list.
2402  */
2403 /* For UNUSED_NODE state, the node has just been allocated.
2404  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2405  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2406  * and put on the unmapped list. For ADISC processing, the node is taken off
2407  * the ADISC list and placed on either the mapped or unmapped list (depending
2408  * on its previous state). Once on the unmapped list, a PRLI is issued and the
2409  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2410  * changed to UNMAPPED_NODE. If the completion indicates a mapped
2411  * node, the node is taken off the unmapped list. The binding list is checked
2412  * for a valid binding, or a binding is automatically assigned. If binding
2413  * assignment is unsuccessful, the node is left on the unmapped list. If
2414  * binding assignment is successful, the associated binding list entry (if
2415  * any) is removed, and the node is placed on the mapped list.
2416  */
2417 /*
2418  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2419  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2420  * expire, all effected nodes will receive a DEVICE_RM event.
2421  * expire, all affected nodes will receive a DEVICE_RM event.
2422 /*
2423  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2424  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2425  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2426  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2427  * we will first process the ADISC list.  32 entries are processed initially and
2428  * ADISC is initited for each one.  Completions / Events for each node are
2429  * ADISC is initiated for each one.  Completions / Events for each node are
2430  * funneled through the state machine.  As each node finishes ADISC processing, it
2431  * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
2432  * waiting, and the ADISC list count is identically 0, then we are done. For
2433  * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
2434  * can issue a CLEAR_LA and re-enable Link Events. Next we will process the PLOGI
2435  * list.  32 entries are processed initially and PLOGI is initiated for each one.
2436  * Completions / Events for each node are funneled through the state machine.  As
2437  * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
2438  * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
2439  * identically 0, then we are done. We have now completed discovery / RSCN
2440  * unmapped lists.
2441  */
2442 
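/*
 * Illustrative sketch (not driver code): the table below is a flat array of
 * NLP_STE_MAX_STATE x NLP_EVT_MAX_EVENT action routines, indexed by the pair
 * (current state, event).  A lookup therefore computes one linear index,
 * mirroring what lpfc_disc_state_machine() does at the end of this file;
 * NLP_EVT_RCV_PLOGI is used here only as an example event:
 *
 *	uint32_t (*action)(struct lpfc_vport *, struct lpfc_nodelist *,
 *			   void *, uint32_t);
 *
 *	action = lpfc_disc_action[(ndlp->nlp_state * NLP_EVT_MAX_EVENT) +
 *				  NLP_EVT_RCV_PLOGI];
 *	rc = action(vport, ndlp, cmdiocb, NLP_EVT_RCV_PLOGI);
 */
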
2443 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2444      (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2445 	/* Action routine                  Event       Current State  */
2446 	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
2447 	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
2448 	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
2449 	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
2450 	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
2451 	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
2452 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2453 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2454 	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
2455 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2456 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2457 	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
2458 	lpfc_device_recov_unused_node,	/* DEVICE_RECOVERY */
2459 
2460 	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
2461 	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
2462 	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
2463 	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
2464 	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
2465 	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
2466 	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
2467 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2468 	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
2469 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2470 	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2471 	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
2472 	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
2473 
2474 	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
2475 	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
2476 	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
2477 	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
2478 	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
2479 	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
2480 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2481 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2482 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2483 	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
2484 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2485 	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
2486 	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
2487 
2488 	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
2489 	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
2490 	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
2491 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
2492 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
2493 	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
2494 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2495 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2496 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2497 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2498 	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
2499 	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
2500 	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2501 
2502 	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
2503 	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
2504 	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
2505 	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
2506 	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
2507 	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
2508 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2509 	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
2510 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2511 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2512 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2513 	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2514 	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2515 
2516 	lpfc_rcv_plogi_logo_issue,	/* RCV_PLOGI   LOGO_ISSUE     */
2517 	lpfc_rcv_prli_logo_issue,	/* RCV_PRLI        */
2518 	lpfc_rcv_logo_logo_issue,	/* RCV_LOGO        */
2519 	lpfc_rcv_padisc_logo_issue,	/* RCV_ADISC       */
2520 	lpfc_rcv_padisc_logo_issue,	/* RCV_PDISC       */
2521 	lpfc_rcv_prlo_logo_issue,	/* RCV_PRLO        */
2522 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2523 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2524 	lpfc_cmpl_logo_logo_issue,	/* CMPL_LOGO       */
2525 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2526 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2527 	lpfc_device_rm_logo_issue,	/* DEVICE_RM       */
2528 	lpfc_device_recov_logo_issue,	/* DEVICE_RECOVERY */
2529 
2530 	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2531 	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2532 	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2533 	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2534 	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2535 	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2536 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2537 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2538 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2539 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2540 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2541 	lpfc_disc_illegal,		/* DEVICE_RM       */
2542 	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2543 
2544 	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2545 	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2546 	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2547 	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2548 	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2549 	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2550 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2551 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2552 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2553 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2554 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2555 	lpfc_disc_illegal,		/* DEVICE_RM       */
2556 	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2557 
2558 	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2559 	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2560 	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2561 	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2562 	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2563 	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2564 	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2565 	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2566 	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2567 	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2568 	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2569 	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2570 	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2571 };
2572 
2573 int
2574 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2575 			void *arg, uint32_t evt)
2576 {
2577 	uint32_t cur_state, rc;
2578 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2579 			 uint32_t);
2580 	uint32_t got_ndlp = 0;
2581 
2582 	if (lpfc_nlp_get(ndlp))
2583 		got_ndlp = 1;
2584 
2585 	cur_state = ndlp->nlp_state;
2586 
2587 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2588 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2589 			 "0211 DSM in event x%x on NPort x%x in "
2590 			 "state %d Data: x%x\n",
2591 			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2592 
2593 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2594 		 "DSM in:          evt:%d ste:%d did:x%x",
2595 		evt, cur_state, ndlp->nlp_DID);
2596 
2597 	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2598 	rc = (func) (vport, ndlp, arg, evt);
2599 
2600 	/* DSM out state <rc> on NPort <nlp_DID> */
2601 	if (got_ndlp) {
2602 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2603 			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2604 			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2605 
2606 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2607 			"DSM out:         ste:%d did:x%x flg:x%x",
2608 			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2609 		/* Decrement the ndlp reference count held for this function */
2610 		lpfc_nlp_put(ndlp);
2611 	} else {
2612 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2613 			"0213 DSM out state %d on NPort free\n", rc);
2614 
2615 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2616 			"DSM out:         ste:%d did:x%x flg:x%x",
2617 			rc, 0, 0);
2618 	}
2619 
2620 	return rc;
2621 }
2622
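/*
 * Usage sketch (an assumption, based on how the rest of the driver feeds
 * events to this entry point, e.g. the unsolicited ELS handling in
 * lpfc_els.c): callers go through lpfc_disc_state_machine() rather than
 * calling the per-state handlers directly, and must check the returned
 * state before touching the node again:
 *
 *	rc = lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
 *	if (rc == NLP_STE_FREED_NODE)
 *		return;		(node was dropped; do not dereference ndlp)
 */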