1  /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43 #include "lpfc_debugfs.h"
44 
45 
46 /* Called to verify a rcv'ed ADISC was intended for us. */
47 static int
48 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49 		 struct lpfc_name *nn, struct lpfc_name *pn)
50 {
	/* Compare the ADISC rsp WWNN / WWPN with our internal node
	 * table entry for that node.
	 */
54 	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
55 		return 0;
56 
57 	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
58 		return 0;
59 
60 	/* we match, return success */
61 	return 1;
62 }
63 
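/* Validate the service parameters in a PLOGI / FLOGI payload against the
 * local port's service parameters.  For non-FLOGI payloads, receive data
 * field sizes larger than ours are clamped to our values, and the remote
 * node / port names are saved in the ndlp on success.  Returns 1 if the
 * parameters are acceptable, 0 otherwise.
 */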
64 int
65 lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
66 		 struct serv_parm *sp, uint32_t class, int flogi)
67 {
68 	volatile struct serv_parm *hsp = &vport->fc_sparam;
69 	uint16_t hsp_value, ssp_value = 0;
70 
71 	/*
72 	 * The receive data field size and buffer-to-buffer receive data field
73 	 * size entries are 16 bits but are represented as two 8-bit fields in
74 	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as 16-bit values before
76 	 * correcting the byte values.
77 	 */
78 	if (sp->cls1.classValid) {
79 		if (!flogi) {
80 			hsp_value = ((hsp->cls1.rcvDataSizeMsb << 8) |
81 				     hsp->cls1.rcvDataSizeLsb);
82 			ssp_value = ((sp->cls1.rcvDataSizeMsb << 8) |
83 				     sp->cls1.rcvDataSizeLsb);
84 			if (!ssp_value)
85 				goto bad_service_param;
86 			if (ssp_value > hsp_value) {
87 				sp->cls1.rcvDataSizeLsb =
88 					hsp->cls1.rcvDataSizeLsb;
89 				sp->cls1.rcvDataSizeMsb =
90 					hsp->cls1.rcvDataSizeMsb;
91 			}
92 		}
93 	} else if (class == CLASS1)
94 		goto bad_service_param;
95 	if (sp->cls2.classValid) {
96 		if (!flogi) {
97 			hsp_value = ((hsp->cls2.rcvDataSizeMsb << 8) |
98 				     hsp->cls2.rcvDataSizeLsb);
99 			ssp_value = ((sp->cls2.rcvDataSizeMsb << 8) |
100 				     sp->cls2.rcvDataSizeLsb);
101 			if (!ssp_value)
102 				goto bad_service_param;
103 			if (ssp_value > hsp_value) {
104 				sp->cls2.rcvDataSizeLsb =
105 					hsp->cls2.rcvDataSizeLsb;
106 				sp->cls2.rcvDataSizeMsb =
107 					hsp->cls2.rcvDataSizeMsb;
108 			}
109 		}
110 	} else if (class == CLASS2)
111 		goto bad_service_param;
112 	if (sp->cls3.classValid) {
113 		if (!flogi) {
114 			hsp_value = ((hsp->cls3.rcvDataSizeMsb << 8) |
115 				     hsp->cls3.rcvDataSizeLsb);
116 			ssp_value = ((sp->cls3.rcvDataSizeMsb << 8) |
117 				     sp->cls3.rcvDataSizeLsb);
118 			if (!ssp_value)
119 				goto bad_service_param;
120 			if (ssp_value > hsp_value) {
121 				sp->cls3.rcvDataSizeLsb =
122 					hsp->cls3.rcvDataSizeLsb;
123 				sp->cls3.rcvDataSizeMsb =
124 					hsp->cls3.rcvDataSizeMsb;
125 			}
126 		}
127 	} else if (class == CLASS3)
128 		goto bad_service_param;
129 
130 	/*
131 	 * Preserve the upper four bits of the MSB from the PLOGI response.
132 	 * These bits contain the Buffer-to-Buffer State Change Number
133 	 * from the target and need to be passed to the FW.
134 	 */
135 	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
136 	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
137 	if (ssp_value > hsp_value) {
138 		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
139 		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
140 				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
141 	}
142 
143 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
144 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
145 	return 1;
146 bad_service_param:
147 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
148 			 "0207 Device %x "
149 			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
150 			 "invalid service parameters.  Ignoring device.\n",
151 			 ndlp->nlp_DID,
152 			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
153 			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
154 			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
155 			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
156 	return 0;
157 }
158 
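/* Return a pointer to the payload of the ELS response associated with
 * this completed ELS command, skipping the leading command word.  If the
 * command buffer was already released (e.g. by lpfc_els_abort), force a
 * LOCAL_REJECT / SLI_ABORTED status in the response IOCB and return NULL.
 */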
159 static void *
160 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
161 			struct lpfc_iocbq *rspiocb)
162 {
163 	struct lpfc_dmabuf *pcmd, *prsp;
164 	uint32_t *lp;
165 	void     *ptr = NULL;
166 	IOCB_t   *irsp;
167 
168 	irsp = &rspiocb->iocb;
169 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
170 
	/* For lpfc_els_abort, context2 could be zeroed to delay
	 * freeing the associated memory until after the ABTS completes.
173 	 */
174 	if (pcmd) {
175 		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
176 				       list);
177 		if (prsp) {
178 			lp = (uint32_t *) prsp->virt;
179 			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
180 		}
181 	} else {
182 		/* Force ulpStatus error since we are returning NULL ptr */
183 		if (!(irsp->ulpStatus)) {
184 			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
185 			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
186 		}
187 		ptr = NULL;
188 	}
189 	return ptr;
190 }
191 
192 
193 /*
194  * Free resources / clean up outstanding I/Os
 * associated with an LPFC_NODELIST entry. This
196  * routine effectively results in a "software abort".
197  */
198 int
199 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
200 {
201 	LIST_HEAD(completions);
202 	struct lpfc_sli  *psli = &phba->sli;
203 	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
204 	struct lpfc_iocbq *iocb, *next_iocb;
205 
206 	/* Abort outstanding I/O on NPort <nlp_DID> */
207 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
208 			 "0205 Abort outstanding I/O on NPort x%x "
209 			 "Data: x%x x%x x%x\n",
210 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
211 			 ndlp->nlp_rpi);
212 
213 	lpfc_fabric_abort_nport(ndlp);
214 
215 	/* First check the txq */
216 	spin_lock_irq(&phba->hbalock);
217 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
218 		/* Check to see if iocb matches the nport we are looking for */
219 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so dequeue it and call compl with an error */
221 			list_move_tail(&iocb->list, &completions);
222 			pring->txq_cnt--;
223 		}
224 	}
225 
226 	/* Next check the txcmplq */
227 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
228 		/* Check to see if iocb matches the nport we are looking for */
229 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
230 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
231 		}
232 	}
233 	spin_unlock_irq(&phba->hbalock);
234 
235 	/* Cancel all the IOCBs from the completions list */
236 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
237 			      IOERR_SLI_ABORTED);
238 
239 	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
240 	return 0;
241 }
242 
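/* Handle a received PLOGI: resolve any pt2pt FLOGI collision, validate the
 * WWNs and service parameters, register the remote port's RPI and send an
 * ACC (or an LS_RJT when the login cannot be accepted).  Returns 0 if the
 * PLOGI was rejected outright, non-zero otherwise.
 */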
243 static int
244 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
245 	       struct lpfc_iocbq *cmdiocb)
246 {
247 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
248 	struct lpfc_hba    *phba = vport->phba;
249 	struct lpfc_dmabuf *pcmd;
250 	uint32_t *lp;
251 	IOCB_t *icmd;
252 	struct serv_parm *sp;
253 	LPFC_MBOXQ_t *mbox;
254 	struct ls_rjt stat;
255 	int rc;
256 
257 	memset(&stat, 0, sizeof (struct ls_rjt));
258 	if (vport->port_state <= LPFC_FDISC) {
259 		/* Before responding to PLOGI, check for pt2pt mode.
260 		 * If we are pt2pt, with an outstanding FLOGI, abort
261 		 * the FLOGI and resend it first.
262 		 */
263 		if (vport->fc_flag & FC_PT2PT) {
			lpfc_els_abort_flogi(phba);
			if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
266 				/* If the other side is supposed to initiate
267 				 * the PLOGI anyway, just ACC it now and
268 				 * move on with discovery.
269 				 */
270 				phba->fc_edtov = FF_DEF_EDTOV;
271 				phba->fc_ratov = FF_DEF_RATOV;
272 				/* Start discovery - this should just do
273 				   CLEAR_LA */
274 				lpfc_disc_start(vport);
275 			} else
276 				lpfc_initial_flogi(vport);
277 		} else {
278 			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
279 			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
280 			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
281 					    ndlp, NULL);
282 			return 0;
283 		}
284 	}
285 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
286 	lp = (uint32_t *) pcmd->virt;
287 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
288 	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
289 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0140 PLOGI Reject: invalid pname\n");
291 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
292 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
293 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
294 			NULL);
295 		return 0;
296 	}
297 	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
298 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0141 PLOGI Reject: invalid nname\n");
300 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
301 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
302 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
303 			NULL);
304 		return 0;
305 	}
	if (lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0) == 0) {
		/* Reject this request because of invalid parameters */
308 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
309 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
310 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
311 			NULL);
312 		return 0;
313 	}
314 	icmd = &cmdiocb->iocb;
315 
316 	/* PLOGI chkparm OK */
317 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
318 			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
319 			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
320 			 ndlp->nlp_rpi);
321 
322 	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
323 		ndlp->nlp_fcp_info |= CLASS2;
324 	else
325 		ndlp->nlp_fcp_info |= CLASS3;
326 
327 	ndlp->nlp_class_sup = 0;
328 	if (sp->cls1.classValid)
329 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
330 	if (sp->cls2.classValid)
331 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
332 	if (sp->cls3.classValid)
333 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
334 	if (sp->cls4.classValid)
335 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
336 	ndlp->nlp_maxframe =
337 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
338 
339 	/* no need to reg_login if we are already in one of these states */
340 	switch (ndlp->nlp_state) {
341 	case  NLP_STE_NPR_NODE:
342 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
343 			break;
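		/* Fall through */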
344 	case  NLP_STE_REG_LOGIN_ISSUE:
345 	case  NLP_STE_PRLI_ISSUE:
346 	case  NLP_STE_UNMAPPED_NODE:
347 	case  NLP_STE_MAPPED_NODE:
348 		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
349 		return 1;
350 	}
351 
352 	if ((vport->fc_flag & FC_PT2PT) &&
353 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
354 		/* rcv'ed PLOGI decides what our NPortId will be */
355 		vport->fc_myDID = icmd->un.rcvels.parmRo;
356 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
357 		if (mbox == NULL)
358 			goto out;
359 		lpfc_config_link(phba, mbox);
360 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
361 		mbox->vport = vport;
362 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
363 		if (rc == MBX_NOT_FINISHED) {
364 			mempool_free(mbox, phba->mbox_mem_pool);
365 			goto out;
366 		}
367 
368 		lpfc_can_disctmo(vport);
369 	}
370 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
371 	if (!mbox)
372 		goto out;
373 
374 	rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
375 			    (uint8_t *) sp, mbox, 0);
376 	if (rc) {
377 		mempool_free(mbox, phba->mbox_mem_pool);
378 		goto out;
379 	}
380 
381 	/* ACC PLOGI rsp command needs to execute first,
382 	 * queue this mbox command to be processed later.
383 	 */
384 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
385 	/*
386 	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
387 	 * command issued in lpfc_cmpl_els_acc().
388 	 */
389 	mbox->vport = vport;
390 	spin_lock_irq(shost->host_lock);
391 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
392 	spin_unlock_irq(shost->host_lock);
393 
394 	/*
395 	 * If there is an outstanding PLOGI issued, abort it before
	 * sending the ACC rsp for the received PLOGI. If the pending PLOGI
	 * is not canceled here, it will be rejected by the
	 * remote port and retried. On a configuration with a
	 * single discovery thread, this causes a huge delay in
	 * discovery. It also causes multiple state machines to
	 * run in parallel for this node.
402 	 */
403 	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
404 		/* software abort outstanding PLOGI */
405 		lpfc_els_abort(phba, ndlp);
406 	}
407 
408 	if ((vport->port_type == LPFC_NPIV_PORT &&
409 	     vport->cfg_restrict_login)) {
410 
		/* In order to preserve RPIs, we want to clean up
		 * the default RPI the firmware created to rcv
413 		 * this ELS request. The only way to do this is
414 		 * to register, then unregister the RPI.
415 		 */
416 		spin_lock_irq(shost->host_lock);
417 		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
418 		spin_unlock_irq(shost->host_lock);
419 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
420 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
421 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
422 			ndlp, mbox);
423 		return 1;
424 	}
425 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
426 	return 1;
427 out:
428 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
429 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
430 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
431 	return 0;
432 }
433 
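/* Handle a received ADISC or PDISC.  If the node / port names in the
 * payload match our node table entry, ACC the request; otherwise send an
 * LS_RJT and put the node back in NPR state with a delayed PLOGI retry.
 */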
434 static int
435 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
436 		struct lpfc_iocbq *cmdiocb)
437 {
438 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
439 	struct lpfc_dmabuf *pcmd;
440 	struct serv_parm   *sp;
441 	struct lpfc_name   *pnn, *ppn;
442 	struct ls_rjt stat;
443 	ADISC *ap;
444 	IOCB_t *icmd;
445 	uint32_t *lp;
446 	uint32_t cmd;
447 
448 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
449 	lp = (uint32_t *) pcmd->virt;
450 
451 	cmd = *lp++;
452 	if (cmd == ELS_CMD_ADISC) {
453 		ap = (ADISC *) lp;
454 		pnn = (struct lpfc_name *) & ap->nodeName;
455 		ppn = (struct lpfc_name *) & ap->portName;
456 	} else {
457 		sp = (struct serv_parm *) lp;
458 		pnn = (struct lpfc_name *) & sp->nodeName;
459 		ppn = (struct lpfc_name *) & sp->portName;
460 	}
461 
462 	icmd = &cmdiocb->iocb;
463 	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
464 		if (cmd == ELS_CMD_ADISC) {
465 			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
466 		} else {
467 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
468 					 NULL);
469 		}
470 		return 1;
471 	}
	/* Reject this request because of invalid parameters */
473 	stat.un.b.lsRjtRsvd0 = 0;
474 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
475 	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
476 	stat.un.b.vendorUnique = 0;
477 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
478 
479 	/* 1 sec timeout */
480 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
481 
482 	spin_lock_irq(shost->host_lock);
483 	ndlp->nlp_flag |= NLP_DELAY_TMO;
484 	spin_unlock_irq(shost->host_lock);
485 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
486 	ndlp->nlp_prev_state = ndlp->nlp_state;
487 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
488 	return 0;
489 }
490 
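/* Handle a received LOGO or PRLO.  ACC the request, then either tear down
 * the fabric link (Fabric_DID), schedule an FDISC or rediscovery for the
 * affected vports, or set up a delayed PLOGI retry for FCP target nodes,
 * and finally move the node to NPR state.
 */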
491 static int
492 lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
493 	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
494 {
495 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
496 	struct lpfc_hba    *phba = vport->phba;
497 	struct lpfc_vport **vports;
	int i, active_vlink_present = 0;
499 
500 	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
501 	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
502 	 * PLOGIs during LOGO storms from a device.
503 	 */
504 	spin_lock_irq(shost->host_lock);
505 	ndlp->nlp_flag |= NLP_LOGO_ACC;
506 	spin_unlock_irq(shost->host_lock);
507 	if (els_cmd == ELS_CMD_PRLO)
508 		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
509 	else
510 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
511 	if (ndlp->nlp_DID == Fabric_DID) {
512 		if (vport->port_state <= LPFC_FDISC)
513 			goto out;
514 		lpfc_linkdown_port(vport);
515 		spin_lock_irq(shost->host_lock);
516 		vport->fc_flag |= FC_VPORT_LOGO_RCVD;
517 		spin_unlock_irq(shost->host_lock);
518 		vports = lpfc_create_vport_work_array(phba);
519 		if (vports) {
520 			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
521 					i++) {
522 				if ((!(vports[i]->fc_flag &
523 					FC_VPORT_LOGO_RCVD)) &&
524 					(vports[i]->port_state > LPFC_FDISC)) {
525 					active_vlink_present = 1;
526 					break;
527 				}
528 			}
529 			lpfc_destroy_vport_work_array(phba, vports);
530 		}
531 
532 		if (active_vlink_present) {
533 			/*
534 			 * If there are other active VLinks present,
535 			 * re-instantiate the Vlink using FDISC.
536 			 */
537 			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
538 			spin_lock_irq(shost->host_lock);
539 			ndlp->nlp_flag |= NLP_DELAY_TMO;
540 			spin_unlock_irq(shost->host_lock);
541 			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
542 			vport->port_state = LPFC_FDISC;
543 		} else {
544 			spin_lock_irq(shost->host_lock);
545 			phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
546 			spin_unlock_irq(shost->host_lock);
547 			lpfc_retry_pport_discovery(phba);
548 		}
549 	} else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
550 		((ndlp->nlp_type & NLP_FCP_TARGET) ||
551 		!(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
552 		(ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
553 		/* Only try to re-login if this is NOT a Fabric Node */
554 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
555 		spin_lock_irq(shost->host_lock);
556 		ndlp->nlp_flag |= NLP_DELAY_TMO;
557 		spin_unlock_irq(shost->host_lock);
558 
559 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
560 	}
561 out:
562 	ndlp->nlp_prev_state = ndlp->nlp_state;
563 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
564 
565 	spin_lock_irq(shost->host_lock);
566 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
567 	spin_unlock_irq(shost->host_lock);
568 	/* The driver has to wait until the ACC completes before it continues
569 	 * processing the LOGO.  The action will resume in
570 	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
571 	 * unreg_login, the driver waits so the ACC does not get aborted.
572 	 */
573 	return 0;
574 }
575 
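/* Parse a received PRLI and update the node's FCP initiator / target /
 * FCP-2 capabilities, then propagate any role change to the FC transport
 * rport.
 */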
576 static void
577 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
578 	      struct lpfc_iocbq *cmdiocb)
579 {
580 	struct lpfc_dmabuf *pcmd;
581 	uint32_t *lp;
582 	PRLI *npr;
583 	struct fc_rport *rport = ndlp->rport;
584 	u32 roles;
585 
586 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
587 	lp = (uint32_t *) pcmd->virt;
588 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
589 
590 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
591 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
592 	if (npr->prliType == PRLI_FCP_TYPE) {
593 		if (npr->initiatorFunc)
594 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
595 		if (npr->targetFunc)
596 			ndlp->nlp_type |= NLP_FCP_TARGET;
597 		if (npr->Retry)
598 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
599 	}
600 	if (rport) {
601 		/* We need to update the rport role values */
602 		roles = FC_RPORT_ROLE_UNKNOWN;
603 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
604 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
605 		if (ndlp->nlp_type & NLP_FCP_TARGET)
606 			roles |= FC_RPORT_ROLE_FCP_TARGET;
607 
608 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
609 			"rport rolechg:   role:x%x did:x%x flg:x%x",
610 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
611 
612 		fc_remote_port_rolechg(rport, roles);
613 	}
614 }
615 
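/* Decide whether this node should be authenticated with ADISC on the next
 * discovery cycle.  Returns 1 and sets NLP_NPR_ADISC if ADISC will be used;
 * otherwise clears the flag (unregistering any valid RPI) and returns 0.
 */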
616 static uint32_t
617 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
618 {
619 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
620 
621 	if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
622 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
623 		return 0;
624 	}
625 
626 	if (!(vport->fc_flag & FC_PT2PT)) {
627 		/* Check config parameter use-adisc or FCP-2 */
628 		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
629 		    ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
630 			spin_lock_irq(shost->host_lock);
631 			ndlp->nlp_flag |= NLP_NPR_ADISC;
632 			spin_unlock_irq(shost->host_lock);
633 			return 1;
634 		}
635 	}
636 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
637 	lpfc_unreg_rpi(vport, ndlp);
638 	return 0;
639 }
/**
 * lpfc_release_rpi - Release an RPI by issuing an unreg_login mailbox cmd
 * @phba: Pointer to lpfc_hba structure.
 * @vport: Pointer to lpfc_vport structure.
 * @rpi: RPI to be released.
 *
 * This function sends an unreg_login mailbox command to the firmware
 * to release an RPI.
 **/
649 void
650 lpfc_release_rpi(struct lpfc_hba *phba,
651 		struct lpfc_vport *vport,
652 		uint16_t rpi)
653 {
654 	LPFC_MBOXQ_t *pmb;
655 	int rc;
656 
657 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
658 			GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			"2796 mailbox memory allocation failed\n");
	} else {
663 		lpfc_unreg_login(phba, vport->vpi, rpi, pmb);
664 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
665 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
666 		if (rc == MBX_NOT_FINISHED)
667 			mempool_free(pmb, phba->mbox_mem_pool);
668 	}
669 }
670 
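/* Default handler for events that are not legal in the node's current
 * state.  Release the RPI if a REG_LOGIN happened to complete, log the
 * illegal transition and leave the node state unchanged.
 */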
671 static uint32_t
672 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
673 		  void *arg, uint32_t evt)
674 {
675 	struct lpfc_hba *phba;
676 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
677 	MAILBOX_t *mb;
678 	uint16_t rpi;
679 
680 	phba = vport->phba;
681 	/* Release the RPI if reglogin completing */
682 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
683 		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
684 		(!pmb->u.mb.mbxStatus)) {
685 		mb = &pmb->u.mb;
686 		rpi = pmb->u.mb.un.varWords[0];
687 		lpfc_release_rpi(phba, vport, rpi);
688 	}
689 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
690 			 "0271 Illegal State Transition: node x%x "
691 			 "event x%x, state x%x Data: x%x x%x\n",
692 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
693 			 ndlp->nlp_flag);
694 	return ndlp->nlp_state;
695 }
696 
697 static uint32_t
698 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
699 		  void *arg, uint32_t evt)
700 {
701 	/* This transition is only legal if we previously
702 	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
703 	 * working on the same NPortID, do nothing for this thread
704 	 * to stop it.
705 	 */
706 	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
707 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
708 			 "0272 Illegal State Transition: node x%x "
709 			 "event x%x, state x%x Data: x%x x%x\n",
710 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
711 			 ndlp->nlp_flag);
712 	}
713 	return ndlp->nlp_state;
714 }
715 
716 /* Start of Discovery State Machine routines */
717 
718 static uint32_t
719 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
720 			   void *arg, uint32_t evt)
721 {
722 	struct lpfc_iocbq *cmdiocb;
723 
724 	cmdiocb = (struct lpfc_iocbq *) arg;
725 
726 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
727 		return ndlp->nlp_state;
728 	}
729 	return NLP_STE_FREED_NODE;
730 }
731 
732 static uint32_t
733 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
734 			 void *arg, uint32_t evt)
735 {
736 	lpfc_issue_els_logo(vport, ndlp, 0);
737 	return ndlp->nlp_state;
738 }
739 
740 static uint32_t
741 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
742 			  void *arg, uint32_t evt)
743 {
744 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
745 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
746 
747 	spin_lock_irq(shost->host_lock);
748 	ndlp->nlp_flag |= NLP_LOGO_ACC;
749 	spin_unlock_irq(shost->host_lock);
750 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
751 
752 	return ndlp->nlp_state;
753 }
754 
755 static uint32_t
756 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
757 			   void *arg, uint32_t evt)
758 {
759 	return NLP_STE_FREED_NODE;
760 }
761 
762 static uint32_t
763 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
764 			   void *arg, uint32_t evt)
765 {
766 	return NLP_STE_FREED_NODE;
767 }
768 
769 static uint32_t
770 lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
771 			   void *arg, uint32_t evt)
772 {
773 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
774 	struct lpfc_hba   *phba = vport->phba;
775 	struct lpfc_iocbq *cmdiocb = arg;
776 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
777 	uint32_t *lp = (uint32_t *) pcmd->virt;
778 	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
779 	struct ls_rjt stat;
780 	int port_cmp;
781 
782 	memset(&stat, 0, sizeof (struct ls_rjt));
783 
784 	/* For a PLOGI, we only accept if our portname is less
785 	 * than the remote portname.
786 	 */
787 	phba->fc_stat.elsLogiCol++;
788 	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
789 			  sizeof(struct lpfc_name));
790 
791 	if (port_cmp >= 0) {
792 		/* Reject this request because the remote node will accept
793 		   ours */
794 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
795 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
796 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
797 			NULL);
798 	} else {
799 		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
800 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
801 		    (vport->num_disc_nodes)) {
802 			spin_lock_irq(shost->host_lock);
803 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
804 			spin_unlock_irq(shost->host_lock);
805 			/* Check if there are more PLOGIs to be sent */
806 			lpfc_more_plogi(vport);
807 			if (vport->num_disc_nodes == 0) {
808 				spin_lock_irq(shost->host_lock);
809 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
810 				spin_unlock_irq(shost->host_lock);
811 				lpfc_can_disctmo(vport);
812 				lpfc_end_rscn(vport);
813 			}
814 		}
815 	} /* If our portname was less */
816 
817 	return ndlp->nlp_state;
818 }
819 
820 static uint32_t
821 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
822 			  void *arg, uint32_t evt)
823 {
824 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
825 	struct ls_rjt     stat;
826 
827 	memset(&stat, 0, sizeof (struct ls_rjt));
828 	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
829 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
830 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
831 	return ndlp->nlp_state;
832 }
833 
834 static uint32_t
835 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
836 			  void *arg, uint32_t evt)
837 {
838 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
839 
	/* software abort outstanding PLOGI */
841 	lpfc_els_abort(vport->phba, ndlp);
842 
843 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
844 	return ndlp->nlp_state;
845 }
846 
847 static uint32_t
848 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
849 			 void *arg, uint32_t evt)
850 {
851 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
852 	struct lpfc_hba   *phba = vport->phba;
853 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
854 
855 	/* software abort outstanding PLOGI */
856 	lpfc_els_abort(phba, ndlp);
857 
858 	if (evt == NLP_EVT_RCV_LOGO) {
859 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
860 	} else {
861 		lpfc_issue_els_logo(vport, ndlp, 0);
862 	}
863 
	/* Put ndlp in NPR state; set PLOGI timer for 1 sec */
865 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
866 	spin_lock_irq(shost->host_lock);
867 	ndlp->nlp_flag |= NLP_DELAY_TMO;
868 	spin_unlock_irq(shost->host_lock);
869 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
870 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
871 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
872 
873 	return ndlp->nlp_state;
874 }
875 
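/* Completion handler for a PLOGI issued from PLOGI_ISSUE state.  On a good
 * response, validate the service parameters and issue a REG_LOGIN mailbox
 * command, moving the node to REG_LOGIN_ISSUE state.  On any failure the
 * node is marked for deferred removal and freed.
 */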
876 static uint32_t
877 lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
878 			    struct lpfc_nodelist *ndlp,
879 			    void *arg,
880 			    uint32_t evt)
881 {
882 	struct lpfc_hba    *phba = vport->phba;
883 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
884 	struct lpfc_iocbq  *cmdiocb, *rspiocb;
885 	struct lpfc_dmabuf *pcmd, *prsp, *mp;
886 	uint32_t *lp;
887 	IOCB_t *irsp;
888 	struct serv_parm *sp;
889 	LPFC_MBOXQ_t *mbox;
890 
891 	cmdiocb = (struct lpfc_iocbq *) arg;
892 	rspiocb = cmdiocb->context_un.rsp_iocb;
893 
894 	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
895 		/* Recovery from PLOGI collision logic */
896 		return ndlp->nlp_state;
897 	}
898 
899 	irsp = &rspiocb->iocb;
900 
901 	if (irsp->ulpStatus)
902 		goto out;
903 
904 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
905 
906 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
907 
908 	lp = (uint32_t *) prsp->virt;
909 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
910 
911 	/* Some switches have FDMI servers returning 0 for WWN */
912 	if ((ndlp->nlp_DID != FDMI_DID) &&
913 		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
914 		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
915 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
916 				 "0142 PLOGI RSP: Invalid WWN.\n");
917 		goto out;
918 	}
919 	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3, 0))
920 		goto out;
921 	/* PLOGI chkparm OK */
922 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
923 			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
924 			 ndlp->nlp_DID, ndlp->nlp_state,
925 			 ndlp->nlp_flag, ndlp->nlp_rpi);
926 	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
927 		ndlp->nlp_fcp_info |= CLASS2;
928 	else
929 		ndlp->nlp_fcp_info |= CLASS3;
930 
931 	ndlp->nlp_class_sup = 0;
932 	if (sp->cls1.classValid)
933 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
934 	if (sp->cls2.classValid)
935 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
936 	if (sp->cls3.classValid)
937 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
938 	if (sp->cls4.classValid)
939 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
940 	ndlp->nlp_maxframe =
941 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
942 
943 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
944 	if (!mbox) {
945 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
946 			"0133 PLOGI: no memory for reg_login "
947 			"Data: x%x x%x x%x x%x\n",
948 			ndlp->nlp_DID, ndlp->nlp_state,
949 			ndlp->nlp_flag, ndlp->nlp_rpi);
950 		goto out;
951 	}
952 
953 	lpfc_unreg_rpi(vport, ndlp);
954 
955 	if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
956 			   (uint8_t *) sp, mbox, 0) == 0) {
957 		switch (ndlp->nlp_DID) {
958 		case NameServer_DID:
959 			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
960 			break;
961 		case FDMI_DID:
962 			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
963 			break;
964 		default:
965 			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
966 		}
967 		mbox->context2 = lpfc_nlp_get(ndlp);
968 		mbox->vport = vport;
969 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
970 		    != MBX_NOT_FINISHED) {
971 			lpfc_nlp_set_state(vport, ndlp,
972 					   NLP_STE_REG_LOGIN_ISSUE);
973 			return ndlp->nlp_state;
974 		}
975 		/* decrement node reference count to the failed mbox
976 		 * command
977 		 */
978 		lpfc_nlp_put(ndlp);
979 		mp = (struct lpfc_dmabuf *) mbox->context1;
980 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
981 		kfree(mp);
982 		mempool_free(mbox, phba->mbox_mem_pool);
983 
984 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
985 				 "0134 PLOGI: cannot issue reg_login "
986 				 "Data: x%x x%x x%x x%x\n",
987 				 ndlp->nlp_DID, ndlp->nlp_state,
988 				 ndlp->nlp_flag, ndlp->nlp_rpi);
989 	} else {
990 		mempool_free(mbox, phba->mbox_mem_pool);
991 
992 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
993 				 "0135 PLOGI: cannot format reg_login "
994 				 "Data: x%x x%x x%x x%x\n",
995 				 ndlp->nlp_DID, ndlp->nlp_state,
996 				 ndlp->nlp_flag, ndlp->nlp_rpi);
997 	}
998 
999 
1000 out:
1001 	if (ndlp->nlp_DID == NameServer_DID) {
1002 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1003 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1004 				 "0261 Cannot Register NameServer login\n");
1005 	}
1006 
1007 	spin_lock_irq(shost->host_lock);
1008 	ndlp->nlp_flag |= NLP_DEFER_RM;
1009 	spin_unlock_irq(shost->host_lock);
1010 	return NLP_STE_FREED_NODE;
1011 }
1012 
1013 static uint32_t
1014 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1015 			   void *arg, uint32_t evt)
1016 {
1017 	return ndlp->nlp_state;
1018 }
1019 
1020 static uint32_t
1021 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
1022 	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
1023 {
1024 	struct lpfc_hba *phba;
1025 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1026 	MAILBOX_t *mb = &pmb->u.mb;
1027 	uint16_t rpi;
1028 
1029 	phba = vport->phba;
1030 	/* Release the RPI */
1031 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
1032 		!mb->mbxStatus) {
1033 		rpi = pmb->u.mb.un.varWords[0];
1034 		lpfc_release_rpi(phba, vport, rpi);
1035 	}
1036 	return ndlp->nlp_state;
1037 }
1038 
1039 static uint32_t
1040 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1041 			   void *arg, uint32_t evt)
1042 {
1043 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1044 
1045 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1046 		spin_lock_irq(shost->host_lock);
1047 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1048 		spin_unlock_irq(shost->host_lock);
1049 		return ndlp->nlp_state;
1050 	} else {
1051 		/* software abort outstanding PLOGI */
1052 		lpfc_els_abort(vport->phba, ndlp);
1053 
1054 		lpfc_drop_node(vport, ndlp);
1055 		return NLP_STE_FREED_NODE;
1056 	}
1057 }
1058 
1059 static uint32_t
1060 lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
1061 			      struct lpfc_nodelist *ndlp,
1062 			      void *arg,
1063 			      uint32_t evt)
1064 {
1065 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1066 	struct lpfc_hba  *phba = vport->phba;
1067 
1068 	/* Don't do anything that will mess up processing of the
1069 	 * previous RSCN.
1070 	 */
1071 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1072 		return ndlp->nlp_state;
1073 
1074 	/* software abort outstanding PLOGI */
1075 	lpfc_els_abort(phba, ndlp);
1076 
1077 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
1078 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1079 	spin_lock_irq(shost->host_lock);
1080 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1081 	spin_unlock_irq(shost->host_lock);
1082 
1083 	return ndlp->nlp_state;
1084 }
1085 
1086 static uint32_t
1087 lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1088 			   void *arg, uint32_t evt)
1089 {
1090 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1091 	struct lpfc_hba   *phba = vport->phba;
1092 	struct lpfc_iocbq *cmdiocb;
1093 
1094 	/* software abort outstanding ADISC */
1095 	lpfc_els_abort(phba, ndlp);
1096 
1097 	cmdiocb = (struct lpfc_iocbq *) arg;
1098 
1099 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1100 		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1101 			spin_lock_irq(shost->host_lock);
1102 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1103 			spin_unlock_irq(shost->host_lock);
1104 			if (vport->num_disc_nodes)
1105 				lpfc_more_adisc(vport);
1106 		}
1107 		return ndlp->nlp_state;
1108 	}
1109 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1110 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1111 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1112 
1113 	return ndlp->nlp_state;
1114 }
1115 
1116 static uint32_t
1117 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1118 			  void *arg, uint32_t evt)
1119 {
1120 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1121 
1122 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1123 	return ndlp->nlp_state;
1124 }
1125 
1126 static uint32_t
1127 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1128 			  void *arg, uint32_t evt)
1129 {
1130 	struct lpfc_hba *phba = vport->phba;
1131 	struct lpfc_iocbq *cmdiocb;
1132 
1133 	cmdiocb = (struct lpfc_iocbq *) arg;
1134 
1135 	/* software abort outstanding ADISC */
1136 	lpfc_els_abort(phba, ndlp);
1137 
1138 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1139 	return ndlp->nlp_state;
1140 }
1141 
1142 static uint32_t
1143 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1144 			    struct lpfc_nodelist *ndlp,
1145 			    void *arg, uint32_t evt)
1146 {
1147 	struct lpfc_iocbq *cmdiocb;
1148 
1149 	cmdiocb = (struct lpfc_iocbq *) arg;
1150 
1151 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1152 	return ndlp->nlp_state;
1153 }
1154 
1155 static uint32_t
1156 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1157 			  void *arg, uint32_t evt)
1158 {
1159 	struct lpfc_iocbq *cmdiocb;
1160 
1161 	cmdiocb = (struct lpfc_iocbq *) arg;
1162 
1163 	/* Treat like rcv logo */
1164 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1165 	return ndlp->nlp_state;
1166 }
1167 
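/* Completion handler for an ADISC issued from ADISC_ISSUE state.  A failed
 * or mismatched response drops the node back to NPR with a delayed PLOGI
 * retry; a good response resumes the RPI (SLI4) and moves the node to
 * MAPPED or UNMAPPED state depending on its FCP target role.
 */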
1168 static uint32_t
1169 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1170 			    struct lpfc_nodelist *ndlp,
1171 			    void *arg, uint32_t evt)
1172 {
1173 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1174 	struct lpfc_hba   *phba = vport->phba;
1175 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1176 	IOCB_t *irsp;
1177 	ADISC *ap;
1178 	int rc;
1179 
1180 	cmdiocb = (struct lpfc_iocbq *) arg;
1181 	rspiocb = cmdiocb->context_un.rsp_iocb;
1182 
1183 	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1184 	irsp = &rspiocb->iocb;
1185 
1186 	if ((irsp->ulpStatus) ||
1187 	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1188 		/* 1 sec timeout */
1189 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1190 		spin_lock_irq(shost->host_lock);
1191 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1192 		spin_unlock_irq(shost->host_lock);
1193 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1194 
1195 		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1196 		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1197 
1198 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1199 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1200 		lpfc_unreg_rpi(vport, ndlp);
1201 		return ndlp->nlp_state;
1202 	}
1203 
1204 	if (phba->sli_rev == LPFC_SLI_REV4) {
1205 		rc = lpfc_sli4_resume_rpi(ndlp);
1206 		if (rc) {
1207 			/* Stay in state and retry. */
1208 			ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1209 			return ndlp->nlp_state;
1210 		}
1211 	}
1212 
1213 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1214 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1215 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1216 	} else {
1217 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1218 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1219 	}
1220 
1221 	return ndlp->nlp_state;
1222 }
1223 
1224 static uint32_t
1225 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1226 			   void *arg, uint32_t evt)
1227 {
1228 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1229 
1230 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1231 		spin_lock_irq(shost->host_lock);
1232 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1233 		spin_unlock_irq(shost->host_lock);
1234 		return ndlp->nlp_state;
1235 	} else {
1236 		/* software abort outstanding ADISC */
1237 		lpfc_els_abort(vport->phba, ndlp);
1238 
1239 		lpfc_drop_node(vport, ndlp);
1240 		return NLP_STE_FREED_NODE;
1241 	}
1242 }
1243 
1244 static uint32_t
1245 lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
1246 			      struct lpfc_nodelist *ndlp,
1247 			      void *arg,
1248 			      uint32_t evt)
1249 {
1250 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1251 	struct lpfc_hba  *phba = vport->phba;
1252 
1253 	/* Don't do anything that will mess up processing of the
1254 	 * previous RSCN.
1255 	 */
1256 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1257 		return ndlp->nlp_state;
1258 
1259 	/* software abort outstanding ADISC */
1260 	lpfc_els_abort(phba, ndlp);
1261 
1262 	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1263 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1264 	spin_lock_irq(shost->host_lock);
1265 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1266 	spin_unlock_irq(shost->host_lock);
1267 	lpfc_disc_set_adisc(vport, ndlp);
1268 	return ndlp->nlp_state;
1269 }
1270 
1271 static uint32_t
1272 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1273 			      struct lpfc_nodelist *ndlp,
1274 			      void *arg,
1275 			      uint32_t evt)
1276 {
1277 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1278 
1279 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1280 	return ndlp->nlp_state;
1281 }
1282 
1283 static uint32_t
1284 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1285 			     struct lpfc_nodelist *ndlp,
1286 			     void *arg,
1287 			     uint32_t evt)
1288 {
1289 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1290 
1291 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1292 	return ndlp->nlp_state;
1293 }
1294 
1295 static uint32_t
1296 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1297 			     struct lpfc_nodelist *ndlp,
1298 			     void *arg,
1299 			     uint32_t evt)
1300 {
1301 	struct lpfc_hba   *phba = vport->phba;
1302 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1303 	LPFC_MBOXQ_t	  *mb;
1304 	LPFC_MBOXQ_t	  *nextmb;
1305 	struct lpfc_dmabuf *mp;
1306 
1307 	cmdiocb = (struct lpfc_iocbq *) arg;
1308 
1309 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1310 	if ((mb = phba->sli.mbox_active)) {
1311 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1312 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1313 			lpfc_nlp_put(ndlp);
1314 			mb->context2 = NULL;
1315 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1316 		}
1317 	}
1318 
1319 	spin_lock_irq(&phba->hbalock);
1320 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1321 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
1322 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1323 			if (phba->sli_rev == LPFC_SLI_REV4) {
1324 				spin_unlock_irq(&phba->hbalock);
1325 				lpfc_sli4_free_rpi(phba,
1326 					mb->u.mb.un.varRegLogin.rpi);
1327 				spin_lock_irq(&phba->hbalock);
1328 			}
1329 			mp = (struct lpfc_dmabuf *) (mb->context1);
1330 			if (mp) {
1331 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1332 				kfree(mp);
1333 			}
1334 			lpfc_nlp_put(ndlp);
1335 			list_del(&mb->list);
1336 			phba->sli.mboxq_cnt--;
1337 			mempool_free(mb, phba->mbox_mem_pool);
1338 		}
1339 	}
1340 	spin_unlock_irq(&phba->hbalock);
1341 
1342 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1343 	return ndlp->nlp_state;
1344 }
1345 
1346 static uint32_t
1347 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1348 			       struct lpfc_nodelist *ndlp,
1349 			       void *arg,
1350 			       uint32_t evt)
1351 {
1352 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1353 
1354 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1355 	return ndlp->nlp_state;
1356 }
1357 
1358 static uint32_t
1359 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1360 			     struct lpfc_nodelist *ndlp,
1361 			     void *arg,
1362 			     uint32_t evt)
1363 {
1364 	struct lpfc_iocbq *cmdiocb;
1365 
1366 	cmdiocb = (struct lpfc_iocbq *) arg;
1367 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1368 	return ndlp->nlp_state;
1369 }
1370 
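/* Completion handler for the REG_LOGIN mailbox command issued from
 * REG_LOGIN_ISSUE state.  On failure the node goes back to NPR (with a
 * delayed PLOGI retry unless the RPI pool is exhausted); on success the
 * RPI is recorded and a PRLI is issued for non-fabric nodes.
 */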
1371 static uint32_t
1372 lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
1373 				  struct lpfc_nodelist *ndlp,
1374 				  void *arg,
1375 				  uint32_t evt)
1376 {
1377 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1378 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1379 	MAILBOX_t *mb = &pmb->u.mb;
1380 	uint32_t did  = mb->un.varWords[1];
1381 
1382 	if (mb->mbxStatus) {
1383 		/* RegLogin failed */
1384 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1385 				"0246 RegLogin failed Data: x%x x%x x%x\n",
1386 				did, mb->mbxStatus, vport->port_state);
1387 		/*
1388 		 * If RegLogin failed due to lack of HBA resources do not
1389 		 * retry discovery.
1390 		 */
1391 		if (mb->mbxStatus == MBXERR_RPI_FULL) {
1392 			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1393 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1394 			return ndlp->nlp_state;
1395 		}
1396 
		/* Put ndlp in NPR state; set PLOGI timer for 1 sec */
1398 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1399 		spin_lock_irq(shost->host_lock);
1400 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1401 		spin_unlock_irq(shost->host_lock);
1402 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1403 
1404 		lpfc_issue_els_logo(vport, ndlp, 0);
1405 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1406 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1407 		return ndlp->nlp_state;
1408 	}
1409 
1410 	ndlp->nlp_rpi = mb->un.varWords[0];
1411 	ndlp->nlp_flag |= NLP_RPI_VALID;
1412 
1413 	/* Only if we are not a fabric nport do we issue PRLI */
1414 	if (!(ndlp->nlp_type & NLP_FABRIC)) {
1415 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1416 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
1417 		lpfc_issue_els_prli(vport, ndlp, 0);
1418 	} else {
1419 		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1420 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1421 	}
1422 	return ndlp->nlp_state;
1423 }
1424 
1425 static uint32_t
1426 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1427 			      struct lpfc_nodelist *ndlp,
1428 			      void *arg,
1429 			      uint32_t evt)
1430 {
1431 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1432 
1433 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1434 		spin_lock_irq(shost->host_lock);
1435 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1436 		spin_unlock_irq(shost->host_lock);
1437 		return ndlp->nlp_state;
1438 	} else {
1439 		lpfc_drop_node(vport, ndlp);
1440 		return NLP_STE_FREED_NODE;
1441 	}
1442 }
1443 
1444 static uint32_t
1445 lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
1446 				 struct lpfc_nodelist *ndlp,
1447 				 void *arg,
1448 				 uint32_t evt)
1449 {
1450 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1451 
1452 	/* Don't do anything that will mess up processing of the
1453 	 * previous RSCN.
1454 	 */
1455 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1456 		return ndlp->nlp_state;
1457 
1458 	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1459 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1460 	spin_lock_irq(shost->host_lock);
1461 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1462 	spin_unlock_irq(shost->host_lock);
1463 	lpfc_disc_set_adisc(vport, ndlp);
1464 	return ndlp->nlp_state;
1465 }
1466 
1467 static uint32_t
1468 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1469 			  void *arg, uint32_t evt)
1470 {
1471 	struct lpfc_iocbq *cmdiocb;
1472 
1473 	cmdiocb = (struct lpfc_iocbq *) arg;
1474 
1475 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1476 	return ndlp->nlp_state;
1477 }
1478 
1479 static uint32_t
1480 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1481 			 void *arg, uint32_t evt)
1482 {
1483 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1484 
1485 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1486 	return ndlp->nlp_state;
1487 }
1488 
1489 static uint32_t
1490 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1491 			 void *arg, uint32_t evt)
1492 {
1493 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1494 
1495 	/* Software abort outstanding PRLI before sending acc */
1496 	lpfc_els_abort(vport->phba, ndlp);
1497 
1498 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1499 	return ndlp->nlp_state;
1500 }
1501 
1502 static uint32_t
1503 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1504 			   void *arg, uint32_t evt)
1505 {
1506 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1507 
1508 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1509 	return ndlp->nlp_state;
1510 }
1511 
/* This routine is invoked when we rcv a PRLO request from an nport
 * we are logged into.  We should send back a PRLO rsp setting the
1514  * appropriate bits.
1515  * NEXT STATE = PRLI_ISSUE
1516  */
1517 static uint32_t
1518 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1519 			 void *arg, uint32_t evt)
1520 {
1521 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1522 
1523 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1524 	return ndlp->nlp_state;
1525 }
1526 
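/* Completion handler for a PRLI issued from PRLI_ISSUE state.  Update the
 * node's FCP roles from the PRLI response and move it to MAPPED or
 * UNMAPPED state; on restricted-login NPIV ports a non-target node is
 * logged out and parked in NPR state instead.
 */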
1527 static uint32_t
1528 lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1529 			  void *arg, uint32_t evt)
1530 {
1531 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1532 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1533 	struct lpfc_hba   *phba = vport->phba;
1534 	IOCB_t *irsp;
1535 	PRLI *npr;
1536 
1537 	cmdiocb = (struct lpfc_iocbq *) arg;
1538 	rspiocb = cmdiocb->context_un.rsp_iocb;
1539 	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1540 
1541 	irsp = &rspiocb->iocb;
1542 	if (irsp->ulpStatus) {
1543 		if ((vport->port_type == LPFC_NPIV_PORT) &&
1544 		    vport->cfg_restrict_login) {
1545 			goto out;
1546 		}
1547 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1548 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1549 		return ndlp->nlp_state;
1550 	}
1551 
1552 	/* Check out PRLI rsp */
1553 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
1554 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
1555 	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
1556 	    (npr->prliType == PRLI_FCP_TYPE)) {
1557 		if (npr->initiatorFunc)
1558 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
1559 		if (npr->targetFunc)
1560 			ndlp->nlp_type |= NLP_FCP_TARGET;
1561 		if (npr->Retry)
1562 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
1563 	}
1564 	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
1565 	    (vport->port_type == LPFC_NPIV_PORT) &&
1566 	     vport->cfg_restrict_login) {
1567 out:
1568 		spin_lock_irq(shost->host_lock);
1569 		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
1570 		spin_unlock_irq(shost->host_lock);
1571 		lpfc_issue_els_logo(vport, ndlp, 0);
1572 
1573 		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1574 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1575 		return ndlp->nlp_state;
1576 	}
1577 
1578 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1579 	if (ndlp->nlp_type & NLP_FCP_TARGET)
1580 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1581 	else
1582 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1583 	return ndlp->nlp_state;
1584 }
1585 
1586 /*! lpfc_device_rm_prli_issue
1587  *
1588  * \pre
1589  * \post
1590  * \param   phba
1591  * \param   ndlp
1592  * \param   arg
1593  * \param   evt
1594  * \return  uint32_t
1595  *
1596  * \b Description:
 *    This routine is invoked when we receive a request to remove an nport we
 *    are in the process of PRLIing. We should software abort the outstanding
 *    PRLI, unreg the login and send a LOGO. We will change the node state to
 *    UNUSED_NODE and put it on the plogi list so it can be freed when the
 *    LOGO completes.
1601  *
1602  */
1603 
1604 static uint32_t
1605 lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1606 			  void *arg, uint32_t evt)
1607 {
1608 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1609 
1610 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1611 		spin_lock_irq(shost->host_lock);
1612 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1613 		spin_unlock_irq(shost->host_lock);
1614 		return ndlp->nlp_state;
1615 	} else {
		/* software abort outstanding PRLI */
1617 		lpfc_els_abort(vport->phba, ndlp);
1618 
1619 		lpfc_drop_node(vport, ndlp);
1620 		return NLP_STE_FREED_NODE;
1621 	}
1622 }
1623 
1624 
1625 /*! lpfc_device_recov_prli_issue
1626  *
1627  * \pre
1628  * \post
1629  * \param   phba
1630  * \param   ndlp
1631  * \param   arg
1632  * \param   evt
1633  * \return  uint32_t
1634  *
1635  * \b Description:
 *    This routine is invoked when the state of a device is unknown, such as
 *    during a link down. We should remove the nodelist entry from the
 *    unmapped list, issue an UNREG_LOGIN, do a software abort of the
 *    outstanding PRLI command, then free the node entry.
1640  */
1641 static uint32_t
1642 lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
1643 			     struct lpfc_nodelist *ndlp,
1644 			     void *arg,
1645 			     uint32_t evt)
1646 {
1647 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1648 	struct lpfc_hba  *phba = vport->phba;
1649 
1650 	/* Don't do anything that will mess up processing of the
1651 	 * previous RSCN.
1652 	 */
1653 	if (vport->fc_flag & FC_RSCN_DEFERRED)
1654 		return ndlp->nlp_state;
1655 
1656 	/* software abort outstanding PRLI */
1657 	lpfc_els_abort(phba, ndlp);
1658 
1659 	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
1660 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1661 	spin_lock_irq(shost->host_lock);
1662 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1663 	spin_unlock_irq(shost->host_lock);
1664 	lpfc_disc_set_adisc(vport, ndlp);
1665 	return ndlp->nlp_state;
1666 }
1667 
1668 static uint32_t
1669 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1670 			  void *arg, uint32_t evt)
1671 {
1672 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1673 
1674 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1675 	return ndlp->nlp_state;
1676 }
1677 
1678 static uint32_t
1679 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1680 			 void *arg, uint32_t evt)
1681 {
1682 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1683 
1684 	lpfc_rcv_prli(vport, ndlp, cmdiocb);
1685 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1686 	return ndlp->nlp_state;
1687 }
1688 
1689 static uint32_t
1690 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1691 			 void *arg, uint32_t evt)
1692 {
1693 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1694 
1695 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1696 	return ndlp->nlp_state;
1697 }
1698 
1699 static uint32_t
1700 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1701 			   void *arg, uint32_t evt)
1702 {
1703 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1704 
1705 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1706 	return ndlp->nlp_state;
1707 }
1708 
1709 static uint32_t
1710 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1711 			 void *arg, uint32_t evt)
1712 {
1713 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1714 
1715 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1716 	return ndlp->nlp_state;
1717 }
1718 
1719 static uint32_t
1720 lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
1721 			     struct lpfc_nodelist *ndlp,
1722 			     void *arg,
1723 			     uint32_t evt)
1724 {
1725 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1726 
1727 	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
1728 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1729 	spin_lock_irq(shost->host_lock);
1730 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1731 	spin_unlock_irq(shost->host_lock);
1732 	lpfc_disc_set_adisc(vport, ndlp);
1733 
1734 	return ndlp->nlp_state;
1735 }
1736 
1737 static uint32_t
1738 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1739 			   void *arg, uint32_t evt)
1740 {
1741 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1742 
1743 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1744 	return ndlp->nlp_state;
1745 }
1746 
1747 static uint32_t
1748 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1749 			  void *arg, uint32_t evt)
1750 {
1751 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1752 
1753 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1754 	return ndlp->nlp_state;
1755 }
1756 
1757 static uint32_t
1758 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1759 			  void *arg, uint32_t evt)
1760 {
1761 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1762 
1763 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1764 	return ndlp->nlp_state;
1765 }
1766 
1767 static uint32_t
1768 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
1769 			    struct lpfc_nodelist *ndlp,
1770 			    void *arg, uint32_t evt)
1771 {
1772 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1773 
1774 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1775 	return ndlp->nlp_state;
1776 }
1777 
1778 static uint32_t
1779 lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1780 			  void *arg, uint32_t evt)
1781 {
1782 	struct lpfc_hba  *phba = vport->phba;
1783 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1784 
1785 	/* flush the target */
1786 	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1787 			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
1788 
1789 	/* Treat like rcv logo */
1790 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1791 	return ndlp->nlp_state;
1792 }
1793 
1794 static uint32_t
1795 lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
1796 			      struct lpfc_nodelist *ndlp,
1797 			      void *arg,
1798 			      uint32_t evt)
1799 {
1800 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1801 
1802 	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
1803 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1804 	spin_lock_irq(shost->host_lock);
1805 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
1806 	spin_unlock_irq(shost->host_lock);
1807 	lpfc_disc_set_adisc(vport, ndlp);
1808 	return ndlp->nlp_state;
1809 }
1810 
1811 static uint32_t
1812 lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1813 			void *arg, uint32_t evt)
1814 {
1815 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1816 	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;
1817 
1818 	/* Ignore PLOGI if we have an outstanding LOGO */
1819 	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
1820 		return ndlp->nlp_state;
1821 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
1822 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
1823 		spin_lock_irq(shost->host_lock);
1824 		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
1825 		spin_unlock_irq(shost->host_lock);
1826 	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1827 		/* send PLOGI immediately, move to PLOGI issue state */
1828 		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1829 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1830 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1831 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1832 		}
1833 	}
1834 	return ndlp->nlp_state;
1835 }
1836 
1837 static uint32_t
1838 lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1839 		       void *arg, uint32_t evt)
1840 {
1841 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1842 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1843 	struct ls_rjt     stat;
1844 
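	/* The node is in NPR state, so reject the PRLI and, if no retry
	 * timer is already pending, restart the login with ADISC or PLOGI
	 * as appropriate.
	 */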
1845 	memset(&stat, 0, sizeof (struct ls_rjt));
1846 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
1847 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
1848 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
1849 
1850 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
1851 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1852 			spin_lock_irq(shost->host_lock);
1853 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1854 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1855 			spin_unlock_irq(shost->host_lock);
1856 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1857 			lpfc_issue_els_adisc(vport, ndlp, 0);
1858 		} else {
1859 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1860 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1861 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1862 		}
1863 	}
1864 	return ndlp->nlp_state;
1865 }
1866 
1867 static uint32_t
1868 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
1869 		       void *arg, uint32_t evt)
1870 {
1871 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1872 
1873 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1874 	return ndlp->nlp_state;
1875 }
1876 
1877 static uint32_t
1878 lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1879 			 void *arg, uint32_t evt)
1880 {
1881 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1882 
1883 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1884 	/*
1885 	 * Do not start discovery if discovery is about to start
1886 	 * or discovery in progress for this node. Starting discovery
1887 	 * here will affect the counting of discovery threads.
1888 	 */
1889 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
1890 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
1891 		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
1892 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1893 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1894 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
1895 			lpfc_issue_els_adisc(vport, ndlp, 0);
1896 		} else {
1897 			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
1898 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
1899 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
1900 		}
1901 	}
1902 	return ndlp->nlp_state;
1903 }
1904 
1905 static uint32_t
1906 lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1907 		       void *arg, uint32_t evt)
1908 {
1909 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1910 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1911 
1912 	spin_lock_irq(shost->host_lock);
1913 	ndlp->nlp_flag |= NLP_LOGO_ACC;
1914 	spin_unlock_irq(shost->host_lock);
1915 
1916 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
1917 
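	/* If no delayed-retry timer is pending, arm nlp_delayfunc for one
	 * second and schedule a PLOGI to be sent when it fires; in either
	 * case drop the ADISC hint for this node.
	 */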
1918 	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
1919 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
1920 		spin_lock_irq(shost->host_lock);
1921 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1922 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1923 		spin_unlock_irq(shost->host_lock);
1924 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1925 	} else {
1926 		spin_lock_irq(shost->host_lock);
1927 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1928 		spin_unlock_irq(shost->host_lock);
1929 	}
1930 	return ndlp->nlp_state;
1931 }
1932 
1933 static uint32_t
1934 lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1935 			 void *arg, uint32_t evt)
1936 {
1937 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1938 	IOCB_t *irsp;
1939 
1940 	cmdiocb = (struct lpfc_iocbq *) arg;
1941 	rspiocb = cmdiocb->context_un.rsp_iocb;
1942 
1943 	irsp = &rspiocb->iocb;
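	/* PLOGI completed with an error while in NPR state: flag the node
	 * for deferred removal and report it to the caller as freed.
	 */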
1944 	if (irsp->ulpStatus) {
1945 		ndlp->nlp_flag |= NLP_DEFER_RM;
1946 		return NLP_STE_FREED_NODE;
1947 	}
1948 	return ndlp->nlp_state;
1949 }
1950 
1951 static uint32_t
1952 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1953 			void *arg, uint32_t evt)
1954 {
1955 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1956 	IOCB_t *irsp;
1957 
1958 	cmdiocb = (struct lpfc_iocbq *) arg;
1959 	rspiocb = cmdiocb->context_un.rsp_iocb;
1960 
1961 	irsp = &rspiocb->iocb;
1962 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1963 		lpfc_drop_node(vport, ndlp);
1964 		return NLP_STE_FREED_NODE;
1965 	}
1966 	return ndlp->nlp_state;
1967 }
1968 
1969 static uint32_t
1970 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1971 			void *arg, uint32_t evt)
1972 {
1973 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
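	/* If the LOGO that just completed was directed at the fabric
	 * controller, the port is no longer fabric attached, so clear the
	 * fabric flags; in all cases release the node's RPI.
	 */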
1974 	if (ndlp->nlp_DID == Fabric_DID) {
1975 		spin_lock_irq(shost->host_lock);
1976 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1977 		spin_unlock_irq(shost->host_lock);
1978 	}
1979 	lpfc_unreg_rpi(vport, ndlp);
1980 	return ndlp->nlp_state;
1981 }
1982 
1983 static uint32_t
1984 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1985 			 void *arg, uint32_t evt)
1986 {
1987 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1988 	IOCB_t *irsp;
1989 
1990 	cmdiocb = (struct lpfc_iocbq *) arg;
1991 	rspiocb = cmdiocb->context_un.rsp_iocb;
1992 
1993 	irsp = &rspiocb->iocb;
1994 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1995 		lpfc_drop_node(vport, ndlp);
1996 		return NLP_STE_FREED_NODE;
1997 	}
1998 	return ndlp->nlp_state;
1999 }
2000 
2001 static uint32_t
2002 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
2003 			    struct lpfc_nodelist *ndlp,
2004 			    void *arg, uint32_t evt)
2005 {
2006 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
2007 	MAILBOX_t    *mb = &pmb->u.mb;
2008 
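	/* On REG_LOGIN mailbox success, record the RPI returned in the
	 * mailbox completion and mark it valid; on failure, drop the node
	 * if it was already flagged for removal.
	 */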
2009 	if (!mb->mbxStatus) {
2010 		ndlp->nlp_rpi = mb->un.varWords[0];
2011 		ndlp->nlp_flag |= NLP_RPI_VALID;
2012 	} else {
2013 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
2014 			lpfc_drop_node(vport, ndlp);
2015 			return NLP_STE_FREED_NODE;
2016 		}
2017 	}
2018 	return ndlp->nlp_state;
2019 }
2020 
2021 static uint32_t
2022 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2023 			void *arg, uint32_t evt)
2024 {
2025 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2026 
2027 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
2028 		spin_lock_irq(shost->host_lock);
2029 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
2030 		spin_unlock_irq(shost->host_lock);
2031 		return ndlp->nlp_state;
2032 	}
2033 	lpfc_drop_node(vport, ndlp);
2034 	return NLP_STE_FREED_NODE;
2035 }
2036 
2037 static uint32_t
2038 lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2039 			   void *arg, uint32_t evt)
2040 {
2041 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2042 
2043 	/* Don't do anything that will mess up processing of the
2044 	 * previous RSCN.
2045 	 */
2046 	if (vport->fc_flag & FC_RSCN_DEFERRED)
2047 		return ndlp->nlp_state;
2048 
2049 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
2050 	spin_lock_irq(shost->host_lock);
2051 	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
2052 	spin_unlock_irq(shost->host_lock);
2053 	return ndlp->nlp_state;
2054 }
2055 
2056 
2057 /* This next section defines the NPort Discovery State Machine */
2058 
2059 /* There are 4 different doubly linked lists that nodelist entries can reside
2060  * on. The plogi list and adisc list are used when Link Up discovery or RSCN
2061  * processing is needed. Each list holds the nodes that we will send PLOGI
2062  * or ADISC on. These lists keep track of which nodes will be affected by an
2063  * RSCN or a Link Up (typically, all nodes are affected on Link Up).
2064  * The unmapped_list will contain all nodes that we have successfully logged
2065  * into at the Fibre Channel level. The mapped_list will contain all nodes
2066  * that are mapped FCP targets.
2067  */
2068 /*
2069  * The bind list is a list of undiscovered (potentially non-existent) nodes
2070  * that we have saved binding information on. This information is used when
2071  * nodes transition from the unmapped to the mapped list.
2072  */
2073 /* For UNUSED_NODE state, the node has just been allocated.
2074  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
2075  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
2076  * and put on the unmapped list. For ADISC processing, the node is taken off
2077  * the ADISC list and placed on either the mapped or unmapped list (depending
2078  * on its previous state). Once on the unmapped list, a PRLI is issued and the
2079  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
2080  * changed to UNMAPPED_NODE. If the completion indicates a mapped
2081  * node, the node is taken off the unmapped list. The binding list is checked
2082  * for a valid binding, or a binding is automatically assigned. If binding
2083  * assignment is unsuccessful, the node is left on the unmapped list. If
2084  * binding assignment is successful, the associated binding list entry (if
2085  * any) is removed, and the node is placed on the mapped list.
2086  */
2087 /*
2088  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
2089  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
2090  * expire, all affected nodes will receive a DEVICE_RM event.
2091  */
2092 /*
2093  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
2094  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
2095  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
2096  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
2097  * we will first process the ADISC list.  32 entries are processed initially
2098  * and ADISC is initiated for each one.  Completions / Events for each node
2099  * are funneled through the state machine.  As each node finishes ADISC
2100  * processing, it starts ADISC for any nodes waiting for ADISC processing.
2101  * If no nodes are waiting, and the ADISC list count is identically 0, then
2102  * we are done. For Link Up discovery, since all nodes on the PLOGI list are
2103  * UNREG_LOGIN'ed, we can issue a CLEAR_LA and re-enable Link Events. Next
2104  * we will process the PLOGI list.  32 entries are processed initially and
2105  * PLOGI is initiated for each one.  Completions / Events for each node are
2106  * funneled through the state machine.  As each node finishes PLOGI
2107  * processing, it starts PLOGI for any nodes waiting for PLOGI processing.
2108  * If no nodes are waiting, and the PLOGI list count is identically 0, then
2109  * we are done. We have now completed discovery / RSCN handling. Upon
2110  * completion, ALL nodes should be on either the mapped or unmapped lists.
2111  */
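
/*
 * Illustrative note: the table below is a flat array indexed by
 * (current node state * NLP_EVT_MAX_EVENT) + event, exactly as
 * lpfc_disc_state_machine() computes it. Assuming the state and event enums
 * follow the ordering shown in the comments below, a received PRLI against a
 * node in the NPR state would dispatch roughly as:
 *
 *	func = lpfc_disc_action[(NLP_STE_NPR_NODE * NLP_EVT_MAX_EVENT) +
 *				NLP_EVT_RCV_PRLI];
 *	rc = func(vport, ndlp, arg, NLP_EVT_RCV_PRLI);
 *
 * which invokes lpfc_rcv_prli_npr_node() to reject the PRLI and restart
 * login for that node.
 */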
2112 
2113 static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
2114      (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
2115 	/* Action routine                  Event       Current State  */
2116 	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
2117 	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
2118 	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
2119 	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
2120 	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
2121 	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
2122 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2123 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2124 	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
2125 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2126 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2127 	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
2128 	lpfc_disc_illegal,		/* DEVICE_RECOVERY */
2129 
2130 	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
2131 	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
2132 	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
2133 	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
2134 	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
2135 	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
2136 	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
2137 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2138 	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
2139 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2140 	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
2141 	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
2142 	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */
2143 
2144 	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
2145 	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
2146 	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
2147 	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
2148 	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
2149 	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
2150 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2151 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2152 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2153 	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
2154 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2155 	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
2156 	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */
2157 
2158 	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
2159 	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
2160 	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
2161 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
2162 	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
2163 	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
2164 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2165 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2166 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2167 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2168 	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
2169 	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
2170 	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */
2171 
2172 	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
2173 	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
2174 	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
2175 	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
2176 	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
2177 	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
2178 	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
2179 	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
2180 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2181 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2182 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2183 	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
2184 	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */
2185 
2186 	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
2187 	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
2188 	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
2189 	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
2190 	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
2191 	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
2192 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2193 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2194 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2195 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2196 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2197 	lpfc_disc_illegal,		/* DEVICE_RM       */
2198 	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */
2199 
2200 	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
2201 	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
2202 	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
2203 	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
2204 	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
2205 	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
2206 	lpfc_disc_illegal,		/* CMPL_PLOGI      */
2207 	lpfc_disc_illegal,		/* CMPL_PRLI       */
2208 	lpfc_disc_illegal,		/* CMPL_LOGO       */
2209 	lpfc_disc_illegal,		/* CMPL_ADISC      */
2210 	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
2211 	lpfc_disc_illegal,		/* DEVICE_RM       */
2212 	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */
2213 
2214 	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
2215 	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
2216 	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
2217 	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
2218 	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
2219 	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
2220 	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
2221 	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
2222 	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
2223 	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
2224 	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
2225 	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
2226 	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
2227 };
2228 
2229 int
2230 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2231 			void *arg, uint32_t evt)
2232 {
2233 	uint32_t cur_state, rc;
2234 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2235 			 uint32_t);
2236 	uint32_t got_ndlp = 0;
2237 
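	/* Hold a reference on the ndlp across the action routine; if the
	 * get fails the node is already being released, so the "DSM out"
	 * trace below must not dereference it.
	 */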
2238 	if (lpfc_nlp_get(ndlp))
2239 		got_ndlp = 1;
2240 
2241 	cur_state = ndlp->nlp_state;
2242 
2243 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2244 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2245 			 "0211 DSM in event x%x on NPort x%x in "
2246 			 "state %d Data: x%x\n",
2247 			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2248 
2249 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2250 		 "DSM in:          evt:%d ste:%d did:x%x",
2251 		evt, cur_state, ndlp->nlp_DID);
2252 
2253 	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2254 	rc = (func) (vport, ndlp, arg, evt);
2255 
2256 	/* DSM out state <rc> on NPort <nlp_DID> */
2257 	if (got_ndlp) {
2258 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2259 			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2260 			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2261 
2262 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2263 			"DSM out:         ste:%d did:x%x flg:x%x",
2264 			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2265 		/* Decrement the ndlp reference count held for this function */
2266 		lpfc_nlp_put(ndlp);
2267 	} else {
2268 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2269 			"0213 DSM out state %d on NPort free\n", rc);
2270 
2271 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2272 			"DSM out:         ste:%d did:x%x flg:x%x",
2273 			rc, 0, 0);
2274 	}
2275 
2276 	return rc;
2277 }
2278