1  /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_transport_fc.h>
30 
31 #include "lpfc_hw.h"
32 #include "lpfc_sli.h"
33 #include "lpfc_nl.h"
34 #include "lpfc_disc.h"
35 #include "lpfc_scsi.h"
36 #include "lpfc.h"
37 #include "lpfc_logmsg.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_vport.h"
40 #include "lpfc_debugfs.h"
41 
42 
43 /* Called to verify a rcv'ed ADISC was intended for us. */
44 static int
45 lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
46 		 struct lpfc_name *nn, struct lpfc_name *pn)
47 {
48 	/* Compare the ADISC rsp WWNN / WWPN matches our internal node
49 	 * table entry for that node.
50 	 */
51 	if (memcmp(nn, &ndlp->nlp_nodename, sizeof (struct lpfc_name)))
52 		return 0;
53 
54 	if (memcmp(pn, &ndlp->nlp_portname, sizeof (struct lpfc_name)))
55 		return 0;
56 
57 	/* we match, return success */
58 	return 1;
59 }
60 
int
lpfc_check_sparm(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		 struct serv_parm * sp, uint32_t class)
{
	/* Validate the service parameters (sp) a remote port sent against
	 * our local login parameters (hsp).  Every class the remote marks
	 * valid must carry a non-zero receive data field size; sizes larger
	 * than ours are clamped down to ours in place.  On success the
	 * remote WWNN/WWPN are cached in the node entry and 1 is returned;
	 * on any bad parameter the device is logged and 0 is returned.
	 * NOTE(review): hsp is read through a volatile pointer, presumably
	 * because vport->fc_sparam can be rewritten asynchronously — confirm
	 * before changing this.
	 */
	volatile struct serv_parm *hsp = &vport->fc_sparam;
	uint16_t hsp_value, ssp_value = 0;

	/*
	 * The receive data field size and buffer-to-buffer receive data field
	 * size entries are 16 bits but are represented as two 8-bit fields in
	 * the driver data structure to account for rsvd bits and other control
	 * bits.  Reconstruct and compare the fields as a 16-bit values before
	 * correcting the byte values.
	 */
	if (sp->cls1.classValid) {
		hsp_value = (hsp->cls1.rcvDataSizeMsb << 8) |
				hsp->cls1.rcvDataSizeLsb;
		ssp_value = (sp->cls1.rcvDataSizeMsb << 8) |
				sp->cls1.rcvDataSizeLsb;
		/* A zero receive data field size is illegal */
		if (!ssp_value)
			goto bad_service_param;
		/* Clamp the remote's size down to ours if it is larger */
		if (ssp_value > hsp_value) {
			sp->cls1.rcvDataSizeLsb = hsp->cls1.rcvDataSizeLsb;
			sp->cls1.rcvDataSizeMsb = hsp->cls1.rcvDataSizeMsb;
		}
	} else if (class == CLASS1) {
		/* Caller requires class 1 but the remote does not offer it */
		goto bad_service_param;
	}

	if (sp->cls2.classValid) {
		hsp_value = (hsp->cls2.rcvDataSizeMsb << 8) |
				hsp->cls2.rcvDataSizeLsb;
		ssp_value = (sp->cls2.rcvDataSizeMsb << 8) |
				sp->cls2.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls2.rcvDataSizeLsb = hsp->cls2.rcvDataSizeLsb;
			sp->cls2.rcvDataSizeMsb = hsp->cls2.rcvDataSizeMsb;
		}
	} else if (class == CLASS2) {
		/* Caller requires class 2 but the remote does not offer it */
		goto bad_service_param;
	}

	if (sp->cls3.classValid) {
		hsp_value = (hsp->cls3.rcvDataSizeMsb << 8) |
				hsp->cls3.rcvDataSizeLsb;
		ssp_value = (sp->cls3.rcvDataSizeMsb << 8) |
				sp->cls3.rcvDataSizeLsb;
		if (!ssp_value)
			goto bad_service_param;
		if (ssp_value > hsp_value) {
			sp->cls3.rcvDataSizeLsb = hsp->cls3.rcvDataSizeLsb;
			sp->cls3.rcvDataSizeMsb = hsp->cls3.rcvDataSizeMsb;
		}
	} else if (class == CLASS3) {
		/* Caller requires class 3 but the remote does not offer it */
		goto bad_service_param;
	}

	/*
	 * Preserve the upper four bits of the MSB from the PLOGI response.
	 * These bits contain the Buffer-to-Buffer State Change Number
	 * from the target and need to be passed to the FW.
	 */
	hsp_value = (hsp->cmn.bbRcvSizeMsb << 8) | hsp->cmn.bbRcvSizeLsb;
	ssp_value = (sp->cmn.bbRcvSizeMsb << 8) | sp->cmn.bbRcvSizeLsb;
	if (ssp_value > hsp_value) {
		sp->cmn.bbRcvSizeLsb = hsp->cmn.bbRcvSizeLsb;
		sp->cmn.bbRcvSizeMsb = (sp->cmn.bbRcvSizeMsb & 0xF0) |
				       (hsp->cmn.bbRcvSizeMsb & 0x0F);
	}

	/* Parameters look good: remember the remote port's names */
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof (struct lpfc_name));
	return 1;
bad_service_param:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0207 Device %x "
			 "(%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x) sent "
			 "invalid service parameters.  Ignoring device.\n",
			 ndlp->nlp_DID,
			 sp->nodeName.u.wwn[0], sp->nodeName.u.wwn[1],
			 sp->nodeName.u.wwn[2], sp->nodeName.u.wwn[3],
			 sp->nodeName.u.wwn[4], sp->nodeName.u.wwn[5],
			 sp->nodeName.u.wwn[6], sp->nodeName.u.wwn[7]);
	return 0;
}
148 
149 static void *
150 lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
151 			struct lpfc_iocbq *rspiocb)
152 {
153 	struct lpfc_dmabuf *pcmd, *prsp;
154 	uint32_t *lp;
155 	void     *ptr = NULL;
156 	IOCB_t   *irsp;
157 
158 	irsp = &rspiocb->iocb;
159 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
160 
161 	/* For lpfc_els_abort, context2 could be zero'ed to delay
162 	 * freeing associated memory till after ABTS completes.
163 	 */
164 	if (pcmd) {
165 		prsp =  list_get_first(&pcmd->list, struct lpfc_dmabuf,
166 				       list);
167 		if (prsp) {
168 			lp = (uint32_t *) prsp->virt;
169 			ptr = (void *)((uint8_t *)lp + sizeof(uint32_t));
170 		}
171 	} else {
172 		/* Force ulpStatus error since we are returning NULL ptr */
173 		if (!(irsp->ulpStatus)) {
174 			irsp->ulpStatus = IOSTAT_LOCAL_REJECT;
175 			irsp->un.ulpWord[4] = IOERR_SLI_ABORTED;
176 		}
177 		ptr = NULL;
178 	}
179 	return ptr;
180 }
181 
182 
/*
 * Free resources / clean up outstanding I/Os
 * associated with a LPFC_NODELIST entry. This
 * routine effectively results in a "software abort".
 */
int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli  *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *iocb, *next_iocb;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
			 "0205 Abort outstanding I/O on NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);

	lpfc_fabric_abort_nport(ndlp);

	/* First check the txq: entries there have not been handed to the
	 * HBA yet, so they can simply be moved to a private list and
	 * completed with an error under the lock.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			/* It matches, so deque and call compl with anp error */
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq: these are already outstanding on the
	 * wire, so each matching entry must be aborted by tag instead of
	 * being completed locally.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		/* Check to see if iocb matches the nport we are looking for */
		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
	return 0;
}
232 
233 static int
234 lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
235 	       struct lpfc_iocbq *cmdiocb)
236 {
237 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
238 	struct lpfc_hba    *phba = vport->phba;
239 	struct lpfc_dmabuf *pcmd;
240 	uint32_t *lp;
241 	IOCB_t *icmd;
242 	struct serv_parm *sp;
243 	LPFC_MBOXQ_t *mbox;
244 	struct ls_rjt stat;
245 	int rc;
246 
247 	memset(&stat, 0, sizeof (struct ls_rjt));
248 	if (vport->port_state <= LPFC_FLOGI) {
249 		/* Before responding to PLOGI, check for pt2pt mode.
250 		 * If we are pt2pt, with an outstanding FLOGI, abort
251 		 * the FLOGI and resend it first.
252 		 */
253 		if (vport->fc_flag & FC_PT2PT) {
254 			 lpfc_els_abort_flogi(phba);
255 		        if (!(vport->fc_flag & FC_PT2PT_PLOGI)) {
256 				/* If the other side is supposed to initiate
257 				 * the PLOGI anyway, just ACC it now and
258 				 * move on with discovery.
259 				 */
260 				phba->fc_edtov = FF_DEF_EDTOV;
261 				phba->fc_ratov = FF_DEF_RATOV;
262 				/* Start discovery - this should just do
263 				   CLEAR_LA */
264 				lpfc_disc_start(vport);
265 			} else
266 				lpfc_initial_flogi(vport);
267 		} else {
268 			stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
269 			stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
270 			lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
271 					    ndlp, NULL);
272 			return 0;
273 		}
274 	}
275 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
276 	lp = (uint32_t *) pcmd->virt;
277 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
278 	if (wwn_to_u64(sp->portName.u.wwn) == 0) {
279 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
280 				 "0140 PLOGI Reject: invalid nname\n");
281 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
282 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_PNAME;
283 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
284 			NULL);
285 		return 0;
286 	}
287 	if (wwn_to_u64(sp->nodeName.u.wwn) == 0) {
288 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
289 				 "0141 PLOGI Reject: invalid pname\n");
290 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
291 		stat.un.b.lsRjtRsnCodeExp = LSEXP_INVALID_NNAME;
292 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
293 			NULL);
294 		return 0;
295 	}
296 	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3) == 0)) {
297 		/* Reject this request because invalid parameters */
298 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
299 		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
300 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
301 			NULL);
302 		return 0;
303 	}
304 	icmd = &cmdiocb->iocb;
305 
306 	/* PLOGI chkparm OK */
307 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
308 			 "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
309 			 ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag,
310 			 ndlp->nlp_rpi);
311 
312 	if (vport->cfg_fcp_class == 2 && sp->cls2.classValid)
313 		ndlp->nlp_fcp_info |= CLASS2;
314 	else
315 		ndlp->nlp_fcp_info |= CLASS3;
316 
317 	ndlp->nlp_class_sup = 0;
318 	if (sp->cls1.classValid)
319 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
320 	if (sp->cls2.classValid)
321 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
322 	if (sp->cls3.classValid)
323 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
324 	if (sp->cls4.classValid)
325 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
326 	ndlp->nlp_maxframe =
327 		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
328 
329 	/* no need to reg_login if we are already in one of these states */
330 	switch (ndlp->nlp_state) {
331 	case  NLP_STE_NPR_NODE:
332 		if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
333 			break;
334 	case  NLP_STE_REG_LOGIN_ISSUE:
335 	case  NLP_STE_PRLI_ISSUE:
336 	case  NLP_STE_UNMAPPED_NODE:
337 	case  NLP_STE_MAPPED_NODE:
338 		lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
339 		return 1;
340 	}
341 
342 	if ((vport->fc_flag & FC_PT2PT) &&
343 	    !(vport->fc_flag & FC_PT2PT_PLOGI)) {
344 		/* rcv'ed PLOGI decides what our NPortId will be */
345 		vport->fc_myDID = icmd->un.rcvels.parmRo;
346 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
347 		if (mbox == NULL)
348 			goto out;
349 		lpfc_config_link(phba, mbox);
350 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
351 		mbox->vport = vport;
352 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
353 		if (rc == MBX_NOT_FINISHED) {
354 			mempool_free(mbox, phba->mbox_mem_pool);
355 			goto out;
356 		}
357 
358 		lpfc_can_disctmo(vport);
359 	}
360 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
361 	if (!mbox)
362 		goto out;
363 
364 	rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
365 			    (uint8_t *) sp, mbox, 0);
366 	if (rc) {
367 		mempool_free(mbox, phba->mbox_mem_pool);
368 		goto out;
369 	}
370 
371 	/* ACC PLOGI rsp command needs to execute first,
372 	 * queue this mbox command to be processed later.
373 	 */
374 	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
375 	/*
376 	 * mbox->context2 = lpfc_nlp_get(ndlp) deferred until mailbox
377 	 * command issued in lpfc_cmpl_els_acc().
378 	 */
379 	mbox->vport = vport;
380 	spin_lock_irq(shost->host_lock);
381 	ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI);
382 	spin_unlock_irq(shost->host_lock);
383 
384 	/*
385 	 * If there is an outstanding PLOGI issued, abort it before
386 	 * sending ACC rsp for received PLOGI. If pending plogi
387 	 * is not canceled here, the plogi will be rejected by
388 	 * remote port and will be retried. On a configuration with
389 	 * single discovery thread, this will cause a huge delay in
390 	 * discovery. Also this will cause multiple state machines
391 	 * running in parallel for this node.
392 	 */
393 	if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
394 		/* software abort outstanding PLOGI */
395 		lpfc_els_abort(phba, ndlp);
396 	}
397 
398 	if ((vport->port_type == LPFC_NPIV_PORT &&
399 	     vport->cfg_restrict_login)) {
400 
401 		/* In order to preserve RPIs, we want to cleanup
402 		 * the default RPI the firmware created to rcv
403 		 * this ELS request. The only way to do this is
404 		 * to register, then unregister the RPI.
405 		 */
406 		spin_lock_irq(shost->host_lock);
407 		ndlp->nlp_flag |= NLP_RM_DFLT_RPI;
408 		spin_unlock_irq(shost->host_lock);
409 		stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD;
410 		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
411 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
412 			ndlp, mbox);
413 		return 1;
414 	}
415 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox);
416 	return 1;
417 out:
418 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
419 	stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE;
420 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
421 	return 0;
422 }
423 
424 static int
425 lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
426 		struct lpfc_iocbq *cmdiocb)
427 {
428 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
429 	struct lpfc_dmabuf *pcmd;
430 	struct serv_parm   *sp;
431 	struct lpfc_name   *pnn, *ppn;
432 	struct ls_rjt stat;
433 	ADISC *ap;
434 	IOCB_t *icmd;
435 	uint32_t *lp;
436 	uint32_t cmd;
437 
438 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
439 	lp = (uint32_t *) pcmd->virt;
440 
441 	cmd = *lp++;
442 	if (cmd == ELS_CMD_ADISC) {
443 		ap = (ADISC *) lp;
444 		pnn = (struct lpfc_name *) & ap->nodeName;
445 		ppn = (struct lpfc_name *) & ap->portName;
446 	} else {
447 		sp = (struct serv_parm *) lp;
448 		pnn = (struct lpfc_name *) & sp->nodeName;
449 		ppn = (struct lpfc_name *) & sp->portName;
450 	}
451 
452 	icmd = &cmdiocb->iocb;
453 	if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
454 		if (cmd == ELS_CMD_ADISC) {
455 			lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
456 		} else {
457 			lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp,
458 					 NULL);
459 		}
460 		return 1;
461 	}
462 	/* Reject this request because invalid parameters */
463 	stat.un.b.lsRjtRsvd0 = 0;
464 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
465 	stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
466 	stat.un.b.vendorUnique = 0;
467 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
468 
469 	/* 1 sec timeout */
470 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
471 
472 	spin_lock_irq(shost->host_lock);
473 	ndlp->nlp_flag |= NLP_DELAY_TMO;
474 	spin_unlock_irq(shost->host_lock);
475 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
476 	ndlp->nlp_prev_state = ndlp->nlp_state;
477 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
478 	return 0;
479 }
480 
/* Common handling for a received LOGO or PRLO: ACC it, then move the
 * node to NPR, optionally scheduling a re-login for FCP targets.
 * Always returns 0.
 */
static int
lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      struct lpfc_iocbq *cmdiocb, uint32_t els_cmd)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */
	/* Only call LOGO ACC for first LOGO, this avoids sending unnecessary
	 * PLOGIs during LOGO storms from a device.
	 */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);
	if (els_cmd == ELS_CMD_PRLO)
		lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
	else
		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	/* Re-login is attempted only for non-fabric nodes that are targets
	 * (or at least not pure initiators), or when an ADISC was already
	 * in flight to the node.
	 */
	if ((!(ndlp->nlp_type & NLP_FABRIC) &&
	     ((ndlp->nlp_type & NLP_FCP_TARGET) ||
	      !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
	    (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
		/* Only try to re-login if this is NOT a Fabric Node */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);

		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	}
	ndlp->nlp_prev_state = ndlp->nlp_state;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
	spin_unlock_irq(shost->host_lock);
	/* The driver has to wait until the ACC completes before it continues
	 * processing the LOGO.  The action will resume in
	 * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an
	 * unreg_login, the driver waits so the ACC does not get aborted.
	 */
	return 0;
}
524 
525 static void
526 lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
527 	      struct lpfc_iocbq *cmdiocb)
528 {
529 	struct lpfc_dmabuf *pcmd;
530 	uint32_t *lp;
531 	PRLI *npr;
532 	struct fc_rport *rport = ndlp->rport;
533 	u32 roles;
534 
535 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
536 	lp = (uint32_t *) pcmd->virt;
537 	npr = (PRLI *) ((uint8_t *) lp + sizeof (uint32_t));
538 
539 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
540 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
541 	if (npr->prliType == PRLI_FCP_TYPE) {
542 		if (npr->initiatorFunc)
543 			ndlp->nlp_type |= NLP_FCP_INITIATOR;
544 		if (npr->targetFunc)
545 			ndlp->nlp_type |= NLP_FCP_TARGET;
546 		if (npr->Retry)
547 			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
548 	}
549 	if (rport) {
550 		/* We need to update the rport role values */
551 		roles = FC_RPORT_ROLE_UNKNOWN;
552 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
553 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
554 		if (ndlp->nlp_type & NLP_FCP_TARGET)
555 			roles |= FC_RPORT_ROLE_FCP_TARGET;
556 
557 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
558 			"rport rolechg:   role:x%x did:x%x flg:x%x",
559 			roles, ndlp->nlp_DID, ndlp->nlp_flag);
560 
561 		fc_remote_port_rolechg(rport, roles);
562 	}
563 }
564 
565 static uint32_t
566 lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
567 {
568 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
569 
570 	if (!ndlp->nlp_rpi) {
571 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
572 		return 0;
573 	}
574 
575 	if (!(vport->fc_flag & FC_PT2PT)) {
576 		/* Check config parameter use-adisc or FCP-2 */
577 		if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
578 		    ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
579 			spin_lock_irq(shost->host_lock);
580 			ndlp->nlp_flag |= NLP_NPR_ADISC;
581 			spin_unlock_irq(shost->host_lock);
582 			return 1;
583 		}
584 	}
585 	ndlp->nlp_flag &= ~NLP_NPR_ADISC;
586 	lpfc_unreg_rpi(vport, ndlp);
587 	return 0;
588 }
589 
590 static uint32_t
591 lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
592 		  void *arg, uint32_t evt)
593 {
594 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
595 			 "0271 Illegal State Transition: node x%x "
596 			 "event x%x, state x%x Data: x%x x%x\n",
597 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
598 			 ndlp->nlp_flag);
599 	return ndlp->nlp_state;
600 }
601 
602 static uint32_t
603 lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
604 		  void *arg, uint32_t evt)
605 {
606 	/* This transition is only legal if we previously
607 	 * rcv'ed a PLOGI. Since we don't want 2 discovery threads
608 	 * working on the same NPortID, do nothing for this thread
609 	 * to stop it.
610 	 */
611 	if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
612 		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
613 			 "0272 Illegal State Transition: node x%x "
614 			 "event x%x, state x%x Data: x%x x%x\n",
615 			 ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi,
616 			 ndlp->nlp_flag);
617 	}
618 	return ndlp->nlp_state;
619 }
620 
621 /* Start of Discovery State Machine routines */
622 
623 static uint32_t
624 lpfc_rcv_plogi_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
625 			   void *arg, uint32_t evt)
626 {
627 	struct lpfc_iocbq *cmdiocb;
628 
629 	cmdiocb = (struct lpfc_iocbq *) arg;
630 
631 	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
632 		return ndlp->nlp_state;
633 	}
634 	return NLP_STE_FREED_NODE;
635 }
636 
637 static uint32_t
638 lpfc_rcv_els_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
639 			 void *arg, uint32_t evt)
640 {
641 	lpfc_issue_els_logo(vport, ndlp, 0);
642 	return ndlp->nlp_state;
643 }
644 
645 static uint32_t
646 lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
647 			  void *arg, uint32_t evt)
648 {
649 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
650 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
651 
652 	spin_lock_irq(shost->host_lock);
653 	ndlp->nlp_flag |= NLP_LOGO_ACC;
654 	spin_unlock_irq(shost->host_lock);
655 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
656 
657 	return ndlp->nlp_state;
658 }
659 
660 static uint32_t
661 lpfc_cmpl_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
662 			   void *arg, uint32_t evt)
663 {
664 	return NLP_STE_FREED_NODE;
665 }
666 
667 static uint32_t
668 lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
669 			   void *arg, uint32_t evt)
670 {
671 	return NLP_STE_FREED_NODE;
672 }
673 
/* PLOGI_ISSUE state, RCV_PLOGI event: PLOGI collision.  The port with
 * the higher WWPN wins; we accept the remote's PLOGI only when our
 * portname compares lower, otherwise we reject so ours goes through.
 */
static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = arg;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *lp = (uint32_t *) pcmd->virt;
	/* Payload starts with the ELS command word; sparms follow it */
	struct serv_parm *sp = (struct serv_parm *) (lp + 1);
	struct ls_rjt stat;
	int port_cmp;

	memset(&stat, 0, sizeof (struct ls_rjt));

	/* For a PLOGI, we only accept if our portname is less
	 * than the remote portname.
	 */
	phba->fc_stat.elsLogiCol++;
	port_cmp = memcmp(&vport->fc_portname, &sp->portName,
			  sizeof(struct lpfc_name));

	if (port_cmp >= 0) {
		/* Reject this request because the remote node will accept
		   ours */
		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
		stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
			NULL);
	} else {
		/* We lost the collision: accept the remote's PLOGI and, if
		 * this node was still pending discovery, account for it and
		 * possibly finish the discovery/RSCN cycle.
		 */
		if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) &&
		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
		    (vport->num_disc_nodes)) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Check if there are more PLOGIs to be sent */
			lpfc_more_plogi(vport);
			if (vport->num_disc_nodes == 0) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
				lpfc_end_rscn(vport);
			}
		}
	} /* If our portname was less */

	return ndlp->nlp_state;
}
724 
725 static uint32_t
726 lpfc_rcv_prli_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
727 			  void *arg, uint32_t evt)
728 {
729 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
730 	struct ls_rjt     stat;
731 
732 	memset(&stat, 0, sizeof (struct ls_rjt));
733 	stat.un.b.lsRjtRsnCode = LSRJT_LOGICAL_BSY;
734 	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
735 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
736 	return ndlp->nlp_state;
737 }
738 
739 static uint32_t
740 lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
741 			  void *arg, uint32_t evt)
742 {
743 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
744 
745 				/* software abort outstanding PLOGI */
746 	lpfc_els_abort(vport->phba, ndlp);
747 
748 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
749 	return ndlp->nlp_state;
750 }
751 
752 static uint32_t
753 lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
754 			 void *arg, uint32_t evt)
755 {
756 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
757 	struct lpfc_hba   *phba = vport->phba;
758 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
759 
760 	/* software abort outstanding PLOGI */
761 	lpfc_els_abort(phba, ndlp);
762 
763 	if (evt == NLP_EVT_RCV_LOGO) {
764 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
765 	} else {
766 		lpfc_issue_els_logo(vport, ndlp, 0);
767 	}
768 
769 	/* Put ndlp in npr state set plogi timer for 1 sec */
770 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
771 	spin_lock_irq(shost->host_lock);
772 	ndlp->nlp_flag |= NLP_DELAY_TMO;
773 	spin_unlock_irq(shost->host_lock);
774 	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
775 	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
776 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
777 
778 	return ndlp->nlp_state;
779 }
780 
/* PLOGI_ISSUE state, CMPL_PLOGI event: validate the PLOGI response,
 * record the remote port's capabilities and issue REG_LOGIN.  On any
 * failure the node is marked for deferred removal and freed; a special
 * case preserves the node when a received-PLOGI collision recovery is
 * already in progress (NLP_ACC_REGLOGIN).
 */
static uint32_t
lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp,
			    void *arg,
			    uint32_t evt)
{
	struct lpfc_hba    *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq  *cmdiocb, *rspiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *mp;
	uint32_t *lp;
	IOCB_t *irsp;
	struct serv_parm *sp;
	LPFC_MBOXQ_t *mbox;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
		/* Recovery from PLOGI collision logic */
		return ndlp->nlp_state;
	}

	irsp = &rspiocb->iocb;

	/* Remote rejected or the exchange failed: free the node */
	if (irsp->ulpStatus)
		goto out;

	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;

	/* The response buffer is chained behind the command buffer */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	lp = (uint32_t *) prsp->virt;
	/* Service parameters follow the leading ELS command word */
	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));

	/* Some switches have FDMI servers returning 0 for WWN */
	if ((ndlp->nlp_DID != FDMI_DID) &&
		(wwn_to_u64(sp->portName.u.wwn) == 0 ||
		wwn_to_u64(sp->nodeName.u.wwn) == 0)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0142 PLOGI RSP: Invalid WWN.\n");
		goto out;
	}
	if (!lpfc_check_sparm(vport, ndlp, sp, CLASS3))
		goto out;
	/* PLOGI chkparm OK */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_state,
			 ndlp->nlp_flag, ndlp->nlp_rpi);
	if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid))
		ndlp->nlp_fcp_info |= CLASS2;
	else
		ndlp->nlp_fcp_info |= CLASS3;

	/* Record which classes of service the remote port supports */
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe =
		((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			"0133 PLOGI: no memory for reg_login "
			"Data: x%x x%x x%x x%x\n",
			ndlp->nlp_DID, ndlp->nlp_state,
			ndlp->nlp_flag, ndlp->nlp_rpi);
		goto out;
	}

	/* Drop any stale RPI before registering the new login */
	lpfc_unreg_rpi(vport, ndlp);

	if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
			   (uint8_t *) sp, mbox, 0) == 0) {
		/* Well-known addresses get their dedicated completion
		 * handlers; everything else uses the generic one.
		 */
		switch (ndlp->nlp_DID) {
		case NameServer_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login;
			break;
		case FDMI_DID:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login;
			break;
		default:
			mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
		}
		/* The mailbox holds a node reference until it completes */
		mbox->context2 = lpfc_nlp_get(ndlp);
		mbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    != MBX_NOT_FINISHED) {
			lpfc_nlp_set_state(vport, ndlp,
					   NLP_STE_REG_LOGIN_ISSUE);
			return ndlp->nlp_state;
		}
		/* decrement node reference count to the failed mbox
		 * command
		 */
		lpfc_nlp_put(ndlp);
		mp = (struct lpfc_dmabuf *) mbox->context1;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0134 PLOGI: cannot issue reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	} else {
		mempool_free(mbox, phba->mbox_mem_pool);

		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0135 PLOGI: cannot format reg_login "
				 "Data: x%x x%x x%x x%x\n",
				 ndlp->nlp_DID, ndlp->nlp_state,
				 ndlp->nlp_flag, ndlp->nlp_rpi);
	}


out:
	if (ndlp->nlp_DID == NameServer_DID) {
		/* Without NameServer login this vport cannot discover */
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "0261 Cannot Register NameServer login\n");
	}

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DEFER_RM;
	spin_unlock_irq(shost->host_lock);
	return NLP_STE_FREED_NODE;
}
917 
918 static uint32_t
919 lpfc_cmpl_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
920 			   void *arg, uint32_t evt)
921 {
922 	return ndlp->nlp_state;
923 }
924 
925 static uint32_t
926 lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport,
927 	struct lpfc_nodelist *ndlp, void *arg, uint32_t evt)
928 {
929 	return ndlp->nlp_state;
930 }
931 
932 static uint32_t
933 lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
934 			   void *arg, uint32_t evt)
935 {
936 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
937 
938 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
939 		spin_lock_irq(shost->host_lock);
940 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
941 		spin_unlock_irq(shost->host_lock);
942 		return ndlp->nlp_state;
943 	} else {
944 		/* software abort outstanding PLOGI */
945 		lpfc_els_abort(vport->phba, ndlp);
946 
947 		lpfc_drop_node(vport, ndlp);
948 		return NLP_STE_FREED_NODE;
949 	}
950 }
951 
/* Recovery event (e.g. link event) while a PLOGI is outstanding.
 * Unless a deferred RSCN is in progress, abort the PLOGI and move the
 * node to NPR, clearing its removal/discovery flags.
 */
static uint32_t
lpfc_device_recov_plogi_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PLOGI */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	/* nlp_flag is modified under the host lock */
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);

	return ndlp->nlp_state;
}
978 
/* PLOGI received while an ADISC is outstanding.
 * Abort the ADISC first. If the common PLOGI receive path accepts the
 * request, clear this node's pending-discovery flag and, if discovery
 * is still counting nodes, kick off the next ADISC. If the PLOGI was
 * not accepted, restart login by issuing our own PLOGI.
 */
static uint32_t
lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	cmdiocb = (struct lpfc_iocbq *) arg;

	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
			/* Keep the ADISC discovery wave moving */
			if (vport->num_disc_nodes)
				lpfc_more_adisc(vport);
		}
		return ndlp->nlp_state;
	}
	/* PLOGI not accepted: fall back to issuing our own PLOGI */
	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);

	return ndlp->nlp_state;
}
1008 
1009 static uint32_t
1010 lpfc_rcv_prli_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1011 			  void *arg, uint32_t evt)
1012 {
1013 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1014 
1015 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1016 	return ndlp->nlp_state;
1017 }
1018 
1019 static uint32_t
1020 lpfc_rcv_logo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1021 			  void *arg, uint32_t evt)
1022 {
1023 	struct lpfc_hba *phba = vport->phba;
1024 	struct lpfc_iocbq *cmdiocb;
1025 
1026 	cmdiocb = (struct lpfc_iocbq *) arg;
1027 
1028 	/* software abort outstanding ADISC */
1029 	lpfc_els_abort(phba, ndlp);
1030 
1031 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1032 	return ndlp->nlp_state;
1033 }
1034 
1035 static uint32_t
1036 lpfc_rcv_padisc_adisc_issue(struct lpfc_vport *vport,
1037 			    struct lpfc_nodelist *ndlp,
1038 			    void *arg, uint32_t evt)
1039 {
1040 	struct lpfc_iocbq *cmdiocb;
1041 
1042 	cmdiocb = (struct lpfc_iocbq *) arg;
1043 
1044 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1045 	return ndlp->nlp_state;
1046 }
1047 
1048 static uint32_t
1049 lpfc_rcv_prlo_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1050 			  void *arg, uint32_t evt)
1051 {
1052 	struct lpfc_iocbq *cmdiocb;
1053 
1054 	cmdiocb = (struct lpfc_iocbq *) arg;
1055 
1056 	/* Treat like rcv logo */
1057 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
1058 	return ndlp->nlp_state;
1059 }
1060 
1061 static uint32_t
1062 lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1063 			    struct lpfc_nodelist *ndlp,
1064 			    void *arg, uint32_t evt)
1065 {
1066 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1067 	struct lpfc_hba   *phba = vport->phba;
1068 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1069 	IOCB_t *irsp;
1070 	ADISC *ap;
1071 
1072 	cmdiocb = (struct lpfc_iocbq *) arg;
1073 	rspiocb = cmdiocb->context_un.rsp_iocb;
1074 
1075 	ap = (ADISC *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
1076 	irsp = &rspiocb->iocb;
1077 
1078 	if ((irsp->ulpStatus) ||
1079 	    (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) {
1080 		/* 1 sec timeout */
1081 		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
1082 		spin_lock_irq(shost->host_lock);
1083 		ndlp->nlp_flag |= NLP_DELAY_TMO;
1084 		spin_unlock_irq(shost->host_lock);
1085 		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
1086 
1087 		memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name));
1088 		memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name));
1089 
1090 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1091 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1092 		lpfc_unreg_rpi(vport, ndlp);
1093 		return ndlp->nlp_state;
1094 	}
1095 
1096 	if (ndlp->nlp_type & NLP_FCP_TARGET) {
1097 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1098 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
1099 	} else {
1100 		ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
1101 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
1102 	}
1103 	return ndlp->nlp_state;
1104 }
1105 
1106 static uint32_t
1107 lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1108 			   void *arg, uint32_t evt)
1109 {
1110 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1111 
1112 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1113 		spin_lock_irq(shost->host_lock);
1114 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1115 		spin_unlock_irq(shost->host_lock);
1116 		return ndlp->nlp_state;
1117 	} else {
1118 		/* software abort outstanding ADISC */
1119 		lpfc_els_abort(vport->phba, ndlp);
1120 
1121 		lpfc_drop_node(vport, ndlp);
1122 		return NLP_STE_FREED_NODE;
1123 	}
1124 }
1125 
/* Recovery event while an ADISC is outstanding.
 * Unless a deferred RSCN is in progress, abort the ADISC, park the
 * node in NPR, clear its removal/discovery flags and record whether
 * ADISC should be used when the node is rediscovered.
 */
static uint32_t
lpfc_device_recov_adisc_issue(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding ADISC */
	lpfc_els_abort(phba, ndlp);

	ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
1152 
1153 static uint32_t
1154 lpfc_rcv_plogi_reglogin_issue(struct lpfc_vport *vport,
1155 			      struct lpfc_nodelist *ndlp,
1156 			      void *arg,
1157 			      uint32_t evt)
1158 {
1159 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1160 
1161 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1162 	return ndlp->nlp_state;
1163 }
1164 
1165 static uint32_t
1166 lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
1167 			     struct lpfc_nodelist *ndlp,
1168 			     void *arg,
1169 			     uint32_t evt)
1170 {
1171 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1172 
1173 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1174 	return ndlp->nlp_state;
1175 }
1176 
1177 static uint32_t
1178 lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
1179 			     struct lpfc_nodelist *ndlp,
1180 			     void *arg,
1181 			     uint32_t evt)
1182 {
1183 	struct lpfc_hba   *phba = vport->phba;
1184 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1185 	LPFC_MBOXQ_t	  *mb;
1186 	LPFC_MBOXQ_t	  *nextmb;
1187 	struct lpfc_dmabuf *mp;
1188 
1189 	cmdiocb = (struct lpfc_iocbq *) arg;
1190 
1191 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
1192 	if ((mb = phba->sli.mbox_active)) {
1193 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1194 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1195 			lpfc_nlp_put(ndlp);
1196 			mb->context2 = NULL;
1197 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1198 		}
1199 	}
1200 
1201 	spin_lock_irq(&phba->hbalock);
1202 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
1203 		if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
1204 		   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
1205 			mp = (struct lpfc_dmabuf *) (mb->context1);
1206 			if (mp) {
1207 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
1208 				kfree(mp);
1209 			}
1210 			lpfc_nlp_put(ndlp);
1211 			list_del(&mb->list);
1212 			mempool_free(mb, phba->mbox_mem_pool);
1213 		}
1214 	}
1215 	spin_unlock_irq(&phba->hbalock);
1216 
1217 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1218 	return ndlp->nlp_state;
1219 }
1220 
1221 static uint32_t
1222 lpfc_rcv_padisc_reglogin_issue(struct lpfc_vport *vport,
1223 			       struct lpfc_nodelist *ndlp,
1224 			       void *arg,
1225 			       uint32_t evt)
1226 {
1227 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1228 
1229 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1230 	return ndlp->nlp_state;
1231 }
1232 
1233 static uint32_t
1234 lpfc_rcv_prlo_reglogin_issue(struct lpfc_vport *vport,
1235 			     struct lpfc_nodelist *ndlp,
1236 			     void *arg,
1237 			     uint32_t evt)
1238 {
1239 	struct lpfc_iocbq *cmdiocb;
1240 
1241 	cmdiocb = (struct lpfc_iocbq *) arg;
1242 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1243 	return ndlp->nlp_state;
1244 }
1245 
/* REG_LOGIN mailbox completion while in REG_LOGIN_ISSUE state.
 * On mailbox failure: if the HBA is out of RPI resources, give up and
 * park the node in NPR; otherwise schedule a delayed PLOGI retry,
 * issue a LOGO and park the node in NPR. On success, record the RPI
 * and either issue PRLI (non-fabric nport) or go straight to UNMAPPED
 * (fabric nport).
 */
static uint32_t
lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
				  struct lpfc_nodelist *ndlp,
				  void *arg,
				  uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
	MAILBOX_t *mb = &pmb->mb;
	/* varWords[1] holds the DID the REG_LOGIN was issued for */
	uint32_t did  = mb->un.varWords[1];

	if (mb->mbxStatus) {
		/* RegLogin failed */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0246 RegLogin failed Data: x%x x%x x%x\n",
				did, mb->mbxStatus, vport->port_state);
		/*
		 * If RegLogin failed due to lack of HBA resources do not
		 * retry discovery.
		 */
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
			return ndlp->nlp_state;
		}

		/* Put ndlp in npr state set plogi timer for 1 sec */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

		lpfc_issue_els_logo(vport, ndlp, 0);
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	/* varWords[0] holds the RPI assigned by the successful login */
	ndlp->nlp_rpi = mb->un.varWords[0];

	/* Only if we are not a fabric nport do we issue PRLI */
	if (!(ndlp->nlp_type & NLP_FABRIC)) {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
		lpfc_issue_els_prli(vport, ndlp, 0);
	} else {
		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	}
	return ndlp->nlp_state;
}
1298 
1299 static uint32_t
1300 lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport,
1301 			      struct lpfc_nodelist *ndlp,
1302 			      void *arg,
1303 			      uint32_t evt)
1304 {
1305 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1306 
1307 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1308 		spin_lock_irq(shost->host_lock);
1309 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1310 		spin_unlock_irq(shost->host_lock);
1311 		return ndlp->nlp_state;
1312 	} else {
1313 		lpfc_drop_node(vport, ndlp);
1314 		return NLP_STE_FREED_NODE;
1315 	}
1316 }
1317 
/* Recovery event while a REG_LOGIN is pending.
 * Unless a deferred RSCN is in progress, park the node in NPR, clear
 * its removal/discovery flags and record whether ADISC may be used
 * for rediscovery.
 */
static uint32_t
lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
				 struct lpfc_nodelist *ndlp,
				 void *arg,
				 uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
1340 
1341 static uint32_t
1342 lpfc_rcv_plogi_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1343 			  void *arg, uint32_t evt)
1344 {
1345 	struct lpfc_iocbq *cmdiocb;
1346 
1347 	cmdiocb = (struct lpfc_iocbq *) arg;
1348 
1349 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1350 	return ndlp->nlp_state;
1351 }
1352 
1353 static uint32_t
1354 lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1355 			 void *arg, uint32_t evt)
1356 {
1357 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1358 
1359 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1360 	return ndlp->nlp_state;
1361 }
1362 
1363 static uint32_t
1364 lpfc_rcv_logo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1365 			 void *arg, uint32_t evt)
1366 {
1367 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1368 
1369 	/* Software abort outstanding PRLI before sending acc */
1370 	lpfc_els_abort(vport->phba, ndlp);
1371 
1372 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1373 	return ndlp->nlp_state;
1374 }
1375 
1376 static uint32_t
1377 lpfc_rcv_padisc_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1378 			   void *arg, uint32_t evt)
1379 {
1380 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1381 
1382 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1383 	return ndlp->nlp_state;
1384 }
1385 
/* This routine is invoked when we receive a PRLO request from a nport
 * we are logged into.  We should send back a PRLO rsp setting the
 * appropriate bits.
 * NEXT STATE = PRLI_ISSUE
 */
1391 static uint32_t
1392 lpfc_rcv_prlo_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1393 			 void *arg, uint32_t evt)
1394 {
1395 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1396 
1397 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1398 	return ndlp->nlp_state;
1399 }
1400 
/* PRLI completion while in PRLI_ISSUE state.
 * On failure, restricted-login NPIV ports jump to the logout path
 * (the "out" label inside the success branch); others go UNMAPPED.
 * On success, decode the PRLI accept payload into nlp_type /
 * nlp_fcp_info, then transition to MAPPED (FCP target) or UNMAPPED.
 */
static uint32_t
lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	struct lpfc_hba   *phba = vport->phba;
	IOCB_t *irsp;
	PRLI *npr;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;
	npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		if ((vport->port_type == LPFC_NPIV_PORT) &&
		    vport->cfg_restrict_login) {
			goto out;
		}
		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		return ndlp->nlp_state;
	}

	/* Check out PRLI rsp */
	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
	    (npr->prliType == PRLI_FCP_TYPE)) {
		if (npr->initiatorFunc)
			ndlp->nlp_type |= NLP_FCP_INITIATOR;
		if (npr->targetFunc)
			ndlp->nlp_type |= NLP_FCP_TARGET;
		if (npr->Retry)
			ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
	}
	if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
	    (vport->port_type == LPFC_NPIV_PORT) &&
	     vport->cfg_restrict_login) {
out:
		/* Restricted login and not a target: log the node out
		 * and park it in NPR. (Note: "out" is also the failure
		 * path target above.)
		 */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_TARGET_REMOVE;
		spin_unlock_irq(shost->host_lock);
		lpfc_issue_els_logo(vport, ndlp, 0);

		ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		return ndlp->nlp_state;
	}

	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
	else
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
	return ndlp->nlp_state;
}
1459 
/*! lpfc_device_rm_prli_issue
 *
 * \pre
 * \post
 * \param   phba
 * \param   ndlp
 * \param   arg
 * \param   evt
 * \return  uint32_t
 *
 * \b Description:
 *    This routine is invoked when we receive a request to remove a nport we
 *    are in the process of PRLIing. We should software abort the outstanding
 *    PRLI, unreg login, and send a logout. We will change the node state to
 *    UNUSED_NODE and put it on the plogi list so it can be freed when the
 *    LOGO completes.
 *
 */
1477 
static uint32_t
lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
		/* Discovery still pending on this node: defer removal */
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
		spin_unlock_irq(shost->host_lock);
		return ndlp->nlp_state;
	} else {
		/* software abort outstanding PRLI */
		lpfc_els_abort(vport->phba, ndlp);

		lpfc_drop_node(vport, ndlp);
		return NLP_STE_FREED_NODE;
	}
}
1497 
1498 
/*! lpfc_device_recov_prli_issue
 *
 * \pre
 * \post
 * \param   phba
 * \param   ndlp
 * \param   arg
 * \param   evt
 * \return  uint32_t
 *
 * \b Description:
 *    The routine is invoked when the state of a device is unknown, like
 *    during a link down. We should remove the nodelist entry from the
 *    unmapped list, issue a UNREG_LOGIN, do a software abort of the
 *    outstanding PRLI command, then free the node entry.
 */
static uint32_t
lpfc_device_recov_prli_issue(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	/* software abort outstanding PRLI */
	lpfc_els_abort(phba, ndlp);

	/* Park the node in NPR and clear removal/discovery flags */
	ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
1541 
1542 static uint32_t
1543 lpfc_rcv_plogi_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1544 			  void *arg, uint32_t evt)
1545 {
1546 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1547 
1548 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1549 	return ndlp->nlp_state;
1550 }
1551 
1552 static uint32_t
1553 lpfc_rcv_prli_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1554 			 void *arg, uint32_t evt)
1555 {
1556 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1557 
1558 	lpfc_rcv_prli(vport, ndlp, cmdiocb);
1559 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1560 	return ndlp->nlp_state;
1561 }
1562 
1563 static uint32_t
1564 lpfc_rcv_logo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1565 			 void *arg, uint32_t evt)
1566 {
1567 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1568 
1569 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1570 	return ndlp->nlp_state;
1571 }
1572 
1573 static uint32_t
1574 lpfc_rcv_padisc_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1575 			   void *arg, uint32_t evt)
1576 {
1577 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1578 
1579 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1580 	return ndlp->nlp_state;
1581 }
1582 
1583 static uint32_t
1584 lpfc_rcv_prlo_unmap_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1585 			 void *arg, uint32_t evt)
1586 {
1587 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1588 
1589 	lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
1590 	return ndlp->nlp_state;
1591 }
1592 
/* Recovery event while UNMAPPED: park the node in NPR, clear its
 * removal/discovery flags and record whether ADISC may be used for
 * rediscovery.
 */
static uint32_t
lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
			     struct lpfc_nodelist *ndlp,
			     void *arg,
			     uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);

	return ndlp->nlp_state;
}
1610 
1611 static uint32_t
1612 lpfc_rcv_plogi_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1613 			   void *arg, uint32_t evt)
1614 {
1615 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1616 
1617 	lpfc_rcv_plogi(vport, ndlp, cmdiocb);
1618 	return ndlp->nlp_state;
1619 }
1620 
1621 static uint32_t
1622 lpfc_rcv_prli_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1623 			  void *arg, uint32_t evt)
1624 {
1625 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1626 
1627 	lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
1628 	return ndlp->nlp_state;
1629 }
1630 
1631 static uint32_t
1632 lpfc_rcv_logo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1633 			  void *arg, uint32_t evt)
1634 {
1635 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1636 
1637 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1638 	return ndlp->nlp_state;
1639 }
1640 
1641 static uint32_t
1642 lpfc_rcv_padisc_mapped_node(struct lpfc_vport *vport,
1643 			    struct lpfc_nodelist *ndlp,
1644 			    void *arg, uint32_t evt)
1645 {
1646 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1647 
1648 	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
1649 	return ndlp->nlp_state;
1650 }
1651 
/* PRLO received while MAPPED: abort any outstanding FCP I/O queued to
 * this target on the FCP ring, then process the PRLO through the
 * common LOGO receive path.
 */
static uint32_t
lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  void *arg, uint32_t evt)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	/* flush the target */
	lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
			    ndlp->nlp_sid, 0, LPFC_CTX_TGT);

	/* Treat like rcv logo */
	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_PRLO);
	return ndlp->nlp_state;
}
1667 
/* Recovery event while MAPPED: park the node in NPR, clear its
 * removal/discovery flags and record whether ADISC may be used for
 * rediscovery.
 */
static uint32_t
lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
			      struct lpfc_nodelist *ndlp,
			      void *arg,
			      uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	lpfc_disc_set_adisc(vport, ndlp);
	return ndlp->nlp_state;
}
1684 
/* PLOGI received while in NPR state.
 * Ignored if a LOGO is outstanding or being ACC'ed. If the common
 * PLOGI receive path accepts it, cancel any retry timer and clear the
 * ADISC/discovery flags. Otherwise, if the node is not already queued
 * for discovery or delayed retry, issue our own PLOGI immediately.
 */
static uint32_t
lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb  = (struct lpfc_iocbq *) arg;

	/* Ignore PLOGI if we have an outstanding LOGO */
	if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC))
		return ndlp->nlp_state;
	if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) {
		lpfc_cancel_retry_delay_tmo(vport, ndlp);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC);
		spin_unlock_irq(shost->host_lock);
	} else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		/* send PLOGI immediately, move to PLOGI issue state */
		if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
1710 
/* PRLI received while in NPR state.
 * Reject it with LS_RJT (unable to perform), then, unless a delayed
 * retry is already scheduled, restart login with ADISC (if allowed)
 * or PLOGI.
 */
static uint32_t
lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
	struct ls_rjt     stat;

	/* We are not logged in at the process level: reject the PRLI */
	memset(&stat, 0, sizeof (struct ls_rjt));
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			/* ADISC allowed: rediscover via ADISC */
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			spin_unlock_irq(shost->host_lock);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
1740 
1741 static uint32_t
1742 lpfc_rcv_logo_npr_node(struct lpfc_vport *vport,  struct lpfc_nodelist *ndlp,
1743 		       void *arg, uint32_t evt)
1744 {
1745 	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
1746 
1747 	lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
1748 	return ndlp->nlp_state;
1749 }
1750 
/* PDISC/ADISC received while in NPR state.
 * Respond via the common PADISC path, then restart discovery of this
 * node (ADISC or PLOGI) only if no delayed retry or discovery pass is
 * already pending for it.
 */
static uint32_t
lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	lpfc_rcv_padisc(vport, ndlp, cmdiocb);
	/*
	 * Do not start discovery if discovery is about to start
	 * or discovery in progress for this node. Starting discovery
	 * here will affect the counting of discovery threads.
	 */
	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) {
		if (ndlp->nlp_flag & NLP_NPR_ADISC) {
			/* NOTE(review): nlp_flag is cleared here without
			 * taking shost->host_lock, unlike the sibling
			 * lpfc_rcv_prli_npr_node - confirm intended.
			 */
			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
			lpfc_issue_els_adisc(vport, ndlp, 0);
		} else {
			ndlp->nlp_prev_state = NLP_STE_NPR_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
		}
	}
	return ndlp->nlp_state;
}
1778 
/* PRLO received while in NPR state.
 * ACC it (marked as a LOGO ACC) and, if no delayed retry is already
 * scheduled, arm a 1 second timer for a delayed PLOGI. In either case
 * clear the ADISC flag so rediscovery uses PLOGI.
 */
static uint32_t
lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		       void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_LOGO_ACC;
	spin_unlock_irq(shost->host_lock);

	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);

	if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) {
		/* 1 sec delay, then retry with PLOGI */
		mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_DELAY_TMO;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
		ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
	} else {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp->nlp_state;
}
1806 
/* PLOGI completion while in NPR state.
 * On failure, flag the node for deferred removal and report it freed.
 */
static uint32_t
lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			 void *arg, uint32_t evt)
{
	struct lpfc_iocbq *cmdiocb, *rspiocb;
	IOCB_t *irsp;

	cmdiocb = (struct lpfc_iocbq *) arg;
	rspiocb = cmdiocb->context_un.rsp_iocb;

	irsp = &rspiocb->iocb;
	if (irsp->ulpStatus) {
		/* NOTE(review): NLP_DEFER_RM is set here without taking
		 * shost->host_lock, unlike other nlp_flag updates in this
		 * file - confirm intended.
		 */
		ndlp->nlp_flag |= NLP_DEFER_RM;
		return NLP_STE_FREED_NODE;
	}
	return ndlp->nlp_state;
}
1824 
1825 static uint32_t
1826 lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1827 			void *arg, uint32_t evt)
1828 {
1829 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1830 	IOCB_t *irsp;
1831 
1832 	cmdiocb = (struct lpfc_iocbq *) arg;
1833 	rspiocb = cmdiocb->context_un.rsp_iocb;
1834 
1835 	irsp = &rspiocb->iocb;
1836 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1837 		lpfc_drop_node(vport, ndlp);
1838 		return NLP_STE_FREED_NODE;
1839 	}
1840 	return ndlp->nlp_state;
1841 }
1842 
1843 static uint32_t
1844 lpfc_cmpl_logo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1845 			void *arg, uint32_t evt)
1846 {
1847 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1848 	if (ndlp->nlp_DID == Fabric_DID) {
1849 		spin_lock_irq(shost->host_lock);
1850 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1851 		spin_unlock_irq(shost->host_lock);
1852 	}
1853 	lpfc_unreg_rpi(vport, ndlp);
1854 	return ndlp->nlp_state;
1855 }
1856 
1857 static uint32_t
1858 lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1859 			 void *arg, uint32_t evt)
1860 {
1861 	struct lpfc_iocbq *cmdiocb, *rspiocb;
1862 	IOCB_t *irsp;
1863 
1864 	cmdiocb = (struct lpfc_iocbq *) arg;
1865 	rspiocb = cmdiocb->context_un.rsp_iocb;
1866 
1867 	irsp = &rspiocb->iocb;
1868 	if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) {
1869 		lpfc_drop_node(vport, ndlp);
1870 		return NLP_STE_FREED_NODE;
1871 	}
1872 	return ndlp->nlp_state;
1873 }
1874 
1875 static uint32_t
1876 lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
1877 			    struct lpfc_nodelist *ndlp,
1878 			    void *arg, uint32_t evt)
1879 {
1880 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
1881 	MAILBOX_t    *mb = &pmb->mb;
1882 
1883 	if (!mb->mbxStatus)
1884 		ndlp->nlp_rpi = mb->un.varWords[0];
1885 	else {
1886 		if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
1887 			lpfc_drop_node(vport, ndlp);
1888 			return NLP_STE_FREED_NODE;
1889 		}
1890 	}
1891 	return ndlp->nlp_state;
1892 }
1893 
1894 static uint32_t
1895 lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1896 			void *arg, uint32_t evt)
1897 {
1898 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1899 
1900 	if (ndlp->nlp_flag & NLP_NPR_2B_DISC) {
1901 		spin_lock_irq(shost->host_lock);
1902 		ndlp->nlp_flag |= NLP_NODEV_REMOVE;
1903 		spin_unlock_irq(shost->host_lock);
1904 		return ndlp->nlp_state;
1905 	}
1906 	lpfc_drop_node(vport, ndlp);
1907 	return NLP_STE_FREED_NODE;
1908 }
1909 
/* Recovery event while already in NPR state: unless a deferred RSCN
 * is in progress, cancel any delayed-retry timer and clear the
 * removal/discovery flags; the node stays in NPR.
 */
static uint32_t
lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   void *arg, uint32_t evt)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Don't do anything that will mess up processing of the
	 * previous RSCN.
	 */
	if (vport->fc_flag & FC_RSCN_DEFERRED)
		return ndlp->nlp_state;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
	spin_unlock_irq(shost->host_lock);
	return ndlp->nlp_state;
}
1928 
1929 
1930 /* This next section defines the NPort Discovery State Machine */
1931 
1932 /* There are 4 different double linked lists nodelist entries can reside on.
1933  * The plogi list and adisc list are used when Link Up discovery or RSCN
1934  * processing is needed. Each list holds the nodes that we will send PLOGI
 * or ADISC on. These lists will keep track of what nodes will be affected
 * by an RSCN, or a Link Up (Typically, all nodes are affected on Link Up).
1937  * The unmapped_list will contain all nodes that we have successfully logged
1938  * into at the Fibre Channel level. The mapped_list will contain all nodes
1939  * that are mapped FCP targets.
1940  */
1941 /*
1942  * The bind list is a list of undiscovered (potentially non-existent) nodes
1943  * that we have saved binding information on. This information is used when
1944  * nodes transition from the unmapped to the mapped list.
1945  */
1946 /* For UNUSED_NODE state, the node has just been allocated .
1947  * For PLOGI_ISSUE and REG_LOGIN_ISSUE, the node is on
1948  * the PLOGI list. For REG_LOGIN_COMPL, the node is taken off the PLOGI list
1949  * and put on the unmapped list. For ADISC processing, the node is taken off
1950  * the ADISC list and placed on either the mapped or unmapped list (depending
1951  * on its previous state). Once on the unmapped list, a PRLI is issued and the
1952  * state changed to PRLI_ISSUE. When the PRLI completion occurs, the state is
1953  * changed to UNMAPPED_NODE. If the completion indicates a mapped
1954  * node, the node is taken off the unmapped list. The binding list is checked
1955  * for a valid binding, or a binding is automatically assigned. If binding
1956  * assignment is unsuccessful, the node is left on the unmapped list. If
1957  * binding assignment is successful, the associated binding list entry (if
1958  * any) is removed, and the node is placed on the mapped list.
1959  */
1960 /*
1961  * For a Link Down, all nodes on the ADISC, PLOGI, unmapped or mapped
1962  * lists will receive a DEVICE_RECOVERY event. If the linkdown or devloss timers
 * expire, all affected nodes will receive a DEVICE_RM event.
1964  */
1965 /*
1966  * For a Link Up or RSCN, all nodes will move from the mapped / unmapped lists
1967  * to either the ADISC or PLOGI list.  After a Nameserver query or ALPA loopmap
1968  * check, additional nodes may be added or removed (via DEVICE_RM) to / from
1969  * the PLOGI or ADISC lists. Once the PLOGI and ADISC lists are populated,
1970  * we will first process the ADISC list.  32 entries are processed initially and
 * ADISC is initiated for each one.  Completions / Events for each node are
1972  * funnelled thru the state machine.  As each node finishes ADISC processing, it
1973  * starts ADISC for any nodes waiting for ADISC processing. If no nodes are
1974  * waiting, and the ADISC list count is identically 0, then we are done. For
1975  * Link Up discovery, since all nodes on the PLOGI list are UNREG_LOGIN'ed, we
1976  * can issue a CLEAR_LA and reenable Link Events. Next we will process the PLOGI
 * list.  32 entries are processed initially and PLOGI is initiated for each one.
1978  * Completions / Events for each node are funnelled thru the state machine.  As
1979  * each node finishes PLOGI processing, it starts PLOGI for any nodes waiting
1980  * for PLOGI processing. If no nodes are waiting, and the PLOGI list count is
 * identically 0, then we are done. We have now completed discovery / RSCN
1982  * handling. Upon completion, ALL nodes should be on either the mapped or
1983  * unmapped lists.
1984  */
1985 
/*
 * Discovery state machine dispatch table.
 *
 * Conceptually a 2-D table [current state][event], stored flattened; the
 * dispatcher indexes it as (state * NLP_EVT_MAX_EVENT) + event.  Each entry
 * is the action routine run for that (state, event) pair; it returns the
 * node's next state, or NLP_STE_FREED_NODE when the node has been released.
 * Entry order must exactly match the NLP_STE_* and NLP_EVT_* definitions.
 */
static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
     (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t) = {
	/* Action routine                  Event       Current State  */
	lpfc_rcv_plogi_unused_node,	/* RCV_PLOGI   UNUSED_NODE    */
	lpfc_rcv_els_unused_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unused_node,	/* RCV_LOGO        */
	lpfc_rcv_els_unused_node,	/* RCV_ADISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PDISC       */
	lpfc_rcv_els_unused_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_unused_node,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_unused_node,	/* DEVICE_RM       */
	lpfc_disc_illegal,		/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_plogi_issue,	/* RCV_PLOGI   PLOGI_ISSUE    */
	lpfc_rcv_prli_plogi_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_plogi_issue,	/* RCV_LOGO        */
	lpfc_rcv_els_plogi_issue,	/* RCV_ADISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PDISC       */
	lpfc_rcv_els_plogi_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_plogi_issue,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_cmpl_logo_plogi_issue,	/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_plogi_issue,/* CMPL_REG_LOGIN  */
	lpfc_device_rm_plogi_issue,	/* DEVICE_RM       */
	lpfc_device_recov_plogi_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_adisc_issue,	/* RCV_PLOGI   ADISC_ISSUE    */
	lpfc_rcv_prli_adisc_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_adisc_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_adisc_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_adisc_issue,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_cmpl_adisc_adisc_issue,	/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_adisc_issue,	/* DEVICE_RM       */
	lpfc_device_recov_adisc_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_reglogin_issue,	/* RCV_PLOGI  REG_LOGIN_ISSUE */
	lpfc_rcv_prli_reglogin_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_reglogin_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_reglogin_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_reglogin_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_cmpl_reglogin_reglogin_issue,/* CMPL_REG_LOGIN  */
	lpfc_device_rm_reglogin_issue,	/* DEVICE_RM       */
	lpfc_device_recov_reglogin_issue,/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_prli_issue,	/* RCV_PLOGI   PRLI_ISSUE     */
	lpfc_rcv_prli_prli_issue,	/* RCV_PRLI        */
	lpfc_rcv_logo_prli_issue,	/* RCV_LOGO        */
	lpfc_rcv_padisc_prli_issue,	/* RCV_ADISC       */
	lpfc_rcv_padisc_prli_issue,	/* RCV_PDISC       */
	lpfc_rcv_prlo_prli_issue,	/* RCV_PRLO        */
	lpfc_cmpl_plogi_illegal,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_prli_issue,	/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_device_rm_prli_issue,	/* DEVICE_RM       */
	lpfc_device_recov_prli_issue,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_unmap_node,	/* RCV_PLOGI   UNMAPPED_NODE  */
	lpfc_rcv_prli_unmap_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_unmap_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_unmap_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_unmap_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_unmap_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_unmap_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_mapped_node,	/* RCV_PLOGI   MAPPED_NODE    */
	lpfc_rcv_prli_mapped_node,	/* RCV_PRLI        */
	lpfc_rcv_logo_mapped_node,	/* RCV_LOGO        */
	lpfc_rcv_padisc_mapped_node,	/* RCV_ADISC       */
	lpfc_rcv_padisc_mapped_node,	/* RCV_PDISC       */
	lpfc_rcv_prlo_mapped_node,	/* RCV_PRLO        */
	lpfc_disc_illegal,		/* CMPL_PLOGI      */
	lpfc_disc_illegal,		/* CMPL_PRLI       */
	lpfc_disc_illegal,		/* CMPL_LOGO       */
	lpfc_disc_illegal,		/* CMPL_ADISC      */
	lpfc_disc_illegal,		/* CMPL_REG_LOGIN  */
	lpfc_disc_illegal,		/* DEVICE_RM       */
	lpfc_device_recov_mapped_node,	/* DEVICE_RECOVERY */

	lpfc_rcv_plogi_npr_node,        /* RCV_PLOGI   NPR_NODE    */
	lpfc_rcv_prli_npr_node,         /* RCV_PRLI        */
	lpfc_rcv_logo_npr_node,         /* RCV_LOGO        */
	lpfc_rcv_padisc_npr_node,       /* RCV_ADISC       */
	lpfc_rcv_padisc_npr_node,       /* RCV_PDISC       */
	lpfc_rcv_prlo_npr_node,         /* RCV_PRLO        */
	lpfc_cmpl_plogi_npr_node,	/* CMPL_PLOGI      */
	lpfc_cmpl_prli_npr_node,	/* CMPL_PRLI       */
	lpfc_cmpl_logo_npr_node,        /* CMPL_LOGO       */
	lpfc_cmpl_adisc_npr_node,       /* CMPL_ADISC      */
	lpfc_cmpl_reglogin_npr_node,    /* CMPL_REG_LOGIN  */
	lpfc_device_rm_npr_node,        /* DEVICE_RM       */
	lpfc_device_recov_npr_node,     /* DEVICE_RECOVERY */
};
2101 
2102 int
2103 lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2104 			void *arg, uint32_t evt)
2105 {
2106 	uint32_t cur_state, rc;
2107 	uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *,
2108 			 uint32_t);
2109 	uint32_t got_ndlp = 0;
2110 
2111 	if (lpfc_nlp_get(ndlp))
2112 		got_ndlp = 1;
2113 
2114 	cur_state = ndlp->nlp_state;
2115 
2116 	/* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */
2117 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2118 			 "0211 DSM in event x%x on NPort x%x in "
2119 			 "state %d Data: x%x\n",
2120 			 evt, ndlp->nlp_DID, cur_state, ndlp->nlp_flag);
2121 
2122 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2123 		 "DSM in:          evt:%d ste:%d did:x%x",
2124 		evt, cur_state, ndlp->nlp_DID);
2125 
2126 	func = lpfc_disc_action[(cur_state * NLP_EVT_MAX_EVENT) + evt];
2127 	rc = (func) (vport, ndlp, arg, evt);
2128 
2129 	/* DSM out state <rc> on NPort <nlp_DID> */
2130 	if (got_ndlp) {
2131 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2132 			 "0212 DSM out state %d on NPort x%x Data: x%x\n",
2133 			 rc, ndlp->nlp_DID, ndlp->nlp_flag);
2134 
2135 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2136 			"DSM out:         ste:%d did:x%x flg:x%x",
2137 			rc, ndlp->nlp_DID, ndlp->nlp_flag);
2138 		/* Decrement the ndlp reference count held for this function */
2139 		lpfc_nlp_put(ndlp);
2140 	} else {
2141 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2142 			"0213 DSM out state %d on NPort free\n", rc);
2143 
2144 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM,
2145 			"DSM out:         ste:%d did:x%x flg:x%x",
2146 			rc, 0, 0);
2147 	}
2148 
2149 	return rc;
2150 }
2151