xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_els.c (revision c0891ac1)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 /* See Fibre Channel protocol T11 FC-LS for details */
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 
30 #include <scsi/scsi.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_transport_fc.h>
34 #include <uapi/scsi/fc/fc_fs.h>
35 #include <uapi/scsi/fc/fc_els.h>
36 
37 #include "lpfc_hw4.h"
38 #include "lpfc_hw.h"
39 #include "lpfc_sli.h"
40 #include "lpfc_sli4.h"
41 #include "lpfc_nl.h"
42 #include "lpfc_disc.h"
43 #include "lpfc_scsi.h"
44 #include "lpfc.h"
45 #include "lpfc_logmsg.h"
46 #include "lpfc_crtn.h"
47 #include "lpfc_vport.h"
48 #include "lpfc_debugfs.h"
49 
50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
51 			  struct lpfc_iocbq *);
52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
53 			struct lpfc_iocbq *);
54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
56 				struct lpfc_nodelist *ndlp, uint8_t retry);
57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
58 				  struct lpfc_iocbq *iocb);
59 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *,
60 			       struct lpfc_iocbq *);
61 
62 static int lpfc_max_els_tries = 3;
63 
64 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport);
65 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max);
66 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid);
67 
68 /**
69  * lpfc_els_chk_latt - Check host link attention event for a vport
70  * @vport: pointer to a host virtual N_Port data structure.
71  *
72  * This routine checks whether there is an outstanding host link
73  * attention event during the discovery process with the @vport. It is done
74  * by reading the HBA's Host Attention (HA) register. If there is any host
75  * link attention events during this @vport's discovery process, the @vport
76  * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
77  * be issued if the link state is not already in host link cleared state,
78  * and a return code shall indicate whether the host link attention event
79  * had happened.
80  *
81  * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
82  * state in LPFC_VPORT_READY, the request for checking host link attention
83  * event will be ignored and a return code shall indicate no host link
84  * attention event had happened.
85  *
86  * Return codes
87  *   0 - no host link attention event happened
88  *   1 - host link attention event happened
89  **/
90 int
91 lpfc_els_chk_latt(struct lpfc_vport *vport)
92 {
93 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
94 	struct lpfc_hba  *phba = vport->phba;
95 	uint32_t ha_copy;
96 
97 	if (vport->port_state >= LPFC_VPORT_READY ||
98 	    phba->link_state == LPFC_LINK_DOWN ||
99 	    phba->sli_rev > LPFC_SLI_REV3)
100 		return 0;
101 
102 	/* Read the HBA Host Attention Register */
103 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
104 		return 1;
105 
106 	if (!(ha_copy & HA_LATT))
107 		return 0;
108 
109 	/* Pending Link Event during Discovery */
110 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
111 			 "0237 Pending Link Event during "
112 			 "Discovery: State x%x\n",
113 			 phba->pport->port_state);
114 
115 	/* CLEAR_LA should re-enable link attention events and
116 	 * we should then immediately take a LATT event. The
117 	 * LATT processing should call lpfc_linkdown() which
118 	 * will cleanup any left over in-progress discovery
119 	 * events.
120 	 */
121 	spin_lock_irq(shost->host_lock);
122 	vport->fc_flag |= FC_ABORT_DISCOVERY;
123 	spin_unlock_irq(shost->host_lock);
124 
125 	if (phba->link_state != LPFC_CLEAR_LA)
126 		lpfc_issue_clear_la(phba, vport);
127 
128 	return 1;
129 }
130 
131 /**
132  * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
133  * @vport: pointer to a host virtual N_Port data structure.
134  * @expectRsp: flag indicating whether response is expected.
135  * @cmdSize: size of the ELS command.
136  * @retry: number of retries to the command IOCB when it fails.
137  * @ndlp: pointer to a node-list data structure.
138  * @did: destination identifier.
139  * @elscmd: the ELS command code.
140  *
141  * This routine is used for allocating a lpfc-IOCB data structure from
142  * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
143  * passed into the routine for discovery state machine to issue an Extended
144  * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
145  * and preparation routine that is used by all the discovery state machine
146  * routines and the ELS command-specific fields will be later set up by
147  * the individual discovery machine routines after calling this routine
148  * allocating and preparing a generic IOCB data structure. It fills in the
149  * Buffer Descriptor Entries (BDEs), allocates buffers for both command
150  * payload and response payload (if expected). The reference count on the
151  * ndlp is incremented by 1 and the reference to the ndlp is put into
152  * context1 of the IOCB data structure for this IOCB to hold the ndlp
153  * reference for the command's callback function to access later.
154  *
155  * Return code
156  *   Pointer to the newly allocated/prepared els iocb data structure
157  *   NULL - when els iocb data structure allocation/preparation failed
158  **/
159 struct lpfc_iocbq *
160 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
161 		   uint16_t cmdSize, uint8_t retry,
162 		   struct lpfc_nodelist *ndlp, uint32_t did,
163 		   uint32_t elscmd)
164 {
165 	struct lpfc_hba  *phba = vport->phba;
166 	struct lpfc_iocbq *elsiocb;
167 	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
168 	struct ulp_bde64 *bpl;
169 	IOCB_t *icmd;
170 
171 
172 	if (!lpfc_is_link_up(phba))
173 		return NULL;
174 
175 	/* Allocate buffer for  command iocb */
176 	elsiocb = lpfc_sli_get_iocbq(phba);
177 
178 	if (elsiocb == NULL)
179 		return NULL;
180 
181 	/*
182 	 * If this command is for fabric controller and HBA running
183 	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
184 	 */
185 	if ((did == Fabric_DID) &&
186 		(phba->hba_flag & HBA_FIP_SUPPORT) &&
187 		((elscmd == ELS_CMD_FLOGI) ||
188 		 (elscmd == ELS_CMD_FDISC) ||
189 		 (elscmd == ELS_CMD_LOGO)))
190 		switch (elscmd) {
191 		case ELS_CMD_FLOGI:
192 		elsiocb->iocb_flag |=
193 			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
194 					& LPFC_FIP_ELS_ID_MASK);
195 		break;
196 		case ELS_CMD_FDISC:
197 		elsiocb->iocb_flag |=
198 			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
199 					& LPFC_FIP_ELS_ID_MASK);
200 		break;
201 		case ELS_CMD_LOGO:
202 		elsiocb->iocb_flag |=
203 			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
204 					& LPFC_FIP_ELS_ID_MASK);
205 		break;
206 		}
207 	else
208 		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
209 
210 	icmd = &elsiocb->iocb;
211 
212 	/* fill in BDEs for command */
213 	/* Allocate buffer for command payload */
214 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
215 	if (pcmd)
216 		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
217 	if (!pcmd || !pcmd->virt)
218 		goto els_iocb_free_pcmb_exit;
219 
220 	INIT_LIST_HEAD(&pcmd->list);
221 
222 	/* Allocate buffer for response payload */
223 	if (expectRsp) {
224 		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
225 		if (prsp)
226 			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
227 						     &prsp->phys);
228 		if (!prsp || !prsp->virt)
229 			goto els_iocb_free_prsp_exit;
230 		INIT_LIST_HEAD(&prsp->list);
231 	} else
232 		prsp = NULL;
233 
234 	/* Allocate buffer for Buffer ptr list */
235 	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
236 	if (pbuflist)
237 		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
238 						 &pbuflist->phys);
239 	if (!pbuflist || !pbuflist->virt)
240 		goto els_iocb_free_pbuf_exit;
241 
242 	INIT_LIST_HEAD(&pbuflist->list);
243 
244 	if (expectRsp) {
245 		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
246 		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
247 		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
248 		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
249 
250 		icmd->un.elsreq64.remoteID = did;		/* DID */
251 		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
252 		if (elscmd == ELS_CMD_FLOGI)
253 			icmd->ulpTimeout = FF_DEF_RATOV * 2;
254 		else if (elscmd == ELS_CMD_LOGO)
255 			icmd->ulpTimeout = phba->fc_ratov;
256 		else
257 			icmd->ulpTimeout = phba->fc_ratov * 2;
258 	} else {
259 		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
260 		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
261 		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
262 		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
263 		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
264 		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
265 	}
266 	icmd->ulpBdeCount = 1;
267 	icmd->ulpLe = 1;
268 	icmd->ulpClass = CLASS3;
269 
270 	/*
271 	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
272 	 * For SLI4, since the driver controls VPIs we also want to include
273 	 * all ELS pt2pt protocol traffic as well.
274 	 */
275 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
276 		((phba->sli_rev == LPFC_SLI_REV4) &&
277 		    (vport->fc_flag & FC_PT2PT))) {
278 
279 		if (expectRsp) {
280 			icmd->un.elsreq64.myID = vport->fc_myDID;
281 
282 			/* For ELS_REQUEST64_CR, use the VPI by default */
283 			icmd->ulpContext = phba->vpi_ids[vport->vpi];
284 		}
285 
286 		icmd->ulpCt_h = 0;
287 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
288 		if (elscmd == ELS_CMD_ECHO)
289 			icmd->ulpCt_l = 0; /* context = invalid RPI */
290 		else
291 			icmd->ulpCt_l = 1; /* context = VPI */
292 	}
293 
294 	bpl = (struct ulp_bde64 *) pbuflist->virt;
295 	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
296 	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
297 	bpl->tus.f.bdeSize = cmdSize;
298 	bpl->tus.f.bdeFlags = 0;
299 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
300 
301 	if (expectRsp) {
302 		bpl++;
303 		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
304 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
305 		bpl->tus.f.bdeSize = FCELSSIZE;
306 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
307 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
308 	}
309 
310 	elsiocb->context2 = pcmd;
311 	elsiocb->context3 = pbuflist;
312 	elsiocb->retry = retry;
313 	elsiocb->vport = vport;
314 	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
315 
316 	if (prsp) {
317 		list_add(&prsp->list, &pcmd->list);
318 	}
319 	if (expectRsp) {
320 		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
321 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
322 				 "0116 Xmit ELS command x%x to remote "
323 				 "NPORT x%x I/O tag: x%x, port state:x%x "
324 				 "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n",
325 				 elscmd, did, elsiocb->iotag,
326 				 vport->port_state, ndlp->nlp_rpi,
327 				 vport->fc_flag, ndlp->nlp_flag, vport);
328 	} else {
329 		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
330 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
331 				 "0117 Xmit ELS response x%x to remote "
332 				 "NPORT x%x I/O tag: x%x, size: x%x "
333 				 "port_state x%x  rpi x%x fc_flag x%x\n",
334 				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
335 				 cmdSize, vport->port_state,
336 				 ndlp->nlp_rpi, vport->fc_flag);
337 	}
338 	return elsiocb;
339 
340 els_iocb_free_pbuf_exit:
341 	if (expectRsp)
342 		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
343 	kfree(pbuflist);
344 
345 els_iocb_free_prsp_exit:
346 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
347 	kfree(prsp);
348 
349 els_iocb_free_pcmb_exit:
350 	kfree(pcmd);
351 	lpfc_sli_release_iocbq(phba, elsiocb);
352 	return NULL;
353 }
354 
355 /**
356  * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
357  * @vport: pointer to a host virtual N_Port data structure.
358  *
359  * This routine issues a fabric registration login for a @vport. An
360  * active ndlp node with Fabric_DID must already exist for this @vport.
361  * The routine invokes two mailbox commands to carry out fabric registration
362  * login through the HBA firmware: the first mailbox command requests the
363  * HBA to perform link configuration for the @vport; and the second mailbox
364  * command requests the HBA to perform the actual fabric registration login
365  * with the @vport.
366  *
367  * Return code
368  *   0 - successfully issued fabric registration login for @vport
369  *   -ENXIO -- failed to issue fabric registration login for @vport
370  **/
371 int
372 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
373 {
374 	struct lpfc_hba  *phba = vport->phba;
375 	LPFC_MBOXQ_t *mbox;
376 	struct lpfc_dmabuf *mp;
377 	struct lpfc_nodelist *ndlp;
378 	struct serv_parm *sp;
379 	int rc;
380 	int err = 0;
381 
382 	sp = &phba->fc_fabparam;
383 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
384 	if (!ndlp) {
385 		err = 1;
386 		goto fail;
387 	}
388 
389 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
390 	if (!mbox) {
391 		err = 2;
392 		goto fail;
393 	}
394 
395 	vport->port_state = LPFC_FABRIC_CFG_LINK;
396 	lpfc_config_link(phba, mbox);
397 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
398 	mbox->vport = vport;
399 
400 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
401 	if (rc == MBX_NOT_FINISHED) {
402 		err = 3;
403 		goto fail_free_mbox;
404 	}
405 
406 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
407 	if (!mbox) {
408 		err = 4;
409 		goto fail;
410 	}
411 	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
412 			  ndlp->nlp_rpi);
413 	if (rc) {
414 		err = 5;
415 		goto fail_free_mbox;
416 	}
417 
418 	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
419 	mbox->vport = vport;
420 	/* increment the reference count on ndlp to hold reference
421 	 * for the callback routine.
422 	 */
423 	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
424 	if (!mbox->ctx_ndlp) {
425 		err = 6;
426 		goto fail_no_ndlp;
427 	}
428 
429 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
430 	if (rc == MBX_NOT_FINISHED) {
431 		err = 7;
432 		goto fail_issue_reg_login;
433 	}
434 
435 	return 0;
436 
437 fail_issue_reg_login:
438 	/* decrement the reference count on ndlp just incremented
439 	 * for the failed mbox command.
440 	 */
441 	lpfc_nlp_put(ndlp);
442 fail_no_ndlp:
443 	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
444 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
445 	kfree(mp);
446 fail_free_mbox:
447 	mempool_free(mbox, phba->mbox_mem_pool);
448 
449 fail:
450 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
451 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
452 			 "0249 Cannot issue Register Fabric login: Err %d\n",
453 			 err);
454 	return -ENXIO;
455 }
456 
457 /**
458  * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
459  * @vport: pointer to a host virtual N_Port data structure.
460  *
461  * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
462  * the @vport. This mailbox command is necessary for SLI4 port only.
463  *
464  * Return code
465  *   0 - successfully issued REG_VFI for @vport
466  *   A failure code otherwise.
467  **/
468 int
469 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
470 {
471 	struct lpfc_hba  *phba = vport->phba;
472 	LPFC_MBOXQ_t *mboxq = NULL;
473 	struct lpfc_nodelist *ndlp;
474 	struct lpfc_dmabuf *dmabuf = NULL;
475 	int rc = 0;
476 
477 	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
478 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
479 	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
480 	    !(vport->fc_flag & FC_PT2PT)) {
481 		ndlp = lpfc_findnode_did(vport, Fabric_DID);
482 		if (!ndlp) {
483 			rc = -ENODEV;
484 			goto fail;
485 		}
486 	}
487 
488 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
489 	if (!mboxq) {
490 		rc = -ENOMEM;
491 		goto fail;
492 	}
493 
494 	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
495 	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
496 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
497 		if (!dmabuf) {
498 			rc = -ENOMEM;
499 			goto fail;
500 		}
501 		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
502 		if (!dmabuf->virt) {
503 			rc = -ENOMEM;
504 			goto fail;
505 		}
506 		memcpy(dmabuf->virt, &phba->fc_fabparam,
507 		       sizeof(struct serv_parm));
508 	}
509 
510 	vport->port_state = LPFC_FABRIC_CFG_LINK;
511 	if (dmabuf)
512 		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
513 	else
514 		lpfc_reg_vfi(mboxq, vport, 0);
515 
516 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
517 	mboxq->vport = vport;
518 	mboxq->ctx_buf = dmabuf;
519 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
520 	if (rc == MBX_NOT_FINISHED) {
521 		rc = -ENXIO;
522 		goto fail;
523 	}
524 	return 0;
525 
526 fail:
527 	if (mboxq)
528 		mempool_free(mboxq, phba->mbox_mem_pool);
529 	if (dmabuf) {
530 		if (dmabuf->virt)
531 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
532 		kfree(dmabuf);
533 	}
534 
535 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
536 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
537 			 "0289 Issue Register VFI failed: Err %d\n", rc);
538 	return rc;
539 }
540 
541 /**
542  * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
543  * @vport: pointer to a host virtual N_Port data structure.
544  *
545  * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
546  * the @vport. This mailbox command is necessary for SLI4 port only.
547  *
548  * Return code
549  *   0 - successfully issued REG_VFI for @vport
550  *   A failure code otherwise.
551  **/
552 int
553 lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
554 {
555 	struct lpfc_hba *phba = vport->phba;
556 	struct Scsi_Host *shost;
557 	LPFC_MBOXQ_t *mboxq;
558 	int rc;
559 
560 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
561 	if (!mboxq) {
562 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
563 				"2556 UNREG_VFI mbox allocation failed"
564 				"HBA state x%x\n", phba->pport->port_state);
565 		return -ENOMEM;
566 	}
567 
568 	lpfc_unreg_vfi(mboxq, vport);
569 	mboxq->vport = vport;
570 	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
571 
572 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
573 	if (rc == MBX_NOT_FINISHED) {
574 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
575 				"2557 UNREG_VFI issue mbox failed rc x%x "
576 				"HBA state x%x\n",
577 				rc, phba->pport->port_state);
578 		mempool_free(mboxq, phba->mbox_mem_pool);
579 		return -EIO;
580 	}
581 
582 	shost = lpfc_shost_from_vport(vport);
583 	spin_lock_irq(shost->host_lock);
584 	vport->fc_flag &= ~FC_VFI_REGISTERED;
585 	spin_unlock_irq(shost->host_lock);
586 	return 0;
587 }
588 
589 /**
590  * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
591  * @vport: pointer to a host virtual N_Port data structure.
592  * @sp: pointer to service parameter data structure.
593  *
594  * This routine is called from FLOGI/FDISC completion handler functions.
595  * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
596  * node nodename is changed in the completion service parameter else return
597  * 0. This function also set flag in the vport data structure to delay
598  * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
599  * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
600  * node nodename is changed in the completion service parameter.
601  *
602  * Return code
603  *   0 - FCID and Fabric Nodename and Fabric portname is not changed.
604  *   1 - FCID or Fabric Nodename or Fabric portname is changed.
605  *
606  **/
607 static uint8_t
608 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
609 		struct serv_parm *sp)
610 {
611 	struct lpfc_hba *phba = vport->phba;
612 	uint8_t fabric_param_changed = 0;
613 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
614 
615 	if ((vport->fc_prevDID != vport->fc_myDID) ||
616 		memcmp(&vport->fabric_portname, &sp->portName,
617 			sizeof(struct lpfc_name)) ||
618 		memcmp(&vport->fabric_nodename, &sp->nodeName,
619 			sizeof(struct lpfc_name)) ||
620 		(vport->vport_flag & FAWWPN_PARAM_CHG)) {
621 		fabric_param_changed = 1;
622 		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
623 	}
624 	/*
625 	 * Word 1 Bit 31 in common service parameter is overloaded.
626 	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
627 	 * Word 1 Bit 31 in FLOGI response is clean address bit
628 	 *
629 	 * If fabric parameter is changed and clean address bit is
630 	 * cleared delay nport discovery if
631 	 * - vport->fc_prevDID != 0 (not initial discovery) OR
632 	 * - lpfc_delay_discovery module parameter is set.
633 	 */
634 	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
635 	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
636 		spin_lock_irq(shost->host_lock);
637 		vport->fc_flag |= FC_DISC_DELAYED;
638 		spin_unlock_irq(shost->host_lock);
639 	}
640 
641 	return fabric_param_changed;
642 }
643 
644 
645 /**
646  * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
647  * @vport: pointer to a host virtual N_Port data structure.
648  * @ndlp: pointer to a node-list data structure.
649  * @sp: pointer to service parameter data structure.
650  * @irsp: pointer to the IOCB within the lpfc response IOCB.
651  *
652  * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
653  * function to handle the completion of a Fabric Login (FLOGI) into a fabric
654  * port in a fabric topology. It properly sets up the parameters to the @ndlp
655  * from the IOCB response. It also check the newly assigned N_Port ID to the
656  * @vport against the previously assigned N_Port ID. If it is different from
657  * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
658  * is invoked on all the remaining nodes with the @vport to unregister the
659  * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
660  * is invoked to register login to the fabric.
661  *
662  * Return code
663  *   0 - Success (currently, always return 0)
664  **/
665 static int
666 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
667 			   struct serv_parm *sp, IOCB_t *irsp)
668 {
669 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
670 	struct lpfc_hba  *phba = vport->phba;
671 	struct lpfc_nodelist *np;
672 	struct lpfc_nodelist *next_np;
673 	uint8_t fabric_param_changed;
674 
675 	spin_lock_irq(shost->host_lock);
676 	vport->fc_flag |= FC_FABRIC;
677 	spin_unlock_irq(shost->host_lock);
678 
679 	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
680 	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
681 		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
682 
683 	phba->fc_edtovResol = sp->cmn.edtovResolution;
684 	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
685 
686 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
687 		spin_lock_irq(shost->host_lock);
688 		vport->fc_flag |= FC_PUBLIC_LOOP;
689 		spin_unlock_irq(shost->host_lock);
690 	}
691 
692 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
693 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
694 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
695 	ndlp->nlp_class_sup = 0;
696 	if (sp->cls1.classValid)
697 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
698 	if (sp->cls2.classValid)
699 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
700 	if (sp->cls3.classValid)
701 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
702 	if (sp->cls4.classValid)
703 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
704 	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
705 				sp->cmn.bbRcvSizeLsb;
706 
707 	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
708 	if (fabric_param_changed) {
709 		/* Reset FDMI attribute masks based on config parameter */
710 		if (phba->cfg_enable_SmartSAN ||
711 		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
712 			/* Setup appropriate attribute masks */
713 			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
714 			if (phba->cfg_enable_SmartSAN)
715 				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
716 			else
717 				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
718 		} else {
719 			vport->fdmi_hba_mask = 0;
720 			vport->fdmi_port_mask = 0;
721 		}
722 
723 	}
724 	memcpy(&vport->fabric_portname, &sp->portName,
725 			sizeof(struct lpfc_name));
726 	memcpy(&vport->fabric_nodename, &sp->nodeName,
727 			sizeof(struct lpfc_name));
728 	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
729 
730 	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
731 		if (sp->cmn.response_multiple_NPort) {
732 			lpfc_printf_vlog(vport, KERN_WARNING,
733 					 LOG_ELS | LOG_VPORT,
734 					 "1816 FLOGI NPIV supported, "
735 					 "response data 0x%x\n",
736 					 sp->cmn.response_multiple_NPort);
737 			spin_lock_irq(&phba->hbalock);
738 			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
739 			spin_unlock_irq(&phba->hbalock);
740 		} else {
741 			/* Because we asked f/w for NPIV it still expects us
742 			to call reg_vnpid at least for the physical host */
743 			lpfc_printf_vlog(vport, KERN_WARNING,
744 					 LOG_ELS | LOG_VPORT,
745 					 "1817 Fabric does not support NPIV "
746 					 "- configuring single port mode.\n");
747 			spin_lock_irq(&phba->hbalock);
748 			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
749 			spin_unlock_irq(&phba->hbalock);
750 		}
751 	}
752 
753 	/*
754 	 * For FC we need to do some special processing because of the SLI
755 	 * Port's default settings of the Common Service Parameters.
756 	 */
757 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
758 	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
759 		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
760 		if (fabric_param_changed)
761 			lpfc_unregister_fcf_prep(phba);
762 
763 		/* This should just update the VFI CSPs*/
764 		if (vport->fc_flag & FC_VFI_REGISTERED)
765 			lpfc_issue_reg_vfi(vport);
766 	}
767 
768 	if (fabric_param_changed &&
769 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
770 
771 		/* If our NportID changed, we need to ensure all
772 		 * remaining NPORTs get unreg_login'ed.
773 		 */
774 		list_for_each_entry_safe(np, next_np,
775 					&vport->fc_nodes, nlp_listp) {
776 			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
777 				   !(np->nlp_flag & NLP_NPR_ADISC))
778 				continue;
779 			spin_lock_irq(&np->lock);
780 			np->nlp_flag &= ~NLP_NPR_ADISC;
781 			spin_unlock_irq(&np->lock);
782 			lpfc_unreg_rpi(vport, np);
783 		}
784 		lpfc_cleanup_pending_mbox(vport);
785 
786 		if (phba->sli_rev == LPFC_SLI_REV4) {
787 			lpfc_sli4_unreg_all_rpis(vport);
788 			lpfc_mbx_unreg_vpi(vport);
789 			spin_lock_irq(shost->host_lock);
790 			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
791 			spin_unlock_irq(shost->host_lock);
792 		}
793 
794 		/*
795 		 * For SLI3 and SLI4, the VPI needs to be reregistered in
796 		 * response to this fabric parameter change event.
797 		 */
798 		spin_lock_irq(shost->host_lock);
799 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
800 		spin_unlock_irq(shost->host_lock);
801 	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
802 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
803 			/*
804 			 * Driver needs to re-reg VPI in order for f/w
805 			 * to update the MAC address.
806 			 */
807 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
808 			lpfc_register_new_vport(phba, vport, ndlp);
809 			return 0;
810 	}
811 
812 	if (phba->sli_rev < LPFC_SLI_REV4) {
813 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
814 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
815 		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
816 			lpfc_register_new_vport(phba, vport, ndlp);
817 		else
818 			lpfc_issue_fabric_reglogin(vport);
819 	} else {
820 		ndlp->nlp_type |= NLP_FABRIC;
821 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
822 		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
823 			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
824 			lpfc_start_fdiscs(phba);
825 			lpfc_do_scr_ns_plogi(phba, vport);
826 		} else if (vport->fc_flag & FC_VFI_REGISTERED)
827 			lpfc_issue_init_vpi(vport);
828 		else {
829 			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
830 					"3135 Need register VFI: (x%x/%x)\n",
831 					vport->fc_prevDID, vport->fc_myDID);
832 			lpfc_issue_reg_vfi(vport);
833 		}
834 	}
835 	return 0;
836 }
837 
838 /**
839  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
840  * @vport: pointer to a host virtual N_Port data structure.
841  * @ndlp: pointer to a node-list data structure.
842  * @sp: pointer to service parameter data structure.
843  *
844  * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
845  * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
846  * in a point-to-point topology. First, the @vport's N_Port Name is compared
847  * with the received N_Port Name: if the @vport's N_Port Name is greater than
848  * the received N_Port Name lexicographically, this node shall assign local
849  * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
850  * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
851  * this node shall just wait for the remote node to issue PLOGI and assign
852  * N_Port IDs.
853  *
854  * Return code
855  *   0 - Success
856  *   -ENXIO - Fail
857  **/
858 static int
859 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
860 			  struct serv_parm *sp)
861 {
862 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
863 	struct lpfc_hba  *phba = vport->phba;
864 	LPFC_MBOXQ_t *mbox;
865 	int rc;
866 
867 	spin_lock_irq(shost->host_lock);
868 	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
869 	vport->fc_flag |= FC_PT2PT;
870 	spin_unlock_irq(shost->host_lock);
871 
872 	/* If we are pt2pt with another NPort, force NPIV off! */
873 	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
874 
875 	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
876 	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
877 		lpfc_unregister_fcf_prep(phba);
878 
879 		spin_lock_irq(shost->host_lock);
880 		vport->fc_flag &= ~FC_VFI_REGISTERED;
881 		spin_unlock_irq(shost->host_lock);
882 		phba->fc_topology_changed = 0;
883 	}
884 
885 	rc = memcmp(&vport->fc_portname, &sp->portName,
886 		    sizeof(vport->fc_portname));
887 
888 	if (rc >= 0) {
889 		/* This side will initiate the PLOGI */
890 		spin_lock_irq(shost->host_lock);
891 		vport->fc_flag |= FC_PT2PT_PLOGI;
892 		spin_unlock_irq(shost->host_lock);
893 
894 		/*
895 		 * N_Port ID cannot be 0, set our Id to LocalID
896 		 * the other side will be RemoteID.
897 		 */
898 
899 		/* not equal */
900 		if (rc)
901 			vport->fc_myDID = PT2PT_LocalID;
902 
903 		/* Decrement ndlp reference count indicating that ndlp can be
904 		 * safely released when other references to it are done.
905 		 */
906 		lpfc_nlp_put(ndlp);
907 
908 		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
909 		if (!ndlp) {
910 			/*
911 			 * Cannot find existing Fabric ndlp, so allocate a
912 			 * new one
913 			 */
914 			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
915 			if (!ndlp)
916 				goto fail;
917 		}
918 
919 		memcpy(&ndlp->nlp_portname, &sp->portName,
920 		       sizeof(struct lpfc_name));
921 		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
922 		       sizeof(struct lpfc_name));
923 		/* Set state will put ndlp onto node list if not already done */
924 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
925 		spin_lock_irq(&ndlp->lock);
926 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
927 		spin_unlock_irq(&ndlp->lock);
928 
929 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
930 		if (!mbox)
931 			goto fail;
932 
933 		lpfc_config_link(phba, mbox);
934 
935 		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
936 		mbox->vport = vport;
937 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
938 		if (rc == MBX_NOT_FINISHED) {
939 			mempool_free(mbox, phba->mbox_mem_pool);
940 			goto fail;
941 		}
942 	} else {
943 		/* This side will wait for the PLOGI, decrement ndlp reference
944 		 * count indicating that ndlp can be released when other
945 		 * references to it are done.
946 		 */
947 		lpfc_nlp_put(ndlp);
948 
949 		/* Start discovery - this should just do CLEAR_LA */
950 		lpfc_disc_start(vport);
951 	}
952 
953 	return 0;
954 fail:
955 	return -ENXIO;
956 }
957 
958 /**
959  * lpfc_cmpl_els_flogi - Completion callback function for flogi
960  * @phba: pointer to lpfc hba data structure.
961  * @cmdiocb: pointer to lpfc command iocb data structure.
962  * @rspiocb: pointer to lpfc response iocb data structure.
963  *
964  * This routine is the top-level completion callback function for issuing
965  * a Fabric Login (FLOGI) command. If the response IOCB reported error,
966  * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
967  * retry has been made (either immediately or delayed with lpfc_els_retry()
968  * returning 1), the command IOCB will be released and function returned.
969  * If the retry attempt has been given up (possibly reach the maximum
970  * number of retries), one additional decrement of ndlp reference shall be
971  * invoked before going out after releasing the command IOCB. This will
972  * actually release the remote node (Note, lpfc_els_free_iocb() will also
973  * invoke one decrement of ndlp reference count). If no error reported in
974  * the IOCB status, the command Port ID field is used to determine whether
975  * this is a point-to-point topology or a fabric topology: if the Port ID
976  * field is assigned, it is a fabric topology; otherwise, it is a
977  * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
978  * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
979  * specific topology completion conditions.
980  **/
981 static void
982 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
983 		    struct lpfc_iocbq *rspiocb)
984 {
985 	struct lpfc_vport *vport = cmdiocb->vport;
986 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
987 	IOCB_t *irsp = &rspiocb->iocb;
988 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
989 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
990 	struct serv_parm *sp;
991 	uint16_t fcf_index;
992 	int rc;
993 
994 	/* Check to see if link went down during discovery */
995 	if (lpfc_els_chk_latt(vport)) {
996 		/* One additional decrement on node reference count to
997 		 * trigger the release of the node
998 		 */
999 		lpfc_nlp_put(ndlp);
1000 		goto out;
1001 	}
1002 
1003 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1004 		"FLOGI cmpl:      status:x%x/x%x state:x%x",
1005 		irsp->ulpStatus, irsp->un.ulpWord[4],
1006 		vport->port_state);
1007 
1008 	if (irsp->ulpStatus) {
1009 		/*
1010 		 * In case of FIP mode, perform roundrobin FCF failover
1011 		 * due to new FCF discovery
1012 		 */
1013 		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
1014 		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
1015 			if (phba->link_state < LPFC_LINK_UP)
1016 				goto stop_rr_fcf_flogi;
1017 			if ((phba->fcoe_cvl_eventtag_attn ==
1018 			     phba->fcoe_cvl_eventtag) &&
1019 			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1020 			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1021 			    IOERR_SLI_ABORTED))
1022 				goto stop_rr_fcf_flogi;
1023 			else
1024 				phba->fcoe_cvl_eventtag_attn =
1025 					phba->fcoe_cvl_eventtag;
1026 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1027 					"2611 FLOGI failed on FCF (x%x), "
1028 					"status:x%x/x%x, tmo:x%x, perform "
1029 					"roundrobin FCF failover\n",
1030 					phba->fcf.current_rec.fcf_indx,
1031 					irsp->ulpStatus, irsp->un.ulpWord[4],
1032 					irsp->ulpTimeout);
1033 			lpfc_sli4_set_fcf_flogi_fail(phba,
1034 					phba->fcf.current_rec.fcf_indx);
1035 			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1036 			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1037 			if (rc)
1038 				goto out;
1039 		}
1040 
1041 stop_rr_fcf_flogi:
1042 		/* FLOGI failure */
1043 		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1044 		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1045 					IOERR_LOOP_OPEN_FAILURE)))
1046 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1047 					 "2858 FLOGI failure Status:x%x/x%x TMO"
1048 					 ":x%x Data x%x x%x\n",
1049 					 irsp->ulpStatus, irsp->un.ulpWord[4],
1050 					 irsp->ulpTimeout, phba->hba_flag,
1051 					 phba->fcf.fcf_flag);
1052 
1053 		/* Check for retry */
1054 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1055 			goto out;
1056 
1057 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
1058 				 "0150 FLOGI failure Status:x%x/x%x "
1059 				 "xri x%x TMO:x%x\n",
1060 				 irsp->ulpStatus, irsp->un.ulpWord[4],
1061 				 cmdiocb->sli4_xritag, irsp->ulpTimeout);
1062 
1063 		/* If this is not a loop open failure, bail out */
1064 		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1065 		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1066 					IOERR_LOOP_OPEN_FAILURE)))
1067 			goto flogifail;
1068 
1069 		/* FLOGI failed, so there is no fabric */
1070 		spin_lock_irq(shost->host_lock);
1071 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1072 		spin_unlock_irq(shost->host_lock);
1073 
1074 		/* If private loop, then allow max outstanding els to be
1075 		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1076 		 * alpa map would take too long otherwise.
1077 		 */
1078 		if (phba->alpa_map[0] == 0)
1079 			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1080 		if ((phba->sli_rev == LPFC_SLI_REV4) &&
1081 		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1082 		     (vport->fc_prevDID != vport->fc_myDID) ||
1083 			phba->fc_topology_changed)) {
1084 			if (vport->fc_flag & FC_VFI_REGISTERED) {
1085 				if (phba->fc_topology_changed) {
1086 					lpfc_unregister_fcf_prep(phba);
1087 					spin_lock_irq(shost->host_lock);
1088 					vport->fc_flag &= ~FC_VFI_REGISTERED;
1089 					spin_unlock_irq(shost->host_lock);
1090 					phba->fc_topology_changed = 0;
1091 				} else {
1092 					lpfc_sli4_unreg_all_rpis(vport);
1093 				}
1094 			}
1095 
1096 			/* Do not register VFI if the driver aborted FLOGI */
1097 			if (!lpfc_error_lost_link(irsp))
1098 				lpfc_issue_reg_vfi(vport);
1099 
1100 			lpfc_nlp_put(ndlp);
1101 			goto out;
1102 		}
1103 		goto flogifail;
1104 	}
1105 	spin_lock_irq(shost->host_lock);
1106 	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1107 	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1108 	spin_unlock_irq(shost->host_lock);
1109 
1110 	/*
1111 	 * The FLogI succeeded.  Sync the data for the CPU before
1112 	 * accessing it.
1113 	 */
1114 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1115 	if (!prsp)
1116 		goto out;
1117 	sp = prsp->virt + sizeof(uint32_t);
1118 
1119 	/* FLOGI completes successfully */
1120 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1121 			 "0101 FLOGI completes successfully, I/O tag:x%x, "
1122 			 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x\n",
1123 			 cmdiocb->iotag, cmdiocb->sli4_xritag,
1124 			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1125 			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1126 			 vport->port_state, vport->fc_flag,
1127 			 sp->cmn.priority_tagging);
1128 
1129 	if (sp->cmn.priority_tagging)
1130 		vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA;
1131 
1132 	if (vport->port_state == LPFC_FLOGI) {
1133 		/*
1134 		 * If Common Service Parameters indicate Nport
1135 		 * we are point to point, if Fport we are Fabric.
1136 		 */
1137 		if (sp->cmn.fPort)
1138 			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
1139 		else if (!(phba->hba_flag & HBA_FCOE_MODE))
1140 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1141 		else {
1142 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1143 				"2831 FLOGI response with cleared Fabric "
1144 				"bit fcf_index 0x%x "
1145 				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1146 				"Fabric Name "
1147 				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
1148 				phba->fcf.current_rec.fcf_indx,
1149 				phba->fcf.current_rec.switch_name[0],
1150 				phba->fcf.current_rec.switch_name[1],
1151 				phba->fcf.current_rec.switch_name[2],
1152 				phba->fcf.current_rec.switch_name[3],
1153 				phba->fcf.current_rec.switch_name[4],
1154 				phba->fcf.current_rec.switch_name[5],
1155 				phba->fcf.current_rec.switch_name[6],
1156 				phba->fcf.current_rec.switch_name[7],
1157 				phba->fcf.current_rec.fabric_name[0],
1158 				phba->fcf.current_rec.fabric_name[1],
1159 				phba->fcf.current_rec.fabric_name[2],
1160 				phba->fcf.current_rec.fabric_name[3],
1161 				phba->fcf.current_rec.fabric_name[4],
1162 				phba->fcf.current_rec.fabric_name[5],
1163 				phba->fcf.current_rec.fabric_name[6],
1164 				phba->fcf.current_rec.fabric_name[7]);
1165 
1166 			lpfc_nlp_put(ndlp);
1167 			spin_lock_irq(&phba->hbalock);
1168 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1169 			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1170 			spin_unlock_irq(&phba->hbalock);
1171 			phba->fcf.fcf_redisc_attempted = 0; /* reset */
1172 			goto out;
1173 		}
1174 		if (!rc) {
1175 			/* Mark the FCF discovery process done */
1176 			if (phba->hba_flag & HBA_FIP_SUPPORT)
1177 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1178 						LOG_ELS,
1179 						"2769 FLOGI to FCF (x%x) "
1180 						"completed successfully\n",
1181 						phba->fcf.current_rec.fcf_indx);
1182 			spin_lock_irq(&phba->hbalock);
1183 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1184 			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1185 			spin_unlock_irq(&phba->hbalock);
1186 			phba->fcf.fcf_redisc_attempted = 0; /* reset */
1187 			goto out;
1188 		}
1189 	} else if (vport->port_state > LPFC_FLOGI &&
1190 		   vport->fc_flag & FC_PT2PT) {
1191 		/*
1192 		 * In a p2p topology, it is possible that discovery has
1193 		 * already progressed, and this completion can be ignored.
1194 		 * Recheck the indicated topology.
1195 		 */
1196 		if (!sp->cmn.fPort)
1197 			goto out;
1198 	}
1199 
1200 flogifail:
1201 	spin_lock_irq(&phba->hbalock);
1202 	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1203 	spin_unlock_irq(&phba->hbalock);
1204 
1205 	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
1206 		lpfc_nlp_put(ndlp);
1207 	if (!lpfc_error_lost_link(irsp)) {
1208 		/* FLOGI failed, so just use loop map to make discovery list */
1209 		lpfc_disc_list_loopmap(vport);
1210 
1211 		/* Start discovery */
1212 		lpfc_disc_start(vport);
1213 	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1214 			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1215 			 IOERR_SLI_ABORTED) &&
1216 			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1217 			 IOERR_SLI_DOWN))) &&
1218 			(phba->link_state != LPFC_CLEAR_LA)) {
1219 		/* If FLOGI failed enable link interrupt. */
1220 		lpfc_issue_clear_la(phba, vport);
1221 	}
1222 out:
1223 	phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
1224 	lpfc_els_free_iocb(phba, cmdiocb);
1225 	lpfc_nlp_put(ndlp);
1226 }
1227 
1228 /**
1229  * lpfc_cmpl_els_link_down - Completion callback function for ELS command
1230  *                           aborted during a link down
1231  * @phba: pointer to lpfc hba data structure.
1232  * @cmdiocb: pointer to lpfc command iocb data structure.
1233  * @rspiocb: pointer to lpfc response iocb data structure.
1234  *
1235  */
1236 static void
1237 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1238 			struct lpfc_iocbq *rspiocb)
1239 {
1240 	IOCB_t *irsp;
1241 	uint32_t *pcmd;
1242 	uint32_t cmd;
1243 
1244 	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
1245 	cmd = *pcmd;
1246 	irsp = &rspiocb->iocb;
1247 
1248 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1249 			"6445 ELS completes after LINK_DOWN: "
1250 			" Status %x/%x cmd x%x flg x%x\n",
1251 			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
1252 			cmdiocb->iocb_flag);
1253 
1254 	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
1255 		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
1256 		atomic_dec(&phba->fabric_iocb_count);
1257 	}
1258 	lpfc_els_free_iocb(phba, cmdiocb);
1259 }
1260 
1261 /**
1262  * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1263  * @vport: pointer to a host virtual N_Port data structure.
1264  * @ndlp: pointer to a node-list data structure.
1265  * @retry: number of retries to the command IOCB.
1266  *
1267  * This routine issues a Fabric Login (FLOGI) Request ELS command
1268  * for a @vport. The initiator service parameters are put into the payload
1269  * of the FLOGI Request IOCB and the top-level callback function pointer
1270  * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1271  * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1272  * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1273  *
1274  * Note that the ndlp reference count will be incremented by 1 for holding the
1275  * ndlp and the reference to ndlp will be stored into the context1 field of
1276  * the IOCB for the completion callback function to the FLOGI ELS command.
1277  *
1278  * Return code
1279  *   0 - successfully issued flogi iocb for @vport
1280  *   1 - failed to issue flogi iocb for @vport
1281  **/
1282 static int
1283 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1284 		     uint8_t retry)
1285 {
1286 	struct lpfc_hba  *phba = vport->phba;
1287 	struct serv_parm *sp;
1288 	IOCB_t *icmd;
1289 	struct lpfc_iocbq *elsiocb;
1290 	struct lpfc_iocbq defer_flogi_acc;
1291 	uint8_t *pcmd;
1292 	uint16_t cmdsize;
1293 	uint32_t tmo, did;
1294 	int rc;
1295 
1296 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1297 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1298 				     ndlp->nlp_DID, ELS_CMD_FLOGI);
1299 
1300 	if (!elsiocb)
1301 		return 1;
1302 
1303 	icmd = &elsiocb->iocb;
1304 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1305 
1306 	/* For FLOGI request, remainder of payload is service parameters */
1307 	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1308 	pcmd += sizeof(uint32_t);
1309 	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1310 	sp = (struct serv_parm *) pcmd;
1311 
1312 	/* Setup CSPs accordingly for Fabric */
1313 	sp->cmn.e_d_tov = 0;
1314 	sp->cmn.w2.r_a_tov = 0;
1315 	sp->cmn.virtual_fabric_support = 0;
1316 	sp->cls1.classValid = 0;
1317 	if (sp->cmn.fcphLow < FC_PH3)
1318 		sp->cmn.fcphLow = FC_PH3;
1319 	if (sp->cmn.fcphHigh < FC_PH3)
1320 		sp->cmn.fcphHigh = FC_PH3;
1321 
1322 	/* Determine if switch supports priority tagging */
1323 	if (phba->cfg_vmid_priority_tagging) {
1324 		sp->cmn.priority_tagging = 1;
1325 		/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
1326 		if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
1327 			memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
1328 			       sizeof(phba->wwpn));
1329 			memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
1330 			       sizeof(phba->wwnn));
1331 		}
1332 	}
1333 
1334 	if  (phba->sli_rev == LPFC_SLI_REV4) {
1335 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1336 		    LPFC_SLI_INTF_IF_TYPE_0) {
1337 			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1338 			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1339 			/* FLOGI needs to be 3 for WQE FCFI */
1340 			/* Set the fcfi to the fcfi we registered with */
1341 			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1342 		}
1343 		/* Can't do SLI4 class2 without support sequence coalescing */
1344 		sp->cls2.classValid = 0;
1345 		sp->cls2.seqDelivery = 0;
1346 	} else {
1347 		/* Historical, setting sequential-delivery bit for SLI3 */
1348 		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1349 		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1350 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1351 			sp->cmn.request_multiple_Nport = 1;
1352 			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1353 			icmd->ulpCt_h = 1;
1354 			icmd->ulpCt_l = 0;
1355 		} else
1356 			sp->cmn.request_multiple_Nport = 0;
1357 	}
1358 
1359 	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1360 		icmd->un.elsreq64.myID = 0;
1361 		icmd->un.elsreq64.fl = 1;
1362 	}
1363 
1364 	tmo = phba->fc_ratov;
1365 	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1366 	lpfc_set_disctmo(vport);
1367 	phba->fc_ratov = tmo;
1368 
1369 	phba->fc_stat.elsXmitFLOGI++;
1370 	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1371 
1372 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1373 		"Issue FLOGI:     opt:x%x",
1374 		phba->sli3_options, 0, 0);
1375 
1376 	elsiocb->context1 = lpfc_nlp_get(ndlp);
1377 	if (!elsiocb->context1) {
1378 		lpfc_els_free_iocb(phba, elsiocb);
1379 		return 1;
1380 	}
1381 
1382 	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1383 	if (rc == IOCB_ERROR) {
1384 		lpfc_els_free_iocb(phba, elsiocb);
1385 		lpfc_nlp_put(ndlp);
1386 		return 1;
1387 	}
1388 
1389 	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);
1390 
1391 	/* Check for a deferred FLOGI ACC condition */
1392 	if (phba->defer_flogi_acc_flag) {
1393 		did = vport->fc_myDID;
1394 		vport->fc_myDID = Fabric_DID;
1395 
1396 		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1397 
1398 		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
1399 		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
1400 						phba->defer_flogi_acc_ox_id;
1401 
1402 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1403 				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1404 				 " ox_id: x%x, hba_flag x%x\n",
1405 				 phba->defer_flogi_acc_rx_id,
1406 				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
1407 
1408 		/* Send deferred FLOGI ACC */
1409 		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1410 				 ndlp, NULL);
1411 
1412 		phba->defer_flogi_acc_flag = false;
1413 
1414 		vport->fc_myDID = did;
1415 	}
1416 
1417 	return 0;
1418 }
1419 
1420 /**
1421  * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1422  * @phba: pointer to lpfc hba data structure.
1423  *
1424  * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1425  * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1426  * list and issues an abort IOCB commond on each outstanding IOCB that
1427  * contains a active Fabric_DID ndlp. Note that this function is to issue
1428  * the abort IOCB command on all the outstanding IOCBs, thus when this
1429  * function returns, it does not guarantee all the IOCBs are actually aborted.
1430  *
1431  * Return code
1432  *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1433  **/
1434 int
1435 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1436 {
1437 	struct lpfc_sli_ring *pring;
1438 	struct lpfc_iocbq *iocb, *next_iocb;
1439 	struct lpfc_nodelist *ndlp;
1440 	IOCB_t *icmd;
1441 
1442 	/* Abort outstanding I/O on NPort <nlp_DID> */
1443 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1444 			"0201 Abort outstanding I/O on NPort x%x\n",
1445 			Fabric_DID);
1446 
1447 	pring = lpfc_phba_elsring(phba);
1448 	if (unlikely(!pring))
1449 		return -EIO;
1450 
1451 	/*
1452 	 * Check the txcmplq for an iocb that matches the nport the driver is
1453 	 * searching for.
1454 	 */
1455 	spin_lock_irq(&phba->hbalock);
1456 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1457 		icmd = &iocb->iocb;
1458 		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
1459 			ndlp = (struct lpfc_nodelist *)(iocb->context1);
1460 			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
1461 				if ((phba->pport->fc_flag & FC_PT2PT) &&
1462 				    !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
1463 					iocb->fabric_iocb_cmpl =
1464 						lpfc_ignore_els_cmpl;
1465 				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
1466 							   NULL);
1467 			}
1468 		}
1469 	}
1470 	/* Make sure HBA is alive */
1471 	lpfc_issue_hb_tmo(phba);
1472 
1473 	spin_unlock_irq(&phba->hbalock);
1474 
1475 	return 0;
1476 }
1477 
1478 /**
1479  * lpfc_initial_flogi - Issue an initial fabric login for a vport
1480  * @vport: pointer to a host virtual N_Port data structure.
1481  *
1482  * This routine issues an initial Fabric Login (FLOGI) for the @vport
1483  * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1484  * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1485  * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1486  * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1487  * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1488  * @vport.
1489  *
1490  * Return code
1491  *   0 - failed to issue initial flogi for @vport
1492  *   1 - successfully issued initial flogi for @vport
1493  **/
1494 int
1495 lpfc_initial_flogi(struct lpfc_vport *vport)
1496 {
1497 	struct lpfc_nodelist *ndlp;
1498 
1499 	vport->port_state = LPFC_FLOGI;
1500 	lpfc_set_disctmo(vport);
1501 
1502 	/* First look for the Fabric ndlp */
1503 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1504 	if (!ndlp) {
1505 		/* Cannot find existing Fabric ndlp, so allocate a new one */
1506 		ndlp = lpfc_nlp_init(vport, Fabric_DID);
1507 		if (!ndlp)
1508 			return 0;
1509 		/* Set the node type */
1510 		ndlp->nlp_type |= NLP_FABRIC;
1511 
1512 		/* Put ndlp onto node list */
1513 		lpfc_enqueue_node(vport, ndlp);
1514 	}
1515 
1516 	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1517 		/* This decrement of reference count to node shall kick off
1518 		 * the release of the node.
1519 		 */
1520 		lpfc_nlp_put(ndlp);
1521 		return 0;
1522 	}
1523 	return 1;
1524 }
1525 
1526 /**
1527  * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1528  * @vport: pointer to a host virtual N_Port data structure.
1529  *
1530  * This routine issues an initial Fabric Discover (FDISC) for the @vport
1531  * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1532  * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1533  * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1534  * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1535  * is then invoked with the @vport and the ndlp to perform the FDISC for the
1536  * @vport.
1537  *
1538  * Return code
1539  *   0 - failed to issue initial fdisc for @vport
1540  *   1 - successfully issued initial fdisc for @vport
1541  **/
1542 int
1543 lpfc_initial_fdisc(struct lpfc_vport *vport)
1544 {
1545 	struct lpfc_nodelist *ndlp;
1546 
1547 	/* First look for the Fabric ndlp */
1548 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1549 	if (!ndlp) {
1550 		/* Cannot find existing Fabric ndlp, so allocate a new one */
1551 		ndlp = lpfc_nlp_init(vport, Fabric_DID);
1552 		if (!ndlp)
1553 			return 0;
1554 
1555 		/* NPIV is only supported in Fabrics. */
1556 		ndlp->nlp_type |= NLP_FABRIC;
1557 
1558 		/* Put ndlp onto node list */
1559 		lpfc_enqueue_node(vport, ndlp);
1560 	}
1561 
1562 	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1563 		/* decrement node reference count to trigger the release of
1564 		 * the node.
1565 		 */
1566 		lpfc_nlp_put(ndlp);
1567 		return 0;
1568 	}
1569 	return 1;
1570 }
1571 
1572 /**
1573  * lpfc_more_plogi - Check and issue remaining plogis for a vport
1574  * @vport: pointer to a host virtual N_Port data structure.
1575  *
1576  * This routine checks whether there are more remaining Port Logins
1577  * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1578  * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1579  * to issue ELS PLOGIs up to the configured discover threads with the
1580  * @vport (@vport->cfg_discovery_threads). The function also decrement
1581  * the @vport's num_disc_node by 1 if it is not already 0.
1582  **/
1583 void
1584 lpfc_more_plogi(struct lpfc_vport *vport)
1585 {
1586 	if (vport->num_disc_nodes)
1587 		vport->num_disc_nodes--;
1588 
1589 	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
1590 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1591 			 "0232 Continue discovery with %d PLOGIs to go "
1592 			 "Data: x%x x%x x%x\n",
1593 			 vport->num_disc_nodes, vport->fc_plogi_cnt,
1594 			 vport->fc_flag, vport->port_state);
1595 	/* Check to see if there are more PLOGIs to be sent */
1596 	if (vport->fc_flag & FC_NLP_MORE)
1597 		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
1598 		lpfc_els_disc_plogi(vport);
1599 
1600 	return;
1601 }
1602 
1603 /**
1604  * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1605  * @phba: pointer to lpfc hba data structure.
1606  * @prsp: pointer to response IOCB payload.
1607  * @ndlp: pointer to a node-list data structure.
1608  *
1609  * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1610  * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt.
1611  * The following cases are considered N_Port confirmed:
1612  * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1613  * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1614  * it does not have WWPN assigned either. If the WWPN is confirmed, the
1615  * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1616  * 1) if there is a node on vport list other than the @ndlp with the same
1617  * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1618  * on that node to release the RPI associated with the node; 2) if there is
1619  * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1620  * into, a new node shall be allocated (or activated). In either case, the
1621  * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1622  * be released and the new_ndlp shall be put on to the vport node list and
1623  * its pointer returned as the confirmed node.
1624  *
1625  * Note that before the @ndlp got "released", the keepDID from not-matching
1626  * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1627  * of the @ndlp. This is because the release of @ndlp is actually to put it
1628  * into an inactive state on the vport node list and the vport node list
1629  * management algorithm does not allow two node with a same DID.
1630  *
1631  * Return code
1632  *   pointer to the PLOGI N_Port @ndlp
1633  **/
1634 static struct lpfc_nodelist *
1635 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1636 			 struct lpfc_nodelist *ndlp)
1637 {
1638 	struct lpfc_vport *vport = ndlp->vport;
1639 	struct lpfc_nodelist *new_ndlp;
1640 	struct serv_parm *sp;
1641 	uint8_t  name[sizeof(struct lpfc_name)];
1642 	uint32_t keepDID = 0, keep_nlp_flag = 0;
1643 	uint32_t keep_new_nlp_flag = 0;
1644 	uint16_t keep_nlp_state;
1645 	u32 keep_nlp_fc4_type = 0;
1646 	struct lpfc_nvme_rport *keep_nrport = NULL;
1647 	unsigned long *active_rrqs_xri_bitmap = NULL;
1648 
1649 	/* Fabric nodes can have the same WWPN so we don't bother searching
1650 	 * by WWPN.  Just return the ndlp that was given to us.
1651 	 */
1652 	if (ndlp->nlp_type & NLP_FABRIC)
1653 		return ndlp;
1654 
1655 	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1656 	memset(name, 0, sizeof(struct lpfc_name));
1657 
1658 	/* Now we find out if the NPort we are logging into, matches the WWPN
1659 	 * we have for that ndlp. If not, we have some work to do.
1660 	 */
1661 	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1662 
1663 	/* return immediately if the WWPN matches ndlp */
1664 	if (!new_ndlp || (new_ndlp == ndlp))
1665 		return ndlp;
1666 
1667 	if (phba->sli_rev == LPFC_SLI_REV4) {
1668 		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1669 						       GFP_KERNEL);
1670 		if (active_rrqs_xri_bitmap)
1671 			memset(active_rrqs_xri_bitmap, 0,
1672 			       phba->cfg_rrq_xri_bitmap_sz);
1673 	}
1674 
1675 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1676 			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
1677 			 "new_ndlp x%x x%x x%x\n",
1678 			 ndlp->nlp_DID, ndlp->nlp_flag,  ndlp->nlp_fc4_type,
1679 			 (new_ndlp ? new_ndlp->nlp_DID : 0),
1680 			 (new_ndlp ? new_ndlp->nlp_flag : 0),
1681 			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
1682 
1683 	keepDID = new_ndlp->nlp_DID;
1684 
1685 	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
1686 		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
1687 		       phba->cfg_rrq_xri_bitmap_sz);
1688 
1689 	/* At this point in this routine, we know new_ndlp will be
1690 	 * returned. however, any previous GID_FTs that were done
1691 	 * would have updated nlp_fc4_type in ndlp, so we must ensure
1692 	 * new_ndlp has the right value.
1693 	 */
1694 	if (vport->fc_flag & FC_FABRIC) {
1695 		keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
1696 		new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1697 	}
1698 
1699 	lpfc_unreg_rpi(vport, new_ndlp);
1700 	new_ndlp->nlp_DID = ndlp->nlp_DID;
1701 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1702 	if (phba->sli_rev == LPFC_SLI_REV4)
1703 		memcpy(new_ndlp->active_rrqs_xri_bitmap,
1704 		       ndlp->active_rrqs_xri_bitmap,
1705 		       phba->cfg_rrq_xri_bitmap_sz);
1706 
1707 	/* Lock both ndlps */
1708 	spin_lock_irq(&ndlp->lock);
1709 	spin_lock_irq(&new_ndlp->lock);
1710 	keep_new_nlp_flag = new_ndlp->nlp_flag;
1711 	keep_nlp_flag = ndlp->nlp_flag;
1712 	new_ndlp->nlp_flag = ndlp->nlp_flag;
1713 
1714 	/* if new_ndlp had NLP_UNREG_INP set, keep it */
1715 	if (keep_new_nlp_flag & NLP_UNREG_INP)
1716 		new_ndlp->nlp_flag |= NLP_UNREG_INP;
1717 	else
1718 		new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
1719 
1720 	/* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
1721 	if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
1722 		new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1723 	else
1724 		new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1725 
1726 	/*
1727 	 * Retain the DROPPED flag. This will take care of the init
1728 	 * refcount when affecting the state change
1729 	 */
1730 	if (keep_new_nlp_flag & NLP_DROPPED)
1731 		new_ndlp->nlp_flag |= NLP_DROPPED;
1732 	else
1733 		new_ndlp->nlp_flag &= ~NLP_DROPPED;
1734 
1735 	ndlp->nlp_flag = keep_new_nlp_flag;
1736 
1737 	/* if ndlp had NLP_UNREG_INP set, keep it */
1738 	if (keep_nlp_flag & NLP_UNREG_INP)
1739 		ndlp->nlp_flag |= NLP_UNREG_INP;
1740 	else
1741 		ndlp->nlp_flag &= ~NLP_UNREG_INP;
1742 
1743 	/* if ndlp had NLP_RPI_REGISTERED set, keep it */
1744 	if (keep_nlp_flag & NLP_RPI_REGISTERED)
1745 		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1746 	else
1747 		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1748 
1749 	/*
1750 	 * Retain the DROPPED flag. This will take care of the init
1751 	 * refcount when affecting the state change
1752 	 */
1753 	if (keep_nlp_flag & NLP_DROPPED)
1754 		ndlp->nlp_flag |= NLP_DROPPED;
1755 	else
1756 		ndlp->nlp_flag &= ~NLP_DROPPED;
1757 
1758 	spin_unlock_irq(&new_ndlp->lock);
1759 	spin_unlock_irq(&ndlp->lock);
1760 
1761 	/* Set nlp_states accordingly */
1762 	keep_nlp_state = new_ndlp->nlp_state;
1763 	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1764 
1765 	/* interchange the nvme remoteport structs */
1766 	keep_nrport = new_ndlp->nrport;
1767 	new_ndlp->nrport = ndlp->nrport;
1768 
1769 	/* Move this back to NPR state */
1770 	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1771 		/* The new_ndlp is replacing ndlp totally, so we need
1772 		 * to put ndlp on UNUSED list and try to free it.
1773 		 */
1774 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1775 			 "3179 PLOGI confirm NEW: %x %x\n",
1776 			 new_ndlp->nlp_DID, keepDID);
1777 
1778 		/* Two ndlps cannot have the same did on the nodelist.
1779 		 * Note: for this case, ndlp has a NULL WWPN so setting
1780 		 * the nlp_fc4_type isn't required.
1781 		 */
1782 		ndlp->nlp_DID = keepDID;
1783 		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1784 		if (phba->sli_rev == LPFC_SLI_REV4 &&
1785 		    active_rrqs_xri_bitmap)
1786 			memcpy(ndlp->active_rrqs_xri_bitmap,
1787 			       active_rrqs_xri_bitmap,
1788 			       phba->cfg_rrq_xri_bitmap_sz);
1789 
1790 	} else {
1791 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1792 			 "3180 PLOGI confirm SWAP: %x %x\n",
1793 			 new_ndlp->nlp_DID, keepDID);
1794 
1795 		lpfc_unreg_rpi(vport, ndlp);
1796 
1797 		/* Two ndlps cannot have the same did and the fc4
1798 		 * type must be transferred because the ndlp is in
1799 		 * flight.
1800 		 */
1801 		ndlp->nlp_DID = keepDID;
1802 		ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1803 
1804 		if (phba->sli_rev == LPFC_SLI_REV4 &&
1805 		    active_rrqs_xri_bitmap)
1806 			memcpy(ndlp->active_rrqs_xri_bitmap,
1807 			       active_rrqs_xri_bitmap,
1808 			       phba->cfg_rrq_xri_bitmap_sz);
1809 
1810 		/* Since we are switching over to the new_ndlp,
1811 		 * reset the old ndlp state
1812 		 */
1813 		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1814 		    (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1815 			keep_nlp_state = NLP_STE_NPR_NODE;
1816 		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1817 		ndlp->nrport = keep_nrport;
1818 	}
1819 
1820 	/*
1821 	 * If ndlp is not associated with any rport we can drop it here else
1822 	 * let dev_loss_tmo_callbk trigger DEVICE_RM event
1823 	 */
1824 	if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE))
1825 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
1826 
1827 	if (phba->sli_rev == LPFC_SLI_REV4 &&
1828 	    active_rrqs_xri_bitmap)
1829 		mempool_free(active_rrqs_xri_bitmap,
1830 			     phba->active_rrq_pool);
1831 
1832 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1833 			 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
1834 			 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
1835 			 new_ndlp->nlp_fc4_type);
1836 
1837 	return new_ndlp;
1838 }
1839 
1840 /**
1841  * lpfc_end_rscn - Check and handle more rscn for a vport
1842  * @vport: pointer to a host virtual N_Port data structure.
1843  *
1844  * This routine checks whether more Registration State Change
1845  * Notifications (RSCNs) came in while the discovery state machine was in
1846  * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1847  * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1848  * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1849  * handling the RSCNs.
1850  **/
1851 void
1852 lpfc_end_rscn(struct lpfc_vport *vport)
1853 {
1854 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1855 
1856 	if (vport->fc_flag & FC_RSCN_MODE) {
1857 		/*
1858 		 * Check to see if more RSCNs came in while we were
1859 		 * processing this one.
1860 		 */
1861 		if (vport->fc_rscn_id_cnt ||
1862 		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1863 			lpfc_els_handle_rscn(vport);
1864 		else {
1865 			spin_lock_irq(shost->host_lock);
1866 			vport->fc_flag &= ~FC_RSCN_MODE;
1867 			spin_unlock_irq(shost->host_lock);
1868 		}
1869 	}
1870 }
1871 
1872 /**
1873  * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1874  * @phba: pointer to lpfc hba data structure.
1875  * @cmdiocb: pointer to lpfc command iocb data structure.
1876  * @rspiocb: pointer to lpfc response iocb data structure.
1877  *
1878  * This routine will call the clear rrq function to free the rrq and
1879  * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1880  * exist then the clear_rrq is still called because the rrq needs to
1881  * be freed.
1882  **/
1883 
1884 static void
1885 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1886 		    struct lpfc_iocbq *rspiocb)
1887 {
1888 	struct lpfc_vport *vport = cmdiocb->vport;
1889 	IOCB_t *irsp;
1890 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
1891 	struct lpfc_node_rrq *rrq;
1892 
1893 	/* we pass cmdiocb to state machine which needs rspiocb as well */
1894 	rrq = cmdiocb->context_un.rrq;
1895 	cmdiocb->context_un.rsp_iocb = rspiocb;
1896 
1897 	irsp = &rspiocb->iocb;
1898 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1899 		"RRQ cmpl:      status:x%x/x%x did:x%x",
1900 		irsp->ulpStatus, irsp->un.ulpWord[4],
1901 		irsp->un.elsreq64.remoteID);
1902 
1903 	/* rrq completes to NPort <nlp_DID> */
1904 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1905 			 "2880 RRQ completes to DID x%x "
1906 			 "Data: x%x x%x x%x x%x x%x\n",
1907 			 irsp->un.elsreq64.remoteID,
1908 			 irsp->ulpStatus, irsp->un.ulpWord[4],
1909 			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1910 
1911 	if (irsp->ulpStatus) {
1912 		/* Check for retry */
1913 		/* RRQ failed Don't print the vport to vport rjts */
1914 		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1915 			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1916 			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1917 			(phba)->pport->cfg_log_verbose & LOG_ELS)
1918 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1919 					 "2881 RRQ failure DID:%06X Status:"
1920 					 "x%x/x%x\n",
1921 					 ndlp->nlp_DID, irsp->ulpStatus,
1922 					 irsp->un.ulpWord[4]);
1923 	}
1924 
1925 	lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1926 	lpfc_els_free_iocb(phba, cmdiocb);
1927 	lpfc_nlp_put(ndlp);
1928 	return;
1929 }
1930 /**
1931  * lpfc_cmpl_els_plogi - Completion callback function for plogi
1932  * @phba: pointer to lpfc hba data structure.
1933  * @cmdiocb: pointer to lpfc command iocb data structure.
1934  * @rspiocb: pointer to lpfc response iocb data structure.
1935  *
1936  * This routine is the completion callback function for issuing the Port
1937  * Login (PLOGI) command. For PLOGI completion, there must be an active
1938  * ndlp on the vport node list that matches the remote node ID from the
1939  * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
1940  * ignored and command IOCB released. The PLOGI response IOCB status is
1941  * checked for error conditions. If there is error status reported, PLOGI
1942  * retry shall be attempted by invoking the lpfc_els_retry() routine.
1943  * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1944  * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1945  * (DSM) is set for this PLOGI completion. Finally, it checks whether
1946  * there are additional N_Port nodes with the vport that need to perform
1947  * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition
1948  * PLOGIs.
1949  **/
1950 static void
1951 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1952 		    struct lpfc_iocbq *rspiocb)
1953 {
1954 	struct lpfc_vport *vport = cmdiocb->vport;
1955 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1956 	IOCB_t *irsp;
1957 	struct lpfc_nodelist *ndlp, *free_ndlp;
1958 	struct lpfc_dmabuf *prsp;
1959 	int disc;
1960 	struct serv_parm *sp = NULL;
1961 
1962 	/* we pass cmdiocb to state machine which needs rspiocb as well */
1963 	cmdiocb->context_un.rsp_iocb = rspiocb;
1964 
1965 	irsp = &rspiocb->iocb;
1966 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1967 		"PLOGI cmpl:      status:x%x/x%x did:x%x",
1968 		irsp->ulpStatus, irsp->un.ulpWord[4],
1969 		irsp->un.elsreq64.remoteID);
1970 
1971 	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1972 	if (!ndlp) {
1973 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1974 				 "0136 PLOGI completes to NPort x%x "
1975 				 "with no ndlp. Data: x%x x%x x%x\n",
1976 				 irsp->un.elsreq64.remoteID,
1977 				 irsp->ulpStatus, irsp->un.ulpWord[4],
1978 				 irsp->ulpIoTag);
1979 		goto out_freeiocb;
1980 	}
1981 
1982 	/* Since ndlp can be freed in the disc state machine, note if this node
1983 	 * is being used during discovery.
1984 	 */
1985 	spin_lock_irq(&ndlp->lock);
1986 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1987 	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1988 	spin_unlock_irq(&ndlp->lock);
1989 
1990 	/* PLOGI completes to NPort <nlp_DID> */
1991 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1992 			 "0102 PLOGI completes to NPort x%06x "
1993 			 "Data: x%x x%x x%x x%x x%x\n",
1994 			 ndlp->nlp_DID, ndlp->nlp_fc4_type,
1995 			 irsp->ulpStatus, irsp->un.ulpWord[4],
1996 			 disc, vport->num_disc_nodes);
1997 
1998 	/* Check to see if link went down during discovery */
1999 	if (lpfc_els_chk_latt(vport)) {
2000 		spin_lock_irq(&ndlp->lock);
2001 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2002 		spin_unlock_irq(&ndlp->lock);
2003 		goto out;
2004 	}
2005 
2006 	if (irsp->ulpStatus) {
2007 		/* Check for retry */
2008 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2009 			/* ELS command is being retried */
2010 			if (disc) {
2011 				spin_lock_irq(&ndlp->lock);
2012 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2013 				spin_unlock_irq(&ndlp->lock);
2014 			}
2015 			goto out;
2016 		}
2017 		/* PLOGI failed Don't print the vport to vport rjts */
2018 		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
2019 			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
2020 			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
2021 			(phba)->pport->cfg_log_verbose & LOG_ELS)
2022 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2023 				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
2024 				 ndlp->nlp_DID, irsp->ulpStatus,
2025 				 irsp->un.ulpWord[4]);
2026 
2027 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2028 		if (lpfc_error_lost_link(irsp))
2029 			goto check_plogi;
2030 		else
2031 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2032 						NLP_EVT_CMPL_PLOGI);
2033 
2034 		/* If a PLOGI collision occurred, the node needs to continue
2035 		 * with the reglogin process.
2036 		 */
2037 		spin_lock_irq(&ndlp->lock);
2038 		if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) &&
2039 		    ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
2040 			spin_unlock_irq(&ndlp->lock);
2041 			goto out;
2042 		}
2043 		spin_unlock_irq(&ndlp->lock);
2044 
2045 		/* No PLOGI collision and the node is not registered with the
2046 		 * scsi or nvme transport. It is no longer an active node. Just
2047 		 * start the device remove process.
2048 		 */
2049 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2050 			spin_lock_irq(&ndlp->lock);
2051 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2052 			spin_unlock_irq(&ndlp->lock);
2053 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2054 						NLP_EVT_DEVICE_RM);
2055 		}
2056 	} else {
2057 		/* Good status, call state machine */
2058 		prsp = list_entry(((struct lpfc_dmabuf *)
2059 				   cmdiocb->context2)->list.next,
2060 				  struct lpfc_dmabuf, list);
2061 		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2062 
2063 		sp = (struct serv_parm *)((u8 *)prsp->virt +
2064 					  sizeof(u32));
2065 
2066 		ndlp->vmid_support = 0;
2067 		if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) ||
2068 		    (phba->cfg_vmid_priority_tagging &&
2069 		     sp->cmn.priority_tagging)) {
2070 			lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS,
2071 					"4018 app_hdr_support %d tagging %d DID x%x\n",
2072 					sp->cmn.app_hdr_support,
2073 					sp->cmn.priority_tagging,
2074 					ndlp->nlp_DID);
2075 			/* if the dest port supports VMID, mark it in ndlp */
2076 			ndlp->vmid_support = 1;
2077 		}
2078 
2079 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2080 					NLP_EVT_CMPL_PLOGI);
2081 	}
2082 
2083  check_plogi:
2084 	if (disc && vport->num_disc_nodes) {
2085 		/* Check to see if there are more PLOGIs to be sent */
2086 		lpfc_more_plogi(vport);
2087 
2088 		if (vport->num_disc_nodes == 0) {
2089 			spin_lock_irq(shost->host_lock);
2090 			vport->fc_flag &= ~FC_NDISC_ACTIVE;
2091 			spin_unlock_irq(shost->host_lock);
2092 
2093 			lpfc_can_disctmo(vport);
2094 			lpfc_end_rscn(vport);
2095 		}
2096 	}
2097 
2098 out:
2099 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2100 			      "PLOGI Cmpl PUT:     did:x%x refcnt %d",
2101 			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2102 
2103 out_freeiocb:
2104 	/* Release the reference on the original I/O request. */
2105 	free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
2106 
2107 	lpfc_els_free_iocb(phba, cmdiocb);
2108 	lpfc_nlp_put(free_ndlp);
2109 	return;
2110 }
2111 
2112 /**
2113  * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
2114  * @vport: pointer to a host virtual N_Port data structure.
2115  * @did: destination port identifier.
2116  * @retry: number of retries to the command IOCB.
2117  *
2118  * This routine issues a Port Login (PLOGI) command to a remote N_Port
2119  * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
2120  * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
2121  * This routine constructs the proper fields of the PLOGI IOCB and invokes
2122  * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
2123  *
2124  * Note that the ndlp reference count will be incremented by 1 for holding
2125  * the ndlp and the reference to ndlp will be stored into the context1 field
2126  * of the IOCB for the completion callback function to the PLOGI ELS command.
2127  *
2128  * Return code
2129  *   0 - Successfully issued a plogi for @vport
2130  *   1 - failed to issue a plogi for @vport
2131  **/
2132 int
2133 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2134 {
2135 	struct lpfc_hba  *phba = vport->phba;
2136 	struct serv_parm *sp;
2137 	struct lpfc_nodelist *ndlp;
2138 	struct lpfc_iocbq *elsiocb;
2139 	uint8_t *pcmd;
2140 	uint16_t cmdsize;
2141 	int ret;
2142 
2143 	ndlp = lpfc_findnode_did(vport, did);
2144 	if (!ndlp)
2145 		return 1;
2146 
2147 	/* Defer the processing of the issue PLOGI until after the
2148 	 * outstanding UNREG_RPI mbox command completes, unless we
2149 	 * are going offline. This logic does not apply for Fabric DIDs
2150 	 */
2151 	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2152 	    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
2153 	    !(vport->fc_flag & FC_OFFLINE_MODE)) {
2154 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2155 				 "4110 Issue PLOGI x%x deferred "
2156 				 "on NPort x%x rpi x%x Data: x%px\n",
2157 				 ndlp->nlp_defer_did, ndlp->nlp_DID,
2158 				 ndlp->nlp_rpi, ndlp);
2159 
2160 		/* We can only defer 1st PLOGI */
2161 		if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
2162 			ndlp->nlp_defer_did = did;
2163 		return 0;
2164 	}
2165 
2166 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
2167 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2168 				     ELS_CMD_PLOGI);
2169 	if (!elsiocb)
2170 		return 1;
2171 
2172 	spin_lock_irq(&ndlp->lock);
2173 	ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
2174 	spin_unlock_irq(&ndlp->lock);
2175 
2176 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2177 
2178 	/* For PLOGI request, remainder of payload is service parameters */
2179 	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
2180 	pcmd += sizeof(uint32_t);
2181 	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2182 	sp = (struct serv_parm *) pcmd;
2183 
2184 	/*
2185 	 * If we are a N-port connected to a Fabric, fix-up paramm's so logins
2186 	 * to device on remote loops work.
2187 	 */
2188 	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
2189 		sp->cmn.altBbCredit = 1;
2190 
2191 	if (sp->cmn.fcphLow < FC_PH_4_3)
2192 		sp->cmn.fcphLow = FC_PH_4_3;
2193 
2194 	if (sp->cmn.fcphHigh < FC_PH3)
2195 		sp->cmn.fcphHigh = FC_PH3;
2196 
2197 	sp->cmn.valid_vendor_ver_level = 0;
2198 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
2199 	sp->cmn.bbRcvSizeMsb &= 0xF;
2200 
2201 	/* Check if the destination port supports VMID */
2202 	ndlp->vmid_support = 0;
2203 	if (vport->vmid_priority_tagging)
2204 		sp->cmn.priority_tagging = 1;
2205 	else if (phba->cfg_vmid_app_header &&
2206 		 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags))
2207 		sp->cmn.app_hdr_support = 1;
2208 
2209 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2210 		"Issue PLOGI:     did:x%x",
2211 		did, 0, 0);
2212 
2213 	/* If our firmware supports this feature, convey that
2214 	 * information to the target using the vendor specific field.
2215 	 */
2216 	if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
2217 		sp->cmn.valid_vendor_ver_level = 1;
2218 		sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
2219 		sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
2220 	}
2221 
2222 	phba->fc_stat.elsXmitPLOGI++;
2223 	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
2224 
2225 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2226 			      "Issue PLOGI:     did:x%x refcnt %d",
2227 			      did, kref_read(&ndlp->kref), 0);
2228 	elsiocb->context1 = lpfc_nlp_get(ndlp);
2229 	if (!elsiocb->context1) {
2230 		lpfc_els_free_iocb(phba, elsiocb);
2231 		return 1;
2232 	}
2233 
2234 	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2235 	if (ret) {
2236 		lpfc_els_free_iocb(phba, elsiocb);
2237 		lpfc_nlp_put(ndlp);
2238 		return 1;
2239 	}
2240 
2241 	return 0;
2242 }
2243 
2244 /**
2245  * lpfc_cmpl_els_prli - Completion callback function for prli
2246  * @phba: pointer to lpfc hba data structure.
2247  * @cmdiocb: pointer to lpfc command iocb data structure.
2248  * @rspiocb: pointer to lpfc response iocb data structure.
2249  *
2250  * This routine is the completion callback function for a Process Login
2251  * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2252  * status. If there is error status reported, PRLI retry shall be attempted
2253  * by invoking the lpfc_els_retry() routine. Otherwise, the state
2254  * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2255  * ndlp to mark the PRLI completion.
2256  **/
2257 static void
2258 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2259 		   struct lpfc_iocbq *rspiocb)
2260 {
2261 	struct lpfc_vport *vport = cmdiocb->vport;
2262 	IOCB_t *irsp;
2263 	struct lpfc_nodelist *ndlp;
2264 	char *mode;
2265 	u32 loglevel;
2266 
2267 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2268 	cmdiocb->context_un.rsp_iocb = rspiocb;
2269 
2270 	irsp = &(rspiocb->iocb);
2271 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2272 	spin_lock_irq(&ndlp->lock);
2273 	ndlp->nlp_flag &= ~NLP_PRLI_SND;
2274 
2275 	/* Driver supports multiple FC4 types.  Counters matter. */
2276 	vport->fc_prli_sent--;
2277 	ndlp->fc4_prli_sent--;
2278 	spin_unlock_irq(&ndlp->lock);
2279 
2280 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2281 		"PRLI cmpl:       status:x%x/x%x did:x%x",
2282 		irsp->ulpStatus, irsp->un.ulpWord[4],
2283 		ndlp->nlp_DID);
2284 
2285 	/* PRLI completes to NPort <nlp_DID> */
2286 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2287 			 "0103 PRLI completes to NPort x%06x "
2288 			 "Data: x%x x%x x%x x%x\n",
2289 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2290 			 vport->num_disc_nodes, ndlp->fc4_prli_sent);
2291 
2292 	/* Check to see if link went down during discovery */
2293 	if (lpfc_els_chk_latt(vport))
2294 		goto out;
2295 
2296 	if (irsp->ulpStatus) {
2297 		/* Check for retry */
2298 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2299 			/* ELS command is being retried */
2300 			goto out;
2301 		}
2302 
2303 		/* If we don't send GFT_ID to Fabric, a PRLI error
2304 		 * could be expected.
2305 		 */
2306 		if ((vport->fc_flag & FC_FABRIC) ||
2307 		    (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) {
2308 			mode = KERN_ERR;
2309 			loglevel =  LOG_TRACE_EVENT;
2310 		} else {
2311 			mode = KERN_INFO;
2312 			loglevel =  LOG_ELS;
2313 		}
2314 
2315 		/* PRLI failed */
2316 		lpfc_printf_vlog(vport, mode, loglevel,
2317 				 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2318 				 "data: x%x\n",
2319 				 ndlp->nlp_DID, irsp->ulpStatus,
2320 				 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
2321 
2322 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2323 		if (!lpfc_error_lost_link(irsp))
2324 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2325 						NLP_EVT_CMPL_PRLI);
2326 
2327 		/* As long as this node is not registered with the SCSI
2328 		 * or NVMe transport and no other PRLIs are outstanding,
2329 		 * it is no longer an active node.  Otherwise devloss
2330 		 * handles the final cleanup.
2331 		 */
2332 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) &&
2333 		    !ndlp->fc4_prli_sent) {
2334 			spin_lock_irq(&ndlp->lock);
2335 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2336 			spin_unlock_irq(&ndlp->lock);
2337 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2338 						NLP_EVT_DEVICE_RM);
2339 		}
2340 	} else {
2341 		/* Good status, call state machine.  However, if another
2342 		 * PRLI is outstanding, don't call the state machine
2343 		 * because final disposition to Mapped or Unmapped is
2344 		 * completed there.
2345 		 */
2346 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2347 					NLP_EVT_CMPL_PRLI);
2348 	}
2349 
2350 out:
2351 	lpfc_els_free_iocb(phba, cmdiocb);
2352 	lpfc_nlp_put(ndlp);
2353 	return;
2354 }
2355 
2356 /**
2357  * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2358  * @vport: pointer to a host virtual N_Port data structure.
2359  * @ndlp: pointer to a node-list data structure.
2360  * @retry: number of retries to the command IOCB.
2361  *
2362  * This routine issues a Process Login (PRLI) ELS command for the
2363  * @vport. The PRLI service parameters are set up in the payload of the
2364  * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2365  * is put to the IOCB completion callback func field before invoking the
2366  * routine lpfc_sli_issue_iocb() to send out PRLI command.
2367  *
2368  * Note that the ndlp reference count will be incremented by 1 for holding the
2369  * ndlp and the reference to ndlp will be stored into the context1 field of
2370  * the IOCB for the completion callback function to the PRLI ELS command.
2371  *
2372  * Return code
2373  *   0 - successfully issued prli iocb command for @vport
2374  *   1 - failed to issue prli iocb command for @vport
2375  **/
2376 int
2377 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2378 		    uint8_t retry)
2379 {
2380 	int rc = 0;
2381 	struct lpfc_hba *phba = vport->phba;
2382 	PRLI *npr;
2383 	struct lpfc_nvme_prli *npr_nvme;
2384 	struct lpfc_iocbq *elsiocb;
2385 	uint8_t *pcmd;
2386 	uint16_t cmdsize;
2387 	u32 local_nlp_type, elscmd;
2388 
2389 	/*
2390 	 * If we are in RSCN mode, the FC4 types supported from a
2391 	 * previous GFT_ID command may not be accurate. So, if we
2392 	 * are a NVME Initiator, always look for the possibility of
2393 	 * the remote NPort beng a NVME Target.
2394 	 */
2395 	if (phba->sli_rev == LPFC_SLI_REV4 &&
2396 	    vport->fc_flag & FC_RSCN_MODE &&
2397 	    vport->nvmei_support)
2398 		ndlp->nlp_fc4_type |= NLP_FC4_NVME;
2399 	local_nlp_type = ndlp->nlp_fc4_type;
2400 
2401 	/* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2402 	 * fields here before any of them can complete.
2403 	 */
2404 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2405 	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2406 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2407 	ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2408 	ndlp->nvme_fb_size = 0;
2409 
2410  send_next_prli:
2411 	if (local_nlp_type & NLP_FC4_FCP) {
2412 		/* Payload is 4 + 16 = 20 x14 bytes. */
2413 		cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2414 		elscmd = ELS_CMD_PRLI;
2415 	} else if (local_nlp_type & NLP_FC4_NVME) {
2416 		/* Payload is 4 + 20 = 24 x18 bytes. */
2417 		cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2418 		elscmd = ELS_CMD_NVMEPRLI;
2419 	} else {
2420 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2421 				 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2422 				 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2423 		return 1;
2424 	}
2425 
2426 	/* SLI3 ports don't support NVME.  If this rport is a strict NVME
2427 	 * FC4 type, implicitly LOGO.
2428 	 */
2429 	if (phba->sli_rev == LPFC_SLI_REV3 &&
2430 	    ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2431 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2432 				 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2433 				 ndlp->nlp_type);
2434 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2435 		return 1;
2436 	}
2437 
2438 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2439 				     ndlp->nlp_DID, elscmd);
2440 	if (!elsiocb)
2441 		return 1;
2442 
2443 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2444 
2445 	/* For PRLI request, remainder of payload is service parameters */
2446 	memset(pcmd, 0, cmdsize);
2447 
2448 	if (local_nlp_type & NLP_FC4_FCP) {
2449 		/* Remainder of payload is FCP PRLI parameter page.
2450 		 * Note: this data structure is defined as
2451 		 * BE/LE in the structure definition so no
2452 		 * byte swap call is made.
2453 		 */
2454 		*((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
2455 		pcmd += sizeof(uint32_t);
2456 		npr = (PRLI *)pcmd;
2457 
2458 		/*
2459 		 * If our firmware version is 3.20 or later,
2460 		 * set the following bits for FC-TAPE support.
2461 		 */
2462 		if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2463 			npr->ConfmComplAllowed = 1;
2464 			npr->Retry = 1;
2465 			npr->TaskRetryIdReq = 1;
2466 		}
2467 		npr->estabImagePair = 1;
2468 		npr->readXferRdyDis = 1;
2469 		if (vport->cfg_first_burst_size)
2470 			npr->writeXferRdyDis = 1;
2471 
2472 		/* For FCP support */
2473 		npr->prliType = PRLI_FCP_TYPE;
2474 		npr->initiatorFunc = 1;
2475 		elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
2476 
2477 		/* Remove FCP type - processed. */
2478 		local_nlp_type &= ~NLP_FC4_FCP;
2479 	} else if (local_nlp_type & NLP_FC4_NVME) {
2480 		/* Remainder of payload is NVME PRLI parameter page.
2481 		 * This data structure is the newer definition that
2482 		 * uses bf macros so a byte swap is required.
2483 		 */
2484 		*((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
2485 		pcmd += sizeof(uint32_t);
2486 		npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2487 		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2488 		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
2489 		if (phba->nsler) {
2490 			bf_set(prli_nsler, npr_nvme, 1);
2491 			bf_set(prli_conf, npr_nvme, 1);
2492 		}
2493 
2494 		/* Only initiators request first burst. */
2495 		if ((phba->cfg_nvme_enable_fb) &&
2496 		    !phba->nvmet_support)
2497 			bf_set(prli_fba, npr_nvme, 1);
2498 
2499 		if (phba->nvmet_support) {
2500 			bf_set(prli_tgt, npr_nvme, 1);
2501 			bf_set(prli_disc, npr_nvme, 1);
2502 		} else {
2503 			bf_set(prli_init, npr_nvme, 1);
2504 			bf_set(prli_conf, npr_nvme, 1);
2505 		}
2506 
2507 		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
2508 		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
2509 		elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
2510 
2511 		/* Remove NVME type - processed. */
2512 		local_nlp_type &= ~NLP_FC4_NVME;
2513 	}
2514 
2515 	phba->fc_stat.elsXmitPRLI++;
2516 	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2517 	spin_lock_irq(&ndlp->lock);
2518 	ndlp->nlp_flag |= NLP_PRLI_SND;
2519 
2520 	/* The vport counters are used for lpfc_scan_finished, but
2521 	 * the ndlp is used to track outstanding PRLIs for different
2522 	 * FC4 types.
2523 	 */
2524 	vport->fc_prli_sent++;
2525 	ndlp->fc4_prli_sent++;
2526 	spin_unlock_irq(&ndlp->lock);
2527 
2528 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2529 			      "Issue PRLI:  did:x%x refcnt %d",
2530 			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2531 	elsiocb->context1 = lpfc_nlp_get(ndlp);
2532 	if (!elsiocb->context1) {
2533 		lpfc_els_free_iocb(phba, elsiocb);
2534 		goto err;
2535 	}
2536 
2537 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2538 	if (rc == IOCB_ERROR) {
2539 		lpfc_els_free_iocb(phba, elsiocb);
2540 		lpfc_nlp_put(ndlp);
2541 		goto err;
2542 	}
2543 
2544 
2545 	/* The driver supports 2 FC4 types.  Make sure
2546 	 * a PRLI is issued for all types before exiting.
2547 	 */
2548 	if (phba->sli_rev == LPFC_SLI_REV4 &&
2549 	    local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
2550 		goto send_next_prli;
2551 	else
2552 		return 0;
2553 
2554 err:
2555 	spin_lock_irq(&ndlp->lock);
2556 	ndlp->nlp_flag &= ~NLP_PRLI_SND;
2557 	spin_unlock_irq(&ndlp->lock);
2558 	return 1;
2559 }
2560 
2561 /**
2562  * lpfc_rscn_disc - Perform rscn discovery for a vport
2563  * @vport: pointer to a host virtual N_Port data structure.
2564  *
2565  * This routine performs Registration State Change Notification (RSCN)
2566  * discovery for a @vport. If the @vport's node port recovery count is not
2567  * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2568  * the nodes that need recovery. If none of the PLOGI were needed through
2569  * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2570  * invoked to check and handle possible more RSCN came in during the period
2571  * of processing the current ones.
2572  **/
2573 static void
2574 lpfc_rscn_disc(struct lpfc_vport *vport)
2575 {
2576 	lpfc_can_disctmo(vport);
2577 
2578 	/* RSCN discovery */
2579 	/* go thru NPR nodes and issue ELS PLOGIs */
2580 	if (vport->fc_npr_cnt)
2581 		if (lpfc_els_disc_plogi(vport))
2582 			return;
2583 
2584 	lpfc_end_rscn(vport);
2585 }
2586 
2587 /**
2588  * lpfc_adisc_done - Complete the adisc phase of discovery
2589  * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2590  *
2591  * This function is called when the final ADISC is completed during discovery.
2592  * This function handles clearing link attention or issuing reg_vpi depending
2593  * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2594  * discovery.
2595  * This function is called with no locks held.
2596  **/
2597 static void
2598 lpfc_adisc_done(struct lpfc_vport *vport)
2599 {
2600 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
2601 	struct lpfc_hba   *phba = vport->phba;
2602 
2603 	/*
2604 	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2605 	 * and continue discovery.
2606 	 */
2607 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2608 	    !(vport->fc_flag & FC_RSCN_MODE) &&
2609 	    (phba->sli_rev < LPFC_SLI_REV4)) {
2610 		/* The ADISCs are complete.  Doesn't matter if they
2611 		 * succeeded or failed because the ADISC completion
2612 		 * routine guarantees to call the state machine and
2613 		 * the RPI is either unregistered (failed ADISC response)
2614 		 * or the RPI is still valid and the node is marked
2615 		 * mapped for a target.  The exchanges should be in the
2616 		 * correct state. This code is specific to SLI3.
2617 		 */
2618 		lpfc_issue_clear_la(phba, vport);
2619 		lpfc_issue_reg_vpi(phba, vport);
2620 		return;
2621 	}
2622 	/*
2623 	* For SLI2, we need to set port_state to READY
2624 	* and continue discovery.
2625 	*/
2626 	if (vport->port_state < LPFC_VPORT_READY) {
2627 		/* If we get here, there is nothing to ADISC */
2628 		lpfc_issue_clear_la(phba, vport);
2629 		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2630 			vport->num_disc_nodes = 0;
2631 			/* go thru NPR list, issue ELS PLOGIs */
2632 			if (vport->fc_npr_cnt)
2633 				lpfc_els_disc_plogi(vport);
2634 			if (!vport->num_disc_nodes) {
2635 				spin_lock_irq(shost->host_lock);
2636 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
2637 				spin_unlock_irq(shost->host_lock);
2638 				lpfc_can_disctmo(vport);
2639 				lpfc_end_rscn(vport);
2640 			}
2641 		}
2642 		vport->port_state = LPFC_VPORT_READY;
2643 	} else
2644 		lpfc_rscn_disc(vport);
2645 }
2646 
2647 /**
2648  * lpfc_more_adisc - Issue more adisc as needed
2649  * @vport: pointer to a host virtual N_Port data structure.
2650  *
2651  * This routine determines whether there are more ndlps on a @vport
2652  * node list need to have Address Discover (ADISC) issued. If so, it will
2653  * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2654  * remaining nodes which need to have ADISC sent.
2655  **/
2656 void
2657 lpfc_more_adisc(struct lpfc_vport *vport)
2658 {
2659 	if (vport->num_disc_nodes)
2660 		vport->num_disc_nodes--;
2661 	/* Continue discovery with <num_disc_nodes> ADISCs to go */
2662 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2663 			 "0210 Continue discovery with %d ADISCs to go "
2664 			 "Data: x%x x%x x%x\n",
2665 			 vport->num_disc_nodes, vport->fc_adisc_cnt,
2666 			 vport->fc_flag, vport->port_state);
2667 	/* Check to see if there are more ADISCs to be sent */
2668 	if (vport->fc_flag & FC_NLP_MORE) {
2669 		lpfc_set_disctmo(vport);
2670 		/* go thru NPR nodes and issue any remaining ELS ADISCs */
2671 		lpfc_els_disc_adisc(vport);
2672 	}
2673 	if (!vport->num_disc_nodes)
2674 		lpfc_adisc_done(vport);
2675 	return;
2676 }
2677 
2678 /**
2679  * lpfc_cmpl_els_adisc - Completion callback function for adisc
2680  * @phba: pointer to lpfc hba data structure.
2681  * @cmdiocb: pointer to lpfc command iocb data structure.
2682  * @rspiocb: pointer to lpfc response iocb data structure.
2683  *
2684  * This routine is the completion function for issuing the Address Discover
2685  * (ADISC) command. It first checks to see whether link went down during
2686  * the discovery process. If so, the node will be marked as node port
2687  * recovery for issuing discover IOCB by the link attention handler and
2688  * exit. Otherwise, the response status is checked. If error was reported
2689  * in the response status, the ADISC command shall be retried by invoking
2690  * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2691  * the response status, the state machine is invoked to set transition
2692  * with respect to NLP_EVT_CMPL_ADISC event.
2693  **/
2694 static void
2695 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2696 		    struct lpfc_iocbq *rspiocb)
2697 {
2698 	struct lpfc_vport *vport = cmdiocb->vport;
2699 	IOCB_t *irsp;
2700 	struct lpfc_nodelist *ndlp;
2701 	int  disc;
2702 
2703 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2704 	cmdiocb->context_un.rsp_iocb = rspiocb;
2705 
2706 	irsp = &(rspiocb->iocb);
2707 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2708 
2709 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2710 		"ADISC cmpl:      status:x%x/x%x did:x%x",
2711 		irsp->ulpStatus, irsp->un.ulpWord[4],
2712 		ndlp->nlp_DID);
2713 
2714 	/* Since ndlp can be freed in the disc state machine, note if this node
2715 	 * is being used during discovery.
2716 	 */
2717 	spin_lock_irq(&ndlp->lock);
2718 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2719 	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2720 	spin_unlock_irq(&ndlp->lock);
2721 	/* ADISC completes to NPort <nlp_DID> */
2722 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2723 			 "0104 ADISC completes to NPort x%x "
2724 			 "Data: x%x x%x x%x x%x x%x\n",
2725 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2726 			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
2727 	/* Check to see if link went down during discovery */
2728 	if (lpfc_els_chk_latt(vport)) {
2729 		spin_lock_irq(&ndlp->lock);
2730 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2731 		spin_unlock_irq(&ndlp->lock);
2732 		goto out;
2733 	}
2734 
2735 	if (irsp->ulpStatus) {
2736 		/* Check for retry */
2737 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2738 			/* ELS command is being retried */
2739 			if (disc) {
2740 				spin_lock_irq(&ndlp->lock);
2741 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2742 				spin_unlock_irq(&ndlp->lock);
2743 				lpfc_set_disctmo(vport);
2744 			}
2745 			goto out;
2746 		}
2747 		/* ADISC failed */
2748 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2749 				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2750 				 ndlp->nlp_DID, irsp->ulpStatus,
2751 				 irsp->un.ulpWord[4]);
2752 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2753 		if (lpfc_error_lost_link(irsp))
2754 			goto check_adisc;
2755 		else
2756 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2757 						NLP_EVT_CMPL_ADISC);
2758 
2759 		/* As long as this node is not registered with the SCSI or NVMe
2760 		 * transport, it is no longer an active node. Otherwise
2761 		 * devloss handles the final cleanup.
2762 		 */
2763 		if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2764 			spin_lock_irq(&ndlp->lock);
2765 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2766 			spin_unlock_irq(&ndlp->lock);
2767 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2768 						NLP_EVT_DEVICE_RM);
2769 		}
2770 	} else
2771 		/* Good status, call state machine */
2772 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2773 					NLP_EVT_CMPL_ADISC);
2774 
2775  check_adisc:
2776 	/* Check to see if there are more ADISCs to be sent */
2777 	if (disc && vport->num_disc_nodes)
2778 		lpfc_more_adisc(vport);
2779 out:
2780 	lpfc_els_free_iocb(phba, cmdiocb);
2781 	lpfc_nlp_put(ndlp);
2782 	return;
2783 }
2784 
2785 /**
2786  * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport
2787  * @vport: pointer to a virtual N_Port data structure.
2788  * @ndlp: pointer to a node-list data structure.
2789  * @retry: number of retries to the command IOCB.
2790  *
2791  * This routine issues an Address Discover (ADISC) for an @ndlp on a
2792  * @vport. It prepares the payload of the ADISC ELS command, updates the
2793  * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2794  * to issue the ADISC ELS command.
2795  *
2796  * Note that the ndlp reference count will be incremented by 1 for holding the
2797  * ndlp and the reference to ndlp will be stored into the context1 field of
2798  * the IOCB for the completion callback function to the ADISC ELS command.
2799  *
2800  * Return code
2801  *   0 - successfully issued adisc
2802  *   1 - failed to issue adisc
2803  **/
2804 int
2805 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2806 		     uint8_t retry)
2807 {
2808 	int rc = 0;
2809 	struct lpfc_hba  *phba = vport->phba;
2810 	ADISC *ap;
2811 	struct lpfc_iocbq *elsiocb;
2812 	uint8_t *pcmd;
2813 	uint16_t cmdsize;
2814 
2815 	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2816 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2817 				     ndlp->nlp_DID, ELS_CMD_ADISC);
2818 	if (!elsiocb)
2819 		return 1;
2820 
2821 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2822 
2823 	/* For ADISC request, remainder of payload is service parameters */
2824 	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2825 	pcmd += sizeof(uint32_t);
2826 
2827 	/* Fill in ADISC payload */
2828 	ap = (ADISC *) pcmd;
2829 	ap->hardAL_PA = phba->fc_pref_ALPA;
2830 	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2831 	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2832 	ap->DID = be32_to_cpu(vport->fc_myDID);
2833 
2834 	phba->fc_stat.elsXmitADISC++;
2835 	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2836 	spin_lock_irq(&ndlp->lock);
2837 	ndlp->nlp_flag |= NLP_ADISC_SND;
2838 	spin_unlock_irq(&ndlp->lock);
2839 	elsiocb->context1 = lpfc_nlp_get(ndlp);
2840 	if (!elsiocb->context1) {
2841 		lpfc_els_free_iocb(phba, elsiocb);
2842 		goto err;
2843 	}
2844 
2845 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2846 			      "Issue ADISC:   did:x%x refcnt %d",
2847 			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2848 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2849 	if (rc == IOCB_ERROR) {
2850 		lpfc_els_free_iocb(phba, elsiocb);
2851 		lpfc_nlp_put(ndlp);
2852 		goto err;
2853 	}
2854 
2855 	return 0;
2856 
2857 err:
2858 	spin_lock_irq(&ndlp->lock);
2859 	ndlp->nlp_flag &= ~NLP_ADISC_SND;
2860 	spin_unlock_irq(&ndlp->lock);
2861 	return 1;
2862 }
2863 
2864 /**
2865  * lpfc_cmpl_els_logo - Completion callback function for logo
2866  * @phba: pointer to lpfc hba data structure.
2867  * @cmdiocb: pointer to lpfc command iocb data structure.
2868  * @rspiocb: pointer to lpfc response iocb data structure.
2869  *
2870  * This routine is the completion function for issuing the ELS Logout (LOGO)
2871  * command. If no error status was reported from the LOGO response, the
2872  * state machine of the associated ndlp shall be invoked for transition with
2873  * respect to NLP_EVT_CMPL_LOGO event.
2874  **/
2875 static void
2876 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2877 		   struct lpfc_iocbq *rspiocb)
2878 {
2879 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2880 	struct lpfc_vport *vport = ndlp->vport;
2881 	IOCB_t *irsp;
2882 	unsigned long flags;
2883 	uint32_t skip_recovery = 0;
2884 	int wake_up_waiter = 0;
2885 
2886 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2887 	cmdiocb->context_un.rsp_iocb = rspiocb;
2888 
2889 	irsp = &(rspiocb->iocb);
2890 	spin_lock_irq(&ndlp->lock);
2891 	ndlp->nlp_flag &= ~NLP_LOGO_SND;
2892 	if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) {
2893 		wake_up_waiter = 1;
2894 		ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
2895 	}
2896 	spin_unlock_irq(&ndlp->lock);
2897 
2898 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2899 		"LOGO cmpl:       status:x%x/x%x did:x%x",
2900 		irsp->ulpStatus, irsp->un.ulpWord[4],
2901 		ndlp->nlp_DID);
2902 
2903 	/* LOGO completes to NPort <nlp_DID> */
2904 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2905 			 "0105 LOGO completes to NPort x%x "
2906 			 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n",
2907 			 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
2908 			 irsp->ulpStatus, irsp->un.ulpWord[4],
2909 			 irsp->ulpTimeout, vport->num_disc_nodes);
2910 
2911 	if (lpfc_els_chk_latt(vport)) {
2912 		skip_recovery = 1;
2913 		goto out;
2914 	}
2915 
2916 	/* The LOGO will not be retried on failure.  A LOGO was
2917 	 * issued to the remote rport and a ACC or RJT or no Answer are
2918 	 * all acceptable.  Note the failure and move forward with
2919 	 * discovery.  The PLOGI will retry.
2920 	 */
2921 	if (irsp->ulpStatus) {
2922 		/* LOGO failed */
2923 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2924 				 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
2925 				 ndlp->nlp_DID, irsp->ulpStatus,
2926 				 irsp->un.ulpWord[4]);
2927 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2928 		if (lpfc_error_lost_link(irsp)) {
2929 			skip_recovery = 1;
2930 			goto out;
2931 		}
2932 	}
2933 
2934 	/* Call state machine. This will unregister the rpi if needed. */
2935 	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2936 
2937 	/* The driver sets this flag for an NPIV instance that doesn't want to
2938 	 * log into the remote port.
2939 	 */
2940 	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2941 		spin_lock_irq(&ndlp->lock);
2942 		if (phba->sli_rev == LPFC_SLI_REV4)
2943 			ndlp->nlp_flag |= NLP_RELEASE_RPI;
2944 		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2945 		spin_unlock_irq(&ndlp->lock);
2946 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2947 					NLP_EVT_DEVICE_RM);
2948 		lpfc_els_free_iocb(phba, cmdiocb);
2949 		lpfc_nlp_put(ndlp);
2950 
2951 		/* Presume the node was released. */
2952 		return;
2953 	}
2954 
2955 out:
2956 	/* Driver is done with the IO.  */
2957 	lpfc_els_free_iocb(phba, cmdiocb);
2958 	lpfc_nlp_put(ndlp);
2959 
2960 	/* At this point, the LOGO processing is complete. NOTE: For a
2961 	 * pt2pt topology, we are assuming the NPortID will only change
2962 	 * on link up processing. For a LOGO / PLOGI initiated by the
2963 	 * Initiator, we are assuming the NPortID is not going to change.
2964 	 */
2965 
2966 	if (wake_up_waiter && ndlp->logo_waitq)
2967 		wake_up(ndlp->logo_waitq);
2968 	/*
2969 	 * If the node is a target, the handling attempts to recover the port.
2970 	 * For any other port type, the rpi is unregistered as an implicit
2971 	 * LOGO.
2972 	 */
2973 	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
2974 	    skip_recovery == 0) {
2975 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2976 		spin_lock_irqsave(&ndlp->lock, flags);
2977 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2978 		spin_unlock_irqrestore(&ndlp->lock, flags);
2979 
2980 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2981 				 "3187 LOGO completes to NPort x%x: Start "
2982 				 "Recovery Data: x%x x%x x%x x%x\n",
2983 				 ndlp->nlp_DID, irsp->ulpStatus,
2984 				 irsp->un.ulpWord[4], irsp->ulpTimeout,
2985 				 vport->num_disc_nodes);
2986 		lpfc_disc_start(vport);
2987 		return;
2988 	}
2989 
2990 	/* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
2991 	 * driver sends a LOGO to the rport to cleanup.  For fabric and
2992 	 * initiator ports cleanup the node as long as it the node is not
2993 	 * register with the transport.
2994 	 */
2995 	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2996 		spin_lock_irq(&ndlp->lock);
2997 		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2998 		spin_unlock_irq(&ndlp->lock);
2999 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
3000 					NLP_EVT_DEVICE_RM);
3001 	}
3002 }
3003 
3004 /**
3005  * lpfc_issue_els_logo - Issue a logo to an node on a vport
3006  * @vport: pointer to a virtual N_Port data structure.
3007  * @ndlp: pointer to a node-list data structure.
3008  * @retry: number of retries to the command IOCB.
3009  *
3010  * This routine constructs and issues an ELS Logout (LOGO) iocb command
3011  * to a remote node, referred by an @ndlp on a @vport. It constructs the
3012  * payload of the IOCB, properly sets up the @ndlp state, and invokes the
3013  * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
3014  *
3015  * Note that the ndlp reference count will be incremented by 1 for holding the
3016  * ndlp and the reference to ndlp will be stored into the context1 field of
3017  * the IOCB for the completion callback function to the LOGO ELS command.
3018  *
3019  * Callers of this routine are expected to unregister the RPI first
3020  *
3021  * Return code
3022  *   0 - successfully issued logo
3023  *   1 - failed to issue logo
3024  **/
3025 int
3026 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3027 		    uint8_t retry)
3028 {
3029 	struct lpfc_hba  *phba = vport->phba;
3030 	struct lpfc_iocbq *elsiocb;
3031 	uint8_t *pcmd;
3032 	uint16_t cmdsize;
3033 	int rc;
3034 
3035 	spin_lock_irq(&ndlp->lock);
3036 	if (ndlp->nlp_flag & NLP_LOGO_SND) {
3037 		spin_unlock_irq(&ndlp->lock);
3038 		return 0;
3039 	}
3040 	spin_unlock_irq(&ndlp->lock);
3041 
3042 	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
3043 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3044 				     ndlp->nlp_DID, ELS_CMD_LOGO);
3045 	if (!elsiocb)
3046 		return 1;
3047 
3048 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3049 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
3050 	pcmd += sizeof(uint32_t);
3051 
3052 	/* Fill in LOGO payload */
3053 	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
3054 	pcmd += sizeof(uint32_t);
3055 	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
3056 
3057 	phba->fc_stat.elsXmitLOGO++;
3058 	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
3059 	spin_lock_irq(&ndlp->lock);
3060 	ndlp->nlp_flag |= NLP_LOGO_SND;
3061 	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
3062 	spin_unlock_irq(&ndlp->lock);
3063 	elsiocb->context1 = lpfc_nlp_get(ndlp);
3064 	if (!elsiocb->context1) {
3065 		lpfc_els_free_iocb(phba, elsiocb);
3066 		goto err;
3067 	}
3068 
3069 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3070 			      "Issue LOGO:      did:x%x refcnt %d",
3071 			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3072 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3073 	if (rc == IOCB_ERROR) {
3074 		lpfc_els_free_iocb(phba, elsiocb);
3075 		lpfc_nlp_put(ndlp);
3076 		goto err;
3077 	}
3078 
3079 	spin_lock_irq(&ndlp->lock);
3080 	ndlp->nlp_prev_state = ndlp->nlp_state;
3081 	spin_unlock_irq(&ndlp->lock);
3082 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3083 	return 0;
3084 
3085 err:
3086 	spin_lock_irq(&ndlp->lock);
3087 	ndlp->nlp_flag &= ~NLP_LOGO_SND;
3088 	spin_unlock_irq(&ndlp->lock);
3089 	return 1;
3090 }
3091 
3092 /**
3093  * lpfc_cmpl_els_cmd - Completion callback function for generic els command
3094  * @phba: pointer to lpfc hba data structure.
3095  * @cmdiocb: pointer to lpfc command iocb data structure.
3096  * @rspiocb: pointer to lpfc response iocb data structure.
3097  *
3098  * This routine is a generic completion callback function for ELS commands.
3099  * Specifically, it is the callback function which does not need to perform
3100  * any command specific operations. It is currently used by the ELS command
3101  * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel
3102  * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr().
3103  * Other than certain debug loggings, this callback function simply invokes the
3104  * lpfc_els_chk_latt() routine to check whether link went down during the
3105  * discovery process.
3106  **/
3107 static void
3108 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3109 		  struct lpfc_iocbq *rspiocb)
3110 {
3111 	struct lpfc_vport *vport = cmdiocb->vport;
3112 	struct lpfc_nodelist *free_ndlp;
3113 	IOCB_t *irsp;
3114 
3115 	irsp = &rspiocb->iocb;
3116 
3117 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3118 			      "ELS cmd cmpl:    status:x%x/x%x did:x%x",
3119 			      irsp->ulpStatus, irsp->un.ulpWord[4],
3120 			      irsp->un.elsreq64.remoteID);
3121 
3122 	/* ELS cmd tag <ulpIoTag> completes */
3123 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3124 			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
3125 			 irsp->ulpIoTag, irsp->ulpStatus,
3126 			 irsp->un.ulpWord[4], irsp->ulpTimeout);
3127 
3128 	/* Check to see if link went down during discovery */
3129 	lpfc_els_chk_latt(vport);
3130 
3131 	free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
3132 
3133 	lpfc_els_free_iocb(phba, cmdiocb);
3134 	lpfc_nlp_put(free_ndlp);
3135 }
3136 
3137 /**
3138  * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node.
3139  * @vport: pointer to lpfc_vport data structure.
3140  * @fc_ndlp: pointer to the fabric controller (0xfffffd) node.
3141  *
3142  * This routine registers the rpi assigned to the fabric controller
3143  * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED
3144  * state triggering a registration with the SCSI transport.
3145  *
3146  * This routine is single out because the fabric controller node
3147  * does not receive a PLOGI.  This routine is consumed by the
3148  * SCR and RDF ELS commands.  Callers are expected to qualify
3149  * with SLI4 first.
3150  **/
3151 static int
3152 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp)
3153 {
3154 	int rc = 0;
3155 	struct lpfc_hba *phba = vport->phba;
3156 	struct lpfc_nodelist *ns_ndlp;
3157 	LPFC_MBOXQ_t *mbox;
3158 	struct lpfc_dmabuf *mp;
3159 
3160 	if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED)
3161 		return rc;
3162 
3163 	ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
3164 	if (!ns_ndlp)
3165 		return -ENODEV;
3166 
3167 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3168 			 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n",
3169 			 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID,
3170 			 ns_ndlp->nlp_state);
3171 	if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
3172 		return -ENODEV;
3173 
3174 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3175 	if (!mbox) {
3176 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3177 				 "0936 %s: no memory for reg_login "
3178 				 "Data: x%x x%x x%x x%x\n", __func__,
3179 				 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3180 				 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3181 		return -ENOMEM;
3182 	}
3183 	rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID,
3184 			  (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi);
3185 	if (rc) {
3186 		rc = -EACCES;
3187 		goto out;
3188 	}
3189 
3190 	fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
3191 	mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login;
3192 	mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp);
3193 	if (!mbox->ctx_ndlp) {
3194 		rc = -ENOMEM;
3195 		goto out_mem;
3196 	}
3197 
3198 	mbox->vport = vport;
3199 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3200 	if (rc == MBX_NOT_FINISHED) {
3201 		rc = -ENODEV;
3202 		lpfc_nlp_put(fc_ndlp);
3203 		goto out_mem;
3204 	}
3205 	/* Success path. Exit. */
3206 	lpfc_nlp_set_state(vport, fc_ndlp,
3207 			   NLP_STE_REG_LOGIN_ISSUE);
3208 	return 0;
3209 
3210  out_mem:
3211 	fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3212 	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
3213 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3214 	kfree(mp);
3215 
3216  out:
3217 	mempool_free(mbox, phba->mbox_mem_pool);
3218 	lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3219 			 "0938 %s: failed to format reg_login "
3220 			 "Data: x%x x%x x%x x%x\n", __func__,
3221 			 fc_ndlp->nlp_DID, fc_ndlp->nlp_state,
3222 			 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi);
3223 	return rc;
3224 }
3225 
3226 /**
3227  * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd
3228  * @phba: pointer to lpfc hba data structure.
3229  * @cmdiocb: pointer to lpfc command iocb data structure.
3230  * @rspiocb: pointer to lpfc response iocb data structure.
3231  *
3232  * This routine is a generic completion callback function for Discovery ELS cmd.
3233  * Currently used by the ELS command issuing routines for the ELS State Change
3234  * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf().
3235  * These commands will be retried once only for ELS timeout errors.
3236  **/
3237 static void
3238 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3239 		       struct lpfc_iocbq *rspiocb)
3240 {
3241 	struct lpfc_vport *vport = cmdiocb->vport;
3242 	IOCB_t *irsp;
3243 	struct lpfc_els_rdf_rsp *prdf;
3244 	struct lpfc_dmabuf *pcmd, *prsp;
3245 	u32 *pdata;
3246 	u32 cmd;
3247 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
3248 
3249 	irsp = &rspiocb->iocb;
3250 
3251 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3252 		"ELS cmd cmpl:    status:x%x/x%x did:x%x",
3253 		irsp->ulpStatus, irsp->un.ulpWord[4],
3254 		irsp->un.elsreq64.remoteID);
3255 	/* ELS cmd tag <ulpIoTag> completes */
3256 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3257 			 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x "
3258 			 "x%x\n",
3259 			 irsp->ulpIoTag, irsp->ulpStatus,
3260 			 irsp->un.ulpWord[4], irsp->ulpTimeout,
3261 			 cmdiocb->retry);
3262 
3263 	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
3264 	if (!pcmd)
3265 		goto out;
3266 
3267 	pdata = (u32 *)pcmd->virt;
3268 	if (!pdata)
3269 		goto out;
3270 	cmd = *pdata;
3271 
3272 	/* Only 1 retry for ELS Timeout only */
3273 	if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
3274 	    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3275 	    IOERR_SEQUENCE_TIMEOUT)) {
3276 		cmdiocb->retry++;
3277 		if (cmdiocb->retry <= 1) {
3278 			switch (cmd) {
3279 			case ELS_CMD_SCR:
3280 				lpfc_issue_els_scr(vport, cmdiocb->retry);
3281 				break;
3282 			case ELS_CMD_RDF:
3283 				cmdiocb->context1 = NULL; /* save ndlp refcnt */
3284 				lpfc_issue_els_rdf(vport, cmdiocb->retry);
3285 				break;
3286 			}
3287 			goto out;
3288 		}
3289 		phba->fc_stat.elsRetryExceeded++;
3290 	}
3291 	if (irsp->ulpStatus) {
3292 		/* ELS discovery cmd completes with error */
3293 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
3294 				 "4203 ELS cmd x%x error: x%x x%X\n", cmd,
3295 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
3296 		goto out;
3297 	}
3298 
3299 	/* The RDF response doesn't have any impact on the running driver
3300 	 * but the notification descriptors are dumped here for support.
3301 	 */
3302 	if (cmd == ELS_CMD_RDF) {
3303 		int i;
3304 
3305 		prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
3306 		if (!prsp)
3307 			goto out;
3308 
3309 		prdf = (struct lpfc_els_rdf_rsp *)prsp->virt;
3310 		if (!prdf)
3311 			goto out;
3312 
3313 		for (i = 0; i < ELS_RDF_REG_TAG_CNT &&
3314 			    i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++)
3315 			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3316 				 "4677 Fabric RDF Notification Grant Data: "
3317 				 "0x%08x\n",
3318 				 be32_to_cpu(
3319 					prdf->reg_d1.desc_tags[i]));
3320 	}
3321 
3322 out:
3323 	/* Check to see if link went down during discovery */
3324 	lpfc_els_chk_latt(vport);
3325 	lpfc_els_free_iocb(phba, cmdiocb);
3326 	lpfc_nlp_put(ndlp);
3327 	return;
3328 }
3329 
3330 /**
3331  * lpfc_issue_els_scr - Issue a scr to an node on a vport
3332  * @vport: pointer to a host virtual N_Port data structure.
3333  * @retry: retry counter for the command IOCB.
3334  *
3335  * This routine issues a State Change Request (SCR) to a fabric node
3336  * on a @vport. The remote node is Fabric Controller (0xfffffd). It
3337  * first search the @vport node list to find the matching ndlp. If no such
3338  * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
3339  * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
3340  * routine is invoked to send the SCR IOCB.
3341  *
3342  * Note that the ndlp reference count will be incremented by 1 for holding the
3343  * ndlp and the reference to ndlp will be stored into the context1 field of
3344  * the IOCB for the completion callback function to the SCR ELS command.
3345  *
3346  * Return code
3347  *   0 - Successfully issued scr command
3348  *   1 - Failed to issue scr command
3349  **/
3350 int
3351 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry)
3352 {
3353 	int rc = 0;
3354 	struct lpfc_hba  *phba = vport->phba;
3355 	struct lpfc_iocbq *elsiocb;
3356 	uint8_t *pcmd;
3357 	uint16_t cmdsize;
3358 	struct lpfc_nodelist *ndlp;
3359 
3360 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
3361 
3362 	ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3363 	if (!ndlp) {
3364 		ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3365 		if (!ndlp)
3366 			return 1;
3367 		lpfc_enqueue_node(vport, ndlp);
3368 	}
3369 
3370 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3371 				     ndlp->nlp_DID, ELS_CMD_SCR);
3372 	if (!elsiocb)
3373 		return 1;
3374 
3375 	if (phba->sli_rev == LPFC_SLI_REV4) {
3376 		rc = lpfc_reg_fab_ctrl_node(vport, ndlp);
3377 		if (rc) {
3378 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3379 					 "0937 %s: Failed to reg fc node, rc %d\n",
3380 					 __func__, rc);
3381 			return 1;
3382 		}
3383 	}
3384 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3385 
3386 	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
3387 	pcmd += sizeof(uint32_t);
3388 
3389 	/* For SCR, remainder of payload is SCR parameter page */
3390 	memset(pcmd, 0, sizeof(SCR));
3391 	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
3392 
3393 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3394 		"Issue SCR:       did:x%x",
3395 		ndlp->nlp_DID, 0, 0);
3396 
3397 	phba->fc_stat.elsXmitSCR++;
3398 	elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
3399 	elsiocb->context1 = lpfc_nlp_get(ndlp);
3400 	if (!elsiocb->context1) {
3401 		lpfc_els_free_iocb(phba, elsiocb);
3402 		return 1;
3403 	}
3404 
3405 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3406 			      "Issue SCR:     did:x%x refcnt %d",
3407 			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3408 
3409 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3410 	if (rc == IOCB_ERROR) {
3411 		lpfc_els_free_iocb(phba, elsiocb);
3412 		lpfc_nlp_put(ndlp);
3413 		return 1;
3414 	}
3415 
3416 	/* Keep the ndlp just in case RDF is being sent */
3417 	return 0;
3418 }
3419 
3420 /**
3421  * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
3422  *   or the other nport (pt2pt).
3423  * @vport: pointer to a host virtual N_Port data structure.
3424  * @retry: number of retries to the command IOCB.
3425  *
3426  * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD)
3427  *  when connected to a fabric, or to the remote port when connected
3428  *  in point-to-point mode. When sent to the Fabric Controller, it will
3429  *  replay the RSCN to registered recipients.
3430  *
3431  * Note that the ndlp reference count will be incremented by 1 for holding the
3432  * ndlp and the reference to ndlp will be stored into the context1 field of
3433  * the IOCB for the completion callback function to the RSCN ELS command.
3434  *
3435  * Return code
3436  *   0 - Successfully issued RSCN command
3437  *   1 - Failed to issue RSCN command
3438  **/
3439 int
3440 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
3441 {
3442 	int rc = 0;
3443 	struct lpfc_hba *phba = vport->phba;
3444 	struct lpfc_iocbq *elsiocb;
3445 	struct lpfc_nodelist *ndlp;
3446 	struct {
3447 		struct fc_els_rscn rscn;
3448 		struct fc_els_rscn_page portid;
3449 	} *event;
3450 	uint32_t nportid;
3451 	uint16_t cmdsize = sizeof(*event);
3452 
3453 	/* Not supported for private loop */
3454 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
3455 	    !(vport->fc_flag & FC_PUBLIC_LOOP))
3456 		return 1;
3457 
3458 	if (vport->fc_flag & FC_PT2PT) {
3459 		/* find any mapped nport - that would be the other nport */
3460 		ndlp = lpfc_findnode_mapped(vport);
3461 		if (!ndlp)
3462 			return 1;
3463 	} else {
3464 		nportid = FC_FID_FCTRL;
3465 		/* find the fabric controller node */
3466 		ndlp = lpfc_findnode_did(vport, nportid);
3467 		if (!ndlp) {
3468 			/* if one didn't exist, make one */
3469 			ndlp = lpfc_nlp_init(vport, nportid);
3470 			if (!ndlp)
3471 				return 1;
3472 			lpfc_enqueue_node(vport, ndlp);
3473 		}
3474 	}
3475 
3476 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3477 				     ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
3478 
3479 	if (!elsiocb)
3480 		return 1;
3481 
3482 	event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
3483 
3484 	event->rscn.rscn_cmd = ELS_RSCN;
3485 	event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
3486 	event->rscn.rscn_plen = cpu_to_be16(cmdsize);
3487 
3488 	nportid = vport->fc_myDID;
3489 	/* appears that page flags must be 0 for fabric to broadcast RSCN */
3490 	event->portid.rscn_page_flags = 0;
3491 	event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
3492 	event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
3493 	event->portid.rscn_fid[2] = nportid & 0x000000FF;
3494 
3495 	phba->fc_stat.elsXmitRSCN++;
3496 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3497 	elsiocb->context1 = lpfc_nlp_get(ndlp);
3498 	if (!elsiocb->context1) {
3499 		lpfc_els_free_iocb(phba, elsiocb);
3500 		return 1;
3501 	}
3502 
3503 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3504 			      "Issue RSCN:       did:x%x",
3505 			      ndlp->nlp_DID, 0, 0);
3506 
3507 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3508 	if (rc == IOCB_ERROR) {
3509 		lpfc_els_free_iocb(phba, elsiocb);
3510 		lpfc_nlp_put(ndlp);
3511 		return 1;
3512 	}
3513 
3514 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
3515 	 * trigger the release of node.
3516 	 */
3517 	if (!(vport->fc_flag & FC_PT2PT))
3518 		lpfc_nlp_put(ndlp);
3519 	return 0;
3520 }
3521 
3522 /**
3523  * lpfc_issue_els_farpr - Issue a farp to an node on a vport
3524  * @vport: pointer to a host virtual N_Port data structure.
3525  * @nportid: N_Port identifier to the remote node.
3526  * @retry: number of retries to the command IOCB.
3527  *
3528  * This routine issues a Fibre Channel Address Resolution Response
3529  * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3530  * is passed into the function. It first search the @vport node list to find
3531  * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3532  * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3533  * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3534  *
3535  * Note that the ndlp reference count will be incremented by 1 for holding the
3536  * ndlp and the reference to ndlp will be stored into the context1 field of
3537  * the IOCB for the completion callback function to the FARPR ELS command.
3538  *
3539  * Return code
3540  *   0 - Successfully issued farpr command
3541  *   1 - Failed to issue farpr command
3542  **/
3543 static int
3544 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3545 {
3546 	int rc = 0;
3547 	struct lpfc_hba  *phba = vport->phba;
3548 	struct lpfc_iocbq *elsiocb;
3549 	FARP *fp;
3550 	uint8_t *pcmd;
3551 	uint32_t *lp;
3552 	uint16_t cmdsize;
3553 	struct lpfc_nodelist *ondlp;
3554 	struct lpfc_nodelist *ndlp;
3555 
3556 	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3557 
3558 	ndlp = lpfc_findnode_did(vport, nportid);
3559 	if (!ndlp) {
3560 		ndlp = lpfc_nlp_init(vport, nportid);
3561 		if (!ndlp)
3562 			return 1;
3563 		lpfc_enqueue_node(vport, ndlp);
3564 	}
3565 
3566 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3567 				     ndlp->nlp_DID, ELS_CMD_RNID);
3568 	if (!elsiocb)
3569 		return 1;
3570 
3571 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3572 
3573 	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3574 	pcmd += sizeof(uint32_t);
3575 
3576 	/* Fill in FARPR payload */
3577 	fp = (FARP *) (pcmd);
3578 	memset(fp, 0, sizeof(FARP));
3579 	lp = (uint32_t *) pcmd;
3580 	*lp++ = be32_to_cpu(nportid);
3581 	*lp++ = be32_to_cpu(vport->fc_myDID);
3582 	fp->Rflags = 0;
3583 	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3584 
3585 	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3586 	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3587 	ondlp = lpfc_findnode_did(vport, nportid);
3588 	if (ondlp) {
3589 		memcpy(&fp->OportName, &ondlp->nlp_portname,
3590 		       sizeof(struct lpfc_name));
3591 		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3592 		       sizeof(struct lpfc_name));
3593 	}
3594 
3595 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3596 		"Issue FARPR:     did:x%x",
3597 		ndlp->nlp_DID, 0, 0);
3598 
3599 	phba->fc_stat.elsXmitFARPR++;
3600 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3601 	elsiocb->context1 = lpfc_nlp_get(ndlp);
3602 	if (!elsiocb->context1) {
3603 		lpfc_els_free_iocb(phba, elsiocb);
3604 		return 1;
3605 	}
3606 
3607 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3608 	if (rc == IOCB_ERROR) {
3609 		/* The additional lpfc_nlp_put will cause the following
3610 		 * lpfc_els_free_iocb routine to trigger the release of
3611 		 * the node.
3612 		 */
3613 		lpfc_els_free_iocb(phba, elsiocb);
3614 		lpfc_nlp_put(ndlp);
3615 		return 1;
3616 	}
3617 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
3618 	 * trigger the release of the node.
3619 	 */
3620 	/* Don't release reference count as RDF is likely outstanding */
3621 	return 0;
3622 }
3623 
3624 /**
3625  * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
3626  * @vport: pointer to a host virtual N_Port data structure.
3627  * @retry: retry counter for the command IOCB.
3628  *
3629  * This routine issues an ELS RDF to the Fabric Controller to register
3630  * for diagnostic functions.
3631  *
3632  * Note that the ndlp reference count will be incremented by 1 for holding the
3633  * ndlp and the reference to ndlp will be stored into the context1 field of
3634  * the IOCB for the completion callback function to the RDF ELS command.
3635  *
3636  * Return code
3637  *   0 - Successfully issued rdf command
3638  *   1 - Failed to issue rdf command
3639  **/
3640 int
3641 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
3642 {
3643 	struct lpfc_hba *phba = vport->phba;
3644 	struct lpfc_iocbq *elsiocb;
3645 	struct lpfc_els_rdf_req *prdf;
3646 	struct lpfc_nodelist *ndlp;
3647 	uint16_t cmdsize;
3648 	int rc;
3649 
3650 	cmdsize = sizeof(*prdf);
3651 
3652 	ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
3653 	if (!ndlp) {
3654 		ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
3655 		if (!ndlp)
3656 			return -ENODEV;
3657 		lpfc_enqueue_node(vport, ndlp);
3658 	}
3659 
3660 	/* RDF ELS is not required on an NPIV VN_Port.  */
3661 	if (vport->port_type == LPFC_NPIV_PORT) {
3662 		lpfc_nlp_put(ndlp);
3663 		return -EACCES;
3664 	}
3665 
3666 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3667 				     ndlp->nlp_DID, ELS_CMD_RDF);
3668 	if (!elsiocb)
3669 		return -ENOMEM;
3670 
3671 	if (phba->sli_rev == LPFC_SLI_REV4 &&
3672 	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
3673 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3674 				 "0939 %s: FC_NODE x%x RPI x%x flag x%x "
3675 				 "ste x%x type x%x Not registered\n",
3676 				 __func__, ndlp->nlp_DID, ndlp->nlp_rpi,
3677 				 ndlp->nlp_flag, ndlp->nlp_state,
3678 				 ndlp->nlp_type);
3679 		return -ENODEV;
3680 	}
3681 
3682 	/* Configure the payload for the supported FPIN events. */
3683 	prdf = (struct lpfc_els_rdf_req *)
3684 		(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
3685 	memset(prdf, 0, cmdsize);
3686 	prdf->rdf.fpin_cmd = ELS_RDF;
3687 	prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
3688 					 sizeof(struct fc_els_rdf));
3689 	prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
3690 	prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
3691 				FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
3692 	prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
3693 	prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
3694 	prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
3695 	prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
3696 	prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
3697 
3698 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3699 			 "6444 Xmit RDF to remote NPORT x%x\n",
3700 			 ndlp->nlp_DID);
3701 
3702 	elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
3703 	elsiocb->context1 = lpfc_nlp_get(ndlp);
3704 	if (!elsiocb->context1) {
3705 		lpfc_els_free_iocb(phba, elsiocb);
3706 		return -EIO;
3707 	}
3708 
3709 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3710 			      "Issue RDF:     did:x%x refcnt %d",
3711 			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
3712 
3713 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3714 	if (rc == IOCB_ERROR) {
3715 		lpfc_els_free_iocb(phba, elsiocb);
3716 		lpfc_nlp_put(ndlp);
3717 		return -EIO;
3718 	}
3719 	return 0;
3720 }
3721 
3722  /**
3723   * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric.
3724   * @vport: pointer to a host virtual N_Port data structure.
3725   * @cmdiocb: pointer to lpfc command iocb data structure.
3726   * @ndlp: pointer to a node-list data structure.
3727   *
3728   * A received RDF implies a possible change to fabric supported diagnostic
3729   * functions.  This routine sends LS_ACC and then has the Nx_Port issue a new
3730   * RDF request to reregister for supported diagnostic functions.
3731   *
3732   * Return code
3733   *   0 - Success
3734   *   -EIO - Failed to process received RDF
3735   **/
3736 static int
3737 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
3738 		 struct lpfc_nodelist *ndlp)
3739 {
3740 	/* Send LS_ACC */
3741 	if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
3742 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3743 				 "1623 Failed to RDF_ACC from x%x for x%x\n",
3744 				 ndlp->nlp_DID, vport->fc_myDID);
3745 		return -EIO;
3746 	}
3747 
3748 	/* Issue new RDF for reregistering */
3749 	if (lpfc_issue_els_rdf(vport, 0)) {
3750 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3751 				 "2623 Failed to re register RDF for x%x\n",
3752 				 vport->fc_myDID);
3753 		return -EIO;
3754 	}
3755 
3756 	return 0;
3757 }
3758 
3759 /**
3760  * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
3761  * @vport: pointer to a host virtual N_Port data structure.
3762  * @nlp: pointer to a node-list data structure.
3763  *
3764  * This routine cancels the timer with a delayed IOCB-command retry for
3765  * a @vport's @ndlp. It stops the timer for the delayed function retrial and
3766  * removes the ELS retry event if it presents. In addition, if the
3767  * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
3768  * commands are sent for the @vport's nodes that require issuing discovery
3769  * ADISC.
3770  **/
3771 void
3772 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
3773 {
3774 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3775 	struct lpfc_work_evt *evtp;
3776 
3777 	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
3778 		return;
3779 	spin_lock_irq(&nlp->lock);
3780 	nlp->nlp_flag &= ~NLP_DELAY_TMO;
3781 	spin_unlock_irq(&nlp->lock);
3782 	del_timer_sync(&nlp->nlp_delayfunc);
3783 	nlp->nlp_last_elscmd = 0;
3784 	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
3785 		list_del_init(&nlp->els_retry_evt.evt_listp);
3786 		/* Decrement nlp reference count held for the delayed retry */
3787 		evtp = &nlp->els_retry_evt;
3788 		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
3789 	}
3790 	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
3791 		spin_lock_irq(&nlp->lock);
3792 		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3793 		spin_unlock_irq(&nlp->lock);
3794 		if (vport->num_disc_nodes) {
3795 			if (vport->port_state < LPFC_VPORT_READY) {
3796 				/* Check if there are more ADISCs to be sent */
3797 				lpfc_more_adisc(vport);
3798 			} else {
3799 				/* Check if there are more PLOGIs to be sent */
3800 				lpfc_more_plogi(vport);
3801 				if (vport->num_disc_nodes == 0) {
3802 					spin_lock_irq(shost->host_lock);
3803 					vport->fc_flag &= ~FC_NDISC_ACTIVE;
3804 					spin_unlock_irq(shost->host_lock);
3805 					lpfc_can_disctmo(vport);
3806 					lpfc_end_rscn(vport);
3807 				}
3808 			}
3809 		}
3810 	}
3811 	return;
3812 }
3813 
3814 /**
3815  * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
3816  * @t: pointer to the timer function associated data (ndlp).
3817  *
3818  * This routine is invoked by the ndlp delayed-function timer to check
3819  * whether there is any pending ELS retry event(s) with the node. If not, it
3820  * simply returns. Otherwise, if there is at least one ELS delayed event, it
3821  * adds the delayed events to the HBA work list and invokes the
3822  * lpfc_worker_wake_up() routine to wake up worker thread to process the
3823  * event. Note that lpfc_nlp_get() is called before posting the event to
3824  * the work list to hold reference count of ndlp so that it guarantees the
3825  * reference to ndlp will still be available when the worker thread gets
3826  * to the event associated with the ndlp.
3827  **/
3828 void
3829 lpfc_els_retry_delay(struct timer_list *t)
3830 {
3831 	struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
3832 	struct lpfc_vport *vport = ndlp->vport;
3833 	struct lpfc_hba   *phba = vport->phba;
3834 	unsigned long flags;
3835 	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
3836 
3837 	spin_lock_irqsave(&phba->hbalock, flags);
3838 	if (!list_empty(&evtp->evt_listp)) {
3839 		spin_unlock_irqrestore(&phba->hbalock, flags);
3840 		return;
3841 	}
3842 
3843 	/* We need to hold the node by incrementing the reference
3844 	 * count until the queued work is done
3845 	 */
3846 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
3847 	if (evtp->evt_arg1) {
3848 		evtp->evt = LPFC_EVT_ELS_RETRY;
3849 		list_add_tail(&evtp->evt_listp, &phba->work_list);
3850 		lpfc_worker_wake_up(phba);
3851 	}
3852 	spin_unlock_irqrestore(&phba->hbalock, flags);
3853 	return;
3854 }
3855 
3856 /**
3857  * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
3858  * @ndlp: pointer to a node-list data structure.
3859  *
3860  * This routine is the worker-thread handler for processing the @ndlp delayed
3861  * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
3862  * the last ELS command from the associated ndlp and invokes the proper ELS
3863  * function according to the delayed ELS command to retry the command.
3864  **/
3865 void
3866 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3867 {
3868 	struct lpfc_vport *vport = ndlp->vport;
3869 	uint32_t cmd, retry;
3870 
3871 	spin_lock_irq(&ndlp->lock);
3872 	cmd = ndlp->nlp_last_elscmd;
3873 	ndlp->nlp_last_elscmd = 0;
3874 
3875 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
3876 		spin_unlock_irq(&ndlp->lock);
3877 		return;
3878 	}
3879 
3880 	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
3881 	spin_unlock_irq(&ndlp->lock);
3882 	/*
3883 	 * If a discovery event readded nlp_delayfunc after timer
3884 	 * firing and before processing the timer, cancel the
3885 	 * nlp_delayfunc.
3886 	 */
3887 	del_timer_sync(&ndlp->nlp_delayfunc);
3888 	retry = ndlp->nlp_retry;
3889 	ndlp->nlp_retry = 0;
3890 
3891 	switch (cmd) {
3892 	case ELS_CMD_FLOGI:
3893 		lpfc_issue_els_flogi(vport, ndlp, retry);
3894 		break;
3895 	case ELS_CMD_PLOGI:
3896 		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
3897 			ndlp->nlp_prev_state = ndlp->nlp_state;
3898 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3899 		}
3900 		break;
3901 	case ELS_CMD_ADISC:
3902 		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
3903 			ndlp->nlp_prev_state = ndlp->nlp_state;
3904 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3905 		}
3906 		break;
3907 	case ELS_CMD_PRLI:
3908 	case ELS_CMD_NVMEPRLI:
3909 		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
3910 			ndlp->nlp_prev_state = ndlp->nlp_state;
3911 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3912 		}
3913 		break;
3914 	case ELS_CMD_LOGO:
3915 		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
3916 			ndlp->nlp_prev_state = ndlp->nlp_state;
3917 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3918 		}
3919 		break;
3920 	case ELS_CMD_FDISC:
3921 		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
3922 			lpfc_issue_els_fdisc(vport, ndlp, retry);
3923 		break;
3924 	}
3925 	return;
3926 }
3927 
3928 /**
3929  * lpfc_link_reset - Issue link reset
3930  * @vport: pointer to a virtual N_Port data structure.
3931  *
3932  * This routine performs link reset by sending INIT_LINK mailbox command.
3933  * For SLI-3 adapter, link attention interrupt is enabled before issuing
3934  * INIT_LINK mailbox command.
3935  *
3936  * Return code
3937  *   0 - Link reset initiated successfully
3938  *   1 - Failed to initiate link reset
3939  **/
3940 int
3941 lpfc_link_reset(struct lpfc_vport *vport)
3942 {
3943 	struct lpfc_hba *phba = vport->phba;
3944 	LPFC_MBOXQ_t *mbox;
3945 	uint32_t control;
3946 	int rc;
3947 
3948 	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3949 			 "2851 Attempt link reset\n");
3950 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3951 	if (!mbox) {
3952 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3953 				"2852 Failed to allocate mbox memory");
3954 		return 1;
3955 	}
3956 
3957 	/* Enable Link attention interrupts */
3958 	if (phba->sli_rev <= LPFC_SLI_REV3) {
3959 		spin_lock_irq(&phba->hbalock);
3960 		phba->sli.sli_flag |= LPFC_PROCESS_LA;
3961 		control = readl(phba->HCregaddr);
3962 		control |= HC_LAINT_ENA;
3963 		writel(control, phba->HCregaddr);
3964 		readl(phba->HCregaddr); /* flush */
3965 		spin_unlock_irq(&phba->hbalock);
3966 	}
3967 
3968 	lpfc_init_link(phba, mbox, phba->cfg_topology,
3969 		       phba->cfg_link_speed);
3970 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3971 	mbox->vport = vport;
3972 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3973 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3974 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3975 				"2853 Failed to issue INIT_LINK "
3976 				"mbox command, rc:x%x\n", rc);
3977 		mempool_free(mbox, phba->mbox_mem_pool);
3978 		return 1;
3979 	}
3980 
3981 	return 0;
3982 }
3983 
3984 /**
3985  * lpfc_els_retry - Make retry decision on an els command iocb
3986  * @phba: pointer to lpfc hba data structure.
3987  * @cmdiocb: pointer to lpfc command iocb data structure.
3988  * @rspiocb: pointer to lpfc response iocb data structure.
3989  *
3990  * This routine makes a retry decision on an ELS command IOCB, which has
3991  * failed. The following ELS IOCBs use this function for retrying the command
3992  * when previously issued command responsed with error status: FLOGI, PLOGI,
3993  * PRLI, ADISC and FDISC. Based on the ELS command type and the
3994  * returned error status, it makes the decision whether a retry shall be
3995  * issued for the command, and whether a retry shall be made immediately or
3996  * delayed. In the former case, the corresponding ELS command issuing-function
3997  * is called to retry the command. In the later case, the ELS command shall
3998  * be posted to the ndlp delayed event and delayed function timer set to the
3999  * ndlp for the delayed command issusing.
4000  *
4001  * Return code
4002  *   0 - No retry of els command is made
4003  *   1 - Immediate or delayed retry of els command is made
4004  **/
4005 static int
4006 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4007 	       struct lpfc_iocbq *rspiocb)
4008 {
4009 	struct lpfc_vport *vport = cmdiocb->vport;
4010 	IOCB_t *irsp = &rspiocb->iocb;
4011 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4012 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4013 	uint32_t *elscmd;
4014 	struct ls_rjt stat;
4015 	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
4016 	int logerr = 0;
4017 	uint32_t cmd = 0;
4018 	uint32_t did;
4019 	int link_reset = 0, rc;
4020 
4021 
4022 	/* Note: context2 may be 0 for internal driver abort
4023 	 * of delays ELS command.
4024 	 */
4025 
4026 	if (pcmd && pcmd->virt) {
4027 		elscmd = (uint32_t *) (pcmd->virt);
4028 		cmd = *elscmd++;
4029 	}
4030 
4031 	if (ndlp)
4032 		did = ndlp->nlp_DID;
4033 	else {
4034 		/* We should only hit this case for retrying PLOGI */
4035 		did = irsp->un.elsreq64.remoteID;
4036 		ndlp = lpfc_findnode_did(vport, did);
4037 		if (!ndlp && (cmd != ELS_CMD_PLOGI))
4038 			return 0;
4039 	}
4040 
4041 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4042 		"Retry ELS:       wd7:x%x wd4:x%x did:x%x",
4043 		*(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did);
4044 
4045 	switch (irsp->ulpStatus) {
4046 	case IOSTAT_FCP_RSP_ERROR:
4047 		break;
4048 	case IOSTAT_REMOTE_STOP:
4049 		if (phba->sli_rev == LPFC_SLI_REV4) {
4050 			/* This IO was aborted by the target, we don't
4051 			 * know the rxid and because we did not send the
4052 			 * ABTS we cannot generate and RRQ.
4053 			 */
4054 			lpfc_set_rrq_active(phba, ndlp,
4055 					 cmdiocb->sli4_lxritag, 0, 0);
4056 		}
4057 		break;
4058 	case IOSTAT_LOCAL_REJECT:
4059 		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
4060 		case IOERR_LOOP_OPEN_FAILURE:
4061 			if (cmd == ELS_CMD_FLOGI) {
4062 				if (PCI_DEVICE_ID_HORNET ==
4063 					phba->pcidev->device) {
4064 					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
4065 					phba->pport->fc_myDID = 0;
4066 					phba->alpa_map[0] = 0;
4067 					phba->alpa_map[1] = 0;
4068 				}
4069 			}
4070 			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
4071 				delay = 1000;
4072 			retry = 1;
4073 			break;
4074 
4075 		case IOERR_ILLEGAL_COMMAND:
4076 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4077 					 "0124 Retry illegal cmd x%x "
4078 					 "retry:x%x delay:x%x\n",
4079 					 cmd, cmdiocb->retry, delay);
4080 			retry = 1;
4081 			/* All command's retry policy */
4082 			maxretry = 8;
4083 			if (cmdiocb->retry > 2)
4084 				delay = 1000;
4085 			break;
4086 
4087 		case IOERR_NO_RESOURCES:
4088 			logerr = 1; /* HBA out of resources */
4089 			retry = 1;
4090 			if (cmdiocb->retry > 100)
4091 				delay = 100;
4092 			maxretry = 250;
4093 			break;
4094 
4095 		case IOERR_ILLEGAL_FRAME:
4096 			delay = 100;
4097 			retry = 1;
4098 			break;
4099 
4100 		case IOERR_INVALID_RPI:
4101 			if (cmd == ELS_CMD_PLOGI &&
4102 			    did == NameServer_DID) {
4103 				/* Continue forever if plogi to */
4104 				/* the nameserver fails */
4105 				maxretry = 0;
4106 				delay = 100;
4107 			}
4108 			retry = 1;
4109 			break;
4110 
4111 		case IOERR_SEQUENCE_TIMEOUT:
4112 			if (cmd == ELS_CMD_PLOGI &&
4113 			    did == NameServer_DID &&
4114 			    (cmdiocb->retry + 1) == maxretry) {
4115 				/* Reset the Link */
4116 				link_reset = 1;
4117 				break;
4118 			}
4119 			retry = 1;
4120 			delay = 100;
4121 			break;
4122 		}
4123 		break;
4124 
4125 	case IOSTAT_NPORT_RJT:
4126 	case IOSTAT_FABRIC_RJT:
4127 		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
4128 			retry = 1;
4129 			break;
4130 		}
4131 		break;
4132 
4133 	case IOSTAT_NPORT_BSY:
4134 	case IOSTAT_FABRIC_BSY:
4135 		logerr = 1; /* Fabric / Remote NPort out of resources */
4136 		retry = 1;
4137 		break;
4138 
4139 	case IOSTAT_LS_RJT:
4140 		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
4141 		/* Added for Vendor specifc support
4142 		 * Just keep retrying for these Rsn / Exp codes
4143 		 */
4144 		switch (stat.un.b.lsRjtRsnCode) {
4145 		case LSRJT_UNABLE_TPC:
4146 			/* The driver has a VALID PLOGI but the rport has
4147 			 * rejected the PRLI - can't do it now.  Delay
4148 			 * for 1 second and try again.
4149 			 *
4150 			 * However, if explanation is REQ_UNSUPPORTED there's
4151 			 * no point to retry PRLI.
4152 			 */
4153 			if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) &&
4154 			    stat.un.b.lsRjtRsnCodeExp !=
4155 			    LSEXP_REQ_UNSUPPORTED) {
4156 				delay = 1000;
4157 				maxretry = lpfc_max_els_tries + 1;
4158 				retry = 1;
4159 				break;
4160 			}
4161 
4162 			/* Legacy bug fix code for targets with PLOGI delays. */
4163 			if (stat.un.b.lsRjtRsnCodeExp ==
4164 			    LSEXP_CMD_IN_PROGRESS) {
4165 				if (cmd == ELS_CMD_PLOGI) {
4166 					delay = 1000;
4167 					maxretry = 48;
4168 				}
4169 				retry = 1;
4170 				break;
4171 			}
4172 			if (stat.un.b.lsRjtRsnCodeExp ==
4173 			    LSEXP_CANT_GIVE_DATA) {
4174 				if (cmd == ELS_CMD_PLOGI) {
4175 					delay = 1000;
4176 					maxretry = 48;
4177 				}
4178 				retry = 1;
4179 				break;
4180 			}
4181 			if (cmd == ELS_CMD_PLOGI) {
4182 				delay = 1000;
4183 				maxretry = lpfc_max_els_tries + 1;
4184 				retry = 1;
4185 				break;
4186 			}
4187 			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4188 			  (cmd == ELS_CMD_FDISC) &&
4189 			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
4190 				lpfc_printf_vlog(vport, KERN_ERR,
4191 						 LOG_TRACE_EVENT,
4192 						 "0125 FDISC Failed (x%x). "
4193 						 "Fabric out of resources\n",
4194 						 stat.un.lsRjtError);
4195 				lpfc_vport_set_state(vport,
4196 						     FC_VPORT_NO_FABRIC_RSCS);
4197 			}
4198 			break;
4199 
4200 		case LSRJT_LOGICAL_BSY:
4201 			if ((cmd == ELS_CMD_PLOGI) ||
4202 			    (cmd == ELS_CMD_PRLI) ||
4203 			    (cmd == ELS_CMD_NVMEPRLI)) {
4204 				delay = 1000;
4205 				maxretry = 48;
4206 			} else if (cmd == ELS_CMD_FDISC) {
4207 				/* FDISC retry policy */
4208 				maxretry = 48;
4209 				if (cmdiocb->retry >= 32)
4210 					delay = 1000;
4211 			}
4212 			retry = 1;
4213 			break;
4214 
4215 		case LSRJT_LOGICAL_ERR:
4216 			/* There are some cases where switches return this
4217 			 * error when they are not ready and should be returning
4218 			 * Logical Busy. We should delay every time.
4219 			 */
4220 			if (cmd == ELS_CMD_FDISC &&
4221 			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
4222 				maxretry = 3;
4223 				delay = 1000;
4224 				retry = 1;
4225 			} else if (cmd == ELS_CMD_FLOGI &&
4226 				   stat.un.b.lsRjtRsnCodeExp ==
4227 						LSEXP_NOTHING_MORE) {
4228 				vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
4229 				retry = 1;
4230 				lpfc_printf_vlog(vport, KERN_ERR,
4231 						 LOG_TRACE_EVENT,
4232 						 "0820 FLOGI Failed (x%x). "
4233 						 "BBCredit Not Supported\n",
4234 						 stat.un.lsRjtError);
4235 			}
4236 			break;
4237 
4238 		case LSRJT_PROTOCOL_ERR:
4239 			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4240 			  (cmd == ELS_CMD_FDISC) &&
4241 			  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
4242 			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
4243 			  ) {
4244 				lpfc_printf_vlog(vport, KERN_ERR,
4245 						 LOG_TRACE_EVENT,
4246 						 "0122 FDISC Failed (x%x). "
4247 						 "Fabric Detected Bad WWN\n",
4248 						 stat.un.lsRjtError);
4249 				lpfc_vport_set_state(vport,
4250 						     FC_VPORT_FABRIC_REJ_WWN);
4251 			}
4252 			break;
4253 		case LSRJT_VENDOR_UNIQUE:
4254 			if ((stat.un.b.vendorUnique == 0x45) &&
4255 			    (cmd == ELS_CMD_FLOGI)) {
4256 				goto out_retry;
4257 			}
4258 			break;
4259 		case LSRJT_CMD_UNSUPPORTED:
4260 			/* lpfc nvmet returns this type of LS_RJT when it
4261 			 * receives an FCP PRLI because lpfc nvmet only
4262 			 * support NVME.  ELS request is terminated for FCP4
4263 			 * on this rport.
4264 			 */
4265 			if (stat.un.b.lsRjtRsnCodeExp ==
4266 			    LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
4267 				spin_lock_irq(&ndlp->lock);
4268 				ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
4269 				spin_unlock_irq(&ndlp->lock);
4270 				retry = 0;
4271 				goto out_retry;
4272 			}
4273 			break;
4274 		}
4275 		break;
4276 
4277 	case IOSTAT_INTERMED_RSP:
4278 	case IOSTAT_BA_RJT:
4279 		break;
4280 
4281 	default:
4282 		break;
4283 	}
4284 
4285 	if (link_reset) {
4286 		rc = lpfc_link_reset(vport);
4287 		if (rc) {
4288 			/* Do not give up. Retry PLOGI one more time and attempt
4289 			 * link reset if PLOGI fails again.
4290 			 */
4291 			retry = 1;
4292 			delay = 100;
4293 			goto out_retry;
4294 		}
4295 		return 1;
4296 	}
4297 
4298 	if (did == FDMI_DID)
4299 		retry = 1;
4300 
4301 	if ((cmd == ELS_CMD_FLOGI) &&
4302 	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
4303 	    !lpfc_error_lost_link(irsp)) {
4304 		/* FLOGI retry policy */
4305 		retry = 1;
4306 		/* retry FLOGI forever */
4307 		if (phba->link_flag != LS_LOOPBACK_MODE)
4308 			maxretry = 0;
4309 		else
4310 			maxretry = 2;
4311 
4312 		if (cmdiocb->retry >= 100)
4313 			delay = 5000;
4314 		else if (cmdiocb->retry >= 32)
4315 			delay = 1000;
4316 	} else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
4317 		/* retry FDISCs every second up to devloss */
4318 		retry = 1;
4319 		maxretry = vport->cfg_devloss_tmo;
4320 		delay = 1000;
4321 	}
4322 
4323 	cmdiocb->retry++;
4324 	if (maxretry && (cmdiocb->retry >= maxretry)) {
4325 		phba->fc_stat.elsRetryExceeded++;
4326 		retry = 0;
4327 	}
4328 
4329 	if ((vport->load_flag & FC_UNLOADING) != 0)
4330 		retry = 0;
4331 
4332 out_retry:
4333 	if (retry) {
4334 		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
4335 			/* Stop retrying PLOGI and FDISC if in FCF discovery */
4336 			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
4337 				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4338 						 "2849 Stop retry ELS command "
4339 						 "x%x to remote NPORT x%x, "
4340 						 "Data: x%x x%x\n", cmd, did,
4341 						 cmdiocb->retry, delay);
4342 				return 0;
4343 			}
4344 		}
4345 
4346 		/* Retry ELS command <elsCmd> to remote NPORT <did> */
4347 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4348 				 "0107 Retry ELS command x%x to remote "
4349 				 "NPORT x%x Data: x%x x%x\n",
4350 				 cmd, did, cmdiocb->retry, delay);
4351 
4352 		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
4353 			((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
4354 			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
4355 			IOERR_NO_RESOURCES))) {
4356 			/* Don't reset timer for no resources */
4357 
4358 			/* If discovery / RSCN timer is running, reset it */
4359 			if (timer_pending(&vport->fc_disctmo) ||
4360 			    (vport->fc_flag & FC_RSCN_MODE))
4361 				lpfc_set_disctmo(vport);
4362 		}
4363 
4364 		phba->fc_stat.elsXmitRetry++;
4365 		if (ndlp && delay) {
4366 			phba->fc_stat.elsDelayRetry++;
4367 			ndlp->nlp_retry = cmdiocb->retry;
4368 
4369 			/* delay is specified in milliseconds */
4370 			mod_timer(&ndlp->nlp_delayfunc,
4371 				jiffies + msecs_to_jiffies(delay));
4372 			spin_lock_irq(&ndlp->lock);
4373 			ndlp->nlp_flag |= NLP_DELAY_TMO;
4374 			spin_unlock_irq(&ndlp->lock);
4375 
4376 			ndlp->nlp_prev_state = ndlp->nlp_state;
4377 			if ((cmd == ELS_CMD_PRLI) ||
4378 			    (cmd == ELS_CMD_NVMEPRLI))
4379 				lpfc_nlp_set_state(vport, ndlp,
4380 					NLP_STE_PRLI_ISSUE);
4381 			else
4382 				lpfc_nlp_set_state(vport, ndlp,
4383 					NLP_STE_NPR_NODE);
4384 			ndlp->nlp_last_elscmd = cmd;
4385 
4386 			return 1;
4387 		}
4388 		switch (cmd) {
4389 		case ELS_CMD_FLOGI:
4390 			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
4391 			return 1;
4392 		case ELS_CMD_FDISC:
4393 			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
4394 			return 1;
4395 		case ELS_CMD_PLOGI:
4396 			if (ndlp) {
4397 				ndlp->nlp_prev_state = ndlp->nlp_state;
4398 				lpfc_nlp_set_state(vport, ndlp,
4399 						   NLP_STE_PLOGI_ISSUE);
4400 			}
4401 			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
4402 			return 1;
4403 		case ELS_CMD_ADISC:
4404 			ndlp->nlp_prev_state = ndlp->nlp_state;
4405 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4406 			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
4407 			return 1;
4408 		case ELS_CMD_PRLI:
4409 		case ELS_CMD_NVMEPRLI:
4410 			ndlp->nlp_prev_state = ndlp->nlp_state;
4411 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
4412 			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
4413 			return 1;
4414 		case ELS_CMD_LOGO:
4415 			ndlp->nlp_prev_state = ndlp->nlp_state;
4416 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
4417 			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
4418 			return 1;
4419 		}
4420 	}
4421 	/* No retry ELS command <elsCmd> to remote NPORT <did> */
4422 	if (logerr) {
4423 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4424 			 "0137 No retry ELS command x%x to remote "
4425 			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
4426 			 cmd, did, irsp->ulpStatus,
4427 			 irsp->un.ulpWord[4]);
4428 	}
4429 	else {
4430 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4431 			 "0108 No retry ELS command x%x to remote "
4432 			 "NPORT x%x Retried:%d Error:x%x/%x\n",
4433 			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
4434 			 irsp->un.ulpWord[4]);
4435 	}
4436 	return 0;
4437 }
4438 
4439 /**
4440  * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
4441  * @phba: pointer to lpfc hba data structure.
4442  * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
4443  *
4444  * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
4445  * associated with a command IOCB back to the lpfc DMA buffer pool. It first
4446  * checks to see whether there is a lpfc DMA buffer associated with the
4447  * response of the command IOCB. If so, it will be released before releasing
4448  * the lpfc DMA buffer associated with the IOCB itself.
4449  *
4450  * Return code
4451  *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
4452  **/
4453 static int
4454 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
4455 {
4456 	struct lpfc_dmabuf *buf_ptr;
4457 
4458 	/* Free the response before processing the command. */
4459 	if (!list_empty(&buf_ptr1->list)) {
4460 		list_remove_head(&buf_ptr1->list, buf_ptr,
4461 				 struct lpfc_dmabuf,
4462 				 list);
4463 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
4464 		kfree(buf_ptr);
4465 	}
4466 	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
4467 	kfree(buf_ptr1);
4468 	return 0;
4469 }
4470 
4471 /**
4472  * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
4473  * @phba: pointer to lpfc hba data structure.
4474  * @buf_ptr: pointer to the lpfc dma buffer data structure.
4475  *
4476  * This routine releases the lpfc Direct Memory Access (DMA) buffer
4477  * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
4478  * pool.
4479  *
4480  * Return code
4481  *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
4482  **/
4483 static int
4484 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
4485 {
4486 	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
4487 	kfree(buf_ptr);
4488 	return 0;
4489 }
4490 
4491 /**
4492  * lpfc_els_free_iocb - Free a command iocb and its associated resources
4493  * @phba: pointer to lpfc hba data structure.
4494  * @elsiocb: pointer to lpfc els command iocb data structure.
4495  *
4496  * This routine frees a command IOCB and its associated resources. The
4497  * command IOCB data structure contains the reference to various associated
4498  * resources, these fields must be set to NULL if the associated reference
4499  * not present:
4500  *   context1 - reference to ndlp
4501  *   context2 - reference to cmd
4502  *   context2->next - reference to rsp
4503  *   context3 - reference to bpl
4504  *
4505  * It first properly decrements the reference count held on ndlp for the
4506  * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
4507  * set, it invokes the lpfc_els_free_data() routine to release the Direct
4508  * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
4509  * adds the DMA buffer the @phba data structure for the delayed release.
4510  * If reference to the Buffer Pointer List (BPL) is present, the
4511  * lpfc_els_free_bpl() routine is invoked to release the DMA memory
4512  * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
4513  * invoked to release the IOCB data structure back to @phba IOCBQ list.
4514  *
4515  * Return code
4516  *   0 - Success (currently, always return 0)
4517  **/
4518 int
4519 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
4520 {
4521 	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
4522 
4523 	/* The I/O job is complete.  Clear the context1 data. */
4524 	elsiocb->context1 = NULL;
4525 
4526 	/* context2  = cmd,  context2->next = rsp, context3 = bpl */
4527 	if (elsiocb->context2) {
4528 		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
4529 			/* Firmware could still be in progress of DMAing
4530 			 * payload, so don't free data buffer till after
4531 			 * a hbeat.
4532 			 */
4533 			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
4534 			buf_ptr = elsiocb->context2;
4535 			elsiocb->context2 = NULL;
4536 			if (buf_ptr) {
4537 				buf_ptr1 = NULL;
4538 				spin_lock_irq(&phba->hbalock);
4539 				if (!list_empty(&buf_ptr->list)) {
4540 					list_remove_head(&buf_ptr->list,
4541 						buf_ptr1, struct lpfc_dmabuf,
4542 						list);
4543 					INIT_LIST_HEAD(&buf_ptr1->list);
4544 					list_add_tail(&buf_ptr1->list,
4545 						&phba->elsbuf);
4546 					phba->elsbuf_cnt++;
4547 				}
4548 				INIT_LIST_HEAD(&buf_ptr->list);
4549 				list_add_tail(&buf_ptr->list, &phba->elsbuf);
4550 				phba->elsbuf_cnt++;
4551 				spin_unlock_irq(&phba->hbalock);
4552 			}
4553 		} else {
4554 			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
4555 			lpfc_els_free_data(phba, buf_ptr1);
4556 			elsiocb->context2 = NULL;
4557 		}
4558 	}
4559 
4560 	if (elsiocb->context3) {
4561 		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
4562 		lpfc_els_free_bpl(phba, buf_ptr);
4563 		elsiocb->context3 = NULL;
4564 	}
4565 	lpfc_sli_release_iocbq(phba, elsiocb);
4566 	return 0;
4567 }
4568 
4569 /**
4570  * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
4571  * @phba: pointer to lpfc hba data structure.
4572  * @cmdiocb: pointer to lpfc command iocb data structure.
4573  * @rspiocb: pointer to lpfc response iocb data structure.
4574  *
4575  * This routine is the completion callback function to the Logout (LOGO)
4576  * Accept (ACC) Response ELS command. This routine is invoked to indicate
4577  * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
4578  * release the ndlp if it has the last reference remaining (reference count
4579  * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
4580  * field to NULL to inform the following lpfc_els_free_iocb() routine no
4581  * ndlp reference count needs to be decremented. Otherwise, the ndlp
4582  * reference use-count shall be decremented by the lpfc_els_free_iocb()
4583  * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
4584  * IOCB data structure.
4585  **/
4586 static void
4587 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4588 		       struct lpfc_iocbq *rspiocb)
4589 {
4590 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4591 	struct lpfc_vport *vport = cmdiocb->vport;
4592 	IOCB_t *irsp;
4593 
4594 	irsp = &rspiocb->iocb;
4595 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4596 		"ACC LOGO cmpl:   status:x%x/x%x did:x%x",
4597 		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
4598 	/* ACC to LOGO completes to NPort <nlp_DID> */
4599 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4600 			 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
4601 			 "Data: x%x x%x x%x\n",
4602 			 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
4603 			 ndlp->nlp_state, ndlp->nlp_rpi);
4604 
4605 	/* This clause allows the LOGO ACC to complete and free resources
4606 	 * for the Fabric Domain Controller.  It does deliberately skip
4607 	 * the unreg_rpi and release rpi because some fabrics send RDP
4608 	 * requests after logging out from the initiator.
4609 	 */
4610 	if (ndlp->nlp_type & NLP_FABRIC &&
4611 	    ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK))
4612 		goto out;
4613 
4614 	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
4615 		/* NPort Recovery mode or node is just allocated */
4616 		if (!lpfc_nlp_not_used(ndlp)) {
4617 			/* A LOGO is completing and the node is in NPR state.
4618 			 * If this a fabric node that cleared its transport
4619 			 * registration, release the rpi.
4620 			 */
4621 			spin_lock_irq(&ndlp->lock);
4622 			ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
4623 			if (phba->sli_rev == LPFC_SLI_REV4)
4624 				ndlp->nlp_flag |= NLP_RELEASE_RPI;
4625 			spin_unlock_irq(&ndlp->lock);
4626 			lpfc_unreg_rpi(vport, ndlp);
4627 		} else {
4628 			/* Indicate the node has already released, should
4629 			 * not reference to it from within lpfc_els_free_iocb.
4630 			 */
4631 			cmdiocb->context1 = NULL;
4632 		}
4633 	}
4634  out:
4635 	/*
4636 	 * The driver received a LOGO from the rport and has ACK'd it.
4637 	 * At this point, the driver is done so release the IOCB
4638 	 */
4639 	lpfc_els_free_iocb(phba, cmdiocb);
4640 	lpfc_nlp_put(ndlp);
4641 }
4642 
4643 /**
4644  * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
4645  * @phba: pointer to lpfc hba data structure.
4646  * @pmb: pointer to the driver internal queue element for mailbox command.
4647  *
4648  * This routine is the completion callback function for unregister default
4649  * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
4650  * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
4651  * decrements the ndlp reference count held for this completion callback
4652  * function. After that, it invokes the lpfc_nlp_not_used() to check
4653  * whether there is only one reference left on the ndlp. If so, it will
4654  * perform one more decrement and trigger the release of the ndlp.
4655  **/
4656 void
4657 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4658 {
4659 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4660 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4661 	u32 mbx_flag = pmb->mbox_flag;
4662 	u32 mbx_cmd = pmb->u.mb.mbxCommand;
4663 
4664 	pmb->ctx_buf = NULL;
4665 	pmb->ctx_ndlp = NULL;
4666 
4667 	if (ndlp) {
4668 		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4669 				 "0006 rpi x%x DID:%x flg:%x %d x%px "
4670 				 "mbx_cmd x%x mbx_flag x%x x%px\n",
4671 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4672 				 kref_read(&ndlp->kref), ndlp, mbx_cmd,
4673 				 mbx_flag, pmb);
4674 
4675 		/* This ends the default/temporary RPI cleanup logic for this
4676 		 * ndlp and the node and rpi needs to be released. Free the rpi
4677 		 * first on an UNREG_LOGIN and then release the final
4678 		 * references.
4679 		 */
4680 		spin_lock_irq(&ndlp->lock);
4681 		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
4682 		if (mbx_cmd == MBX_UNREG_LOGIN)
4683 			ndlp->nlp_flag &= ~NLP_UNREG_INP;
4684 		spin_unlock_irq(&ndlp->lock);
4685 		lpfc_nlp_put(ndlp);
4686 		lpfc_drop_node(ndlp->vport, ndlp);
4687 	}
4688 
4689 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
4690 	kfree(mp);
4691 	mempool_free(pmb, phba->mbox_mem_pool);
4692 	return;
4693 }
4694 
4695 /**
4696  * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
4697  * @phba: pointer to lpfc hba data structure.
4698  * @cmdiocb: pointer to lpfc command iocb data structure.
4699  * @rspiocb: pointer to lpfc response iocb data structure.
4700  *
4701  * This routine is the completion callback function for ELS Response IOCB
4702  * command. In normal case, this callback function just properly sets the
4703  * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
4704  * field in the command IOCB is not NULL, the referred mailbox command will
4705  * be send out, and then invokes the lpfc_els_free_iocb() routine to release
4706  * the IOCB.
4707  **/
4708 static void
4709 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4710 		  struct lpfc_iocbq *rspiocb)
4711 {
4712 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4713 	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
4714 	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
4715 	IOCB_t  *irsp;
4716 	LPFC_MBOXQ_t *mbox = NULL;
4717 	struct lpfc_dmabuf *mp = NULL;
4718 
4719 	irsp = &rspiocb->iocb;
4720 
4721 	if (!vport) {
4722 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4723 				"3177 ELS response failed\n");
4724 		goto out;
4725 	}
4726 	if (cmdiocb->context_un.mbox)
4727 		mbox = cmdiocb->context_un.mbox;
4728 
4729 	/* Check to see if link went down during discovery */
4730 	if (!ndlp || lpfc_els_chk_latt(vport)) {
4731 		if (mbox) {
4732 			mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4733 			if (mp) {
4734 				lpfc_mbuf_free(phba, mp->virt, mp->phys);
4735 				kfree(mp);
4736 			}
4737 			mempool_free(mbox, phba->mbox_mem_pool);
4738 		}
4739 		goto out;
4740 	}
4741 
4742 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4743 		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
4744 		irsp->ulpStatus, irsp->un.ulpWord[4],
4745 		cmdiocb->iocb.un.elsreq64.remoteID);
4746 	/* ELS response tag <ulpIoTag> completes */
4747 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4748 			 "0110 ELS response tag x%x completes "
4749 			 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n",
4750 			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
4751 			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
4752 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4753 			 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox);
4754 	if (mbox) {
4755 		if ((rspiocb->iocb.ulpStatus == 0) &&
4756 		    (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
4757 			if (!lpfc_unreg_rpi(vport, ndlp) &&
4758 			    (!(vport->fc_flag & FC_PT2PT))) {
4759 				if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
4760 					lpfc_printf_vlog(vport, KERN_INFO,
4761 							 LOG_DISCOVERY,
4762 							 "0314 PLOGI recov "
4763 							 "DID x%x "
4764 							 "Data: x%x x%x x%x\n",
4765 							 ndlp->nlp_DID,
4766 							 ndlp->nlp_state,
4767 							 ndlp->nlp_rpi,
4768 							 ndlp->nlp_flag);
4769 					mp = mbox->ctx_buf;
4770 					if (mp) {
4771 						lpfc_mbuf_free(phba, mp->virt,
4772 							       mp->phys);
4773 						kfree(mp);
4774 					}
4775 					mempool_free(mbox, phba->mbox_mem_pool);
4776 					goto out;
4777 				}
4778 			}
4779 
4780 			/* Increment reference count to ndlp to hold the
4781 			 * reference to ndlp for the callback function.
4782 			 */
4783 			mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4784 			if (!mbox->ctx_ndlp)
4785 				goto out;
4786 
4787 			mbox->vport = vport;
4788 			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
4789 				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
4790 				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
4791 			}
4792 			else {
4793 				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
4794 				ndlp->nlp_prev_state = ndlp->nlp_state;
4795 				lpfc_nlp_set_state(vport, ndlp,
4796 					   NLP_STE_REG_LOGIN_ISSUE);
4797 			}
4798 
4799 			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
4800 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4801 			    != MBX_NOT_FINISHED)
4802 				goto out;
4803 
4804 			/* Decrement the ndlp reference count we
4805 			 * set for this failed mailbox command.
4806 			 */
4807 			lpfc_nlp_put(ndlp);
4808 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
4809 
4810 			/* ELS rsp: Cannot issue reg_login for <NPortid> */
4811 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4812 				"0138 ELS rsp: Cannot issue reg_login for x%x "
4813 				"Data: x%x x%x x%x\n",
4814 				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4815 				ndlp->nlp_rpi);
4816 		}
4817 		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4818 		if (mp) {
4819 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
4820 			kfree(mp);
4821 		}
4822 		mempool_free(mbox, phba->mbox_mem_pool);
4823 	}
4824 out:
4825 	if (ndlp && shost) {
4826 		spin_lock_irq(&ndlp->lock);
4827 		if (mbox)
4828 			ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
4829 		ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
4830 		spin_unlock_irq(&ndlp->lock);
4831 	}
4832 
4833 	/* An SLI4 NPIV instance wants to drop the node at this point under
4834 	 * these conditions and release the RPI.
4835 	 */
4836 	if (phba->sli_rev == LPFC_SLI_REV4 &&
4837 	    (vport && vport->port_type == LPFC_NPIV_PORT) &&
4838 	    ndlp->nlp_flag & NLP_RELEASE_RPI) {
4839 		lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
4840 		spin_lock_irq(&ndlp->lock);
4841 		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
4842 		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
4843 		spin_unlock_irq(&ndlp->lock);
4844 		lpfc_drop_node(vport, ndlp);
4845 	}
4846 
4847 	/* Release the originating I/O reference. */
4848 	lpfc_els_free_iocb(phba, cmdiocb);
4849 	lpfc_nlp_put(ndlp);
4850 	return;
4851 }
4852 
4853 /**
4854  * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
4855  * @vport: pointer to a host virtual N_Port data structure.
4856  * @flag: the els command code to be accepted.
4857  * @oldiocb: pointer to the original lpfc command iocb data structure.
4858  * @ndlp: pointer to a node-list data structure.
4859  * @mbox: pointer to the driver internal queue element for mailbox command.
4860  *
4861  * This routine prepares and issues an Accept (ACC) response IOCB
4862  * command. It uses the @flag to properly set up the IOCB field for the
4863  * specific ACC response command to be issued and invokes the
4864  * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
4865  * @mbox pointer is passed in, it will be put into the context_un.mbox
4866  * field of the IOCB for the completion callback function to issue the
4867  * mailbox command to the HBA later when callback is invoked.
4868  *
4869  * Note that the ndlp reference count will be incremented by 1 for holding the
4870  * ndlp and the reference to ndlp will be stored into the context1 field of
4871  * the IOCB for the completion callback function to the corresponding
4872  * response ELS IOCB command.
4873  *
4874  * Return code
4875  *   0 - Successfully issued acc response
4876  *   1 - Failed to issue acc response
4877  **/
4878 int
4879 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
4880 		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4881 		 LPFC_MBOXQ_t *mbox)
4882 {
4883 	struct lpfc_hba  *phba = vport->phba;
4884 	IOCB_t *icmd;
4885 	IOCB_t *oldcmd;
4886 	struct lpfc_iocbq *elsiocb;
4887 	uint8_t *pcmd;
4888 	struct serv_parm *sp;
4889 	uint16_t cmdsize;
4890 	int rc;
4891 	ELS_PKT *els_pkt_ptr;
4892 	struct fc_els_rdf_resp *rdf_resp;
4893 
4894 	oldcmd = &oldiocb->iocb;
4895 
4896 	switch (flag) {
4897 	case ELS_CMD_ACC:
4898 		cmdsize = sizeof(uint32_t);
4899 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4900 					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
4901 		if (!elsiocb) {
4902 			spin_lock_irq(&ndlp->lock);
4903 			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4904 			spin_unlock_irq(&ndlp->lock);
4905 			return 1;
4906 		}
4907 
4908 		icmd = &elsiocb->iocb;
4909 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4910 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4911 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4912 		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4913 		pcmd += sizeof(uint32_t);
4914 
4915 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4916 			"Issue ACC:       did:x%x flg:x%x",
4917 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4918 		break;
4919 	case ELS_CMD_FLOGI:
4920 	case ELS_CMD_PLOGI:
4921 		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
4922 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4923 					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
4924 		if (!elsiocb)
4925 			return 1;
4926 
4927 		icmd = &elsiocb->iocb;
4928 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4929 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4930 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4931 
4932 		if (mbox)
4933 			elsiocb->context_un.mbox = mbox;
4934 
4935 		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4936 		pcmd += sizeof(uint32_t);
4937 		sp = (struct serv_parm *)pcmd;
4938 
4939 		if (flag == ELS_CMD_FLOGI) {
4940 			/* Copy the received service parameters back */
4941 			memcpy(sp, &phba->fc_fabparam,
4942 			       sizeof(struct serv_parm));
4943 
4944 			/* Clear the F_Port bit */
4945 			sp->cmn.fPort = 0;
4946 
4947 			/* Mark all class service parameters as invalid */
4948 			sp->cls1.classValid = 0;
4949 			sp->cls2.classValid = 0;
4950 			sp->cls3.classValid = 0;
4951 			sp->cls4.classValid = 0;
4952 
4953 			/* Copy our worldwide names */
4954 			memcpy(&sp->portName, &vport->fc_sparam.portName,
4955 			       sizeof(struct lpfc_name));
4956 			memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
4957 			       sizeof(struct lpfc_name));
4958 		} else {
4959 			memcpy(pcmd, &vport->fc_sparam,
4960 			       sizeof(struct serv_parm));
4961 
4962 			sp->cmn.valid_vendor_ver_level = 0;
4963 			memset(sp->un.vendorVersion, 0,
4964 			       sizeof(sp->un.vendorVersion));
4965 			sp->cmn.bbRcvSizeMsb &= 0xF;
4966 
4967 			/* If our firmware supports this feature, convey that
4968 			 * info to the target using the vendor specific field.
4969 			 */
4970 			if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
4971 				sp->cmn.valid_vendor_ver_level = 1;
4972 				sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
4973 				sp->un.vv.flags =
4974 					cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
4975 			}
4976 		}
4977 
4978 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4979 			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
4980 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4981 		break;
4982 	case ELS_CMD_PRLO:
4983 		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
4984 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4985 					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
4986 		if (!elsiocb)
4987 			return 1;
4988 
4989 		icmd = &elsiocb->iocb;
4990 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4991 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4992 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4993 
4994 		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
4995 		       sizeof(uint32_t) + sizeof(PRLO));
4996 		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4997 		els_pkt_ptr = (ELS_PKT *) pcmd;
4998 		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
4999 
5000 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5001 			"Issue ACC PRLO:  did:x%x flg:x%x",
5002 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
5003 		break;
5004 	case ELS_CMD_RDF:
5005 		cmdsize = sizeof(*rdf_resp);
5006 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
5007 					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
5008 		if (!elsiocb)
5009 			return 1;
5010 
5011 		icmd = &elsiocb->iocb;
5012 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
5013 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5014 		pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5015 		rdf_resp = (struct fc_els_rdf_resp *)pcmd;
5016 		memset(rdf_resp, 0, sizeof(*rdf_resp));
5017 		rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC;
5018 
5019 		/* FC-LS-5 specifies desc_list_len shall be set to 12 */
5020 		rdf_resp->desc_list_len = cpu_to_be32(12);
5021 
5022 		/* FC-LS-5 specifies LS REQ Information descriptor */
5023 		rdf_resp->lsri.desc_tag = cpu_to_be32(1);
5024 		rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32));
5025 		rdf_resp->lsri.rqst_w0.cmd = ELS_RDF;
5026 		break;
5027 	default:
5028 		return 1;
5029 	}
5030 	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
5031 		spin_lock_irq(&ndlp->lock);
5032 		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
5033 			ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
5034 			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
5035 		spin_unlock_irq(&ndlp->lock);
5036 		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
5037 	} else {
5038 		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5039 	}
5040 
5041 	phba->fc_stat.elsXmitACC++;
5042 	elsiocb->context1 = lpfc_nlp_get(ndlp);
5043 	if (!elsiocb->context1) {
5044 		lpfc_els_free_iocb(phba, elsiocb);
5045 		return 1;
5046 	}
5047 
5048 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5049 	if (rc == IOCB_ERROR) {
5050 		lpfc_els_free_iocb(phba, elsiocb);
5051 		lpfc_nlp_put(ndlp);
5052 		return 1;
5053 	}
5054 
5055 	/* Xmit ELS ACC response tag <ulpIoTag> */
5056 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5057 			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
5058 			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5059 			 "RPI: x%x, fc_flag x%x refcnt %d\n",
5060 			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5061 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5062 			 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref));
5063 	return 0;
5064 }
5065 
5066 /**
5067  * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
5068  * @vport: pointer to a virtual N_Port data structure.
5069  * @rejectError: reject response to issue
5070  * @oldiocb: pointer to the original lpfc command iocb data structure.
5071  * @ndlp: pointer to a node-list data structure.
5072  * @mbox: pointer to the driver internal queue element for mailbox command.
5073  *
5074  * This routine prepares and issue an Reject (RJT) response IOCB
5075  * command. If a @mbox pointer is passed in, it will be put into the
5076  * context_un.mbox field of the IOCB for the completion callback function
5077  * to issue to the HBA later.
5078  *
5079  * Note that the ndlp reference count will be incremented by 1 for holding the
5080  * ndlp and the reference to ndlp will be stored into the context1 field of
5081  * the IOCB for the completion callback function to the reject response
5082  * ELS IOCB command.
5083  *
5084  * Return code
5085  *   0 - Successfully issued reject response
5086  *   1 - Failed to issue reject response
5087  **/
5088 int
5089 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
5090 		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
5091 		    LPFC_MBOXQ_t *mbox)
5092 {
5093 	int rc;
5094 	struct lpfc_hba  *phba = vport->phba;
5095 	IOCB_t *icmd;
5096 	IOCB_t *oldcmd;
5097 	struct lpfc_iocbq *elsiocb;
5098 	uint8_t *pcmd;
5099 	uint16_t cmdsize;
5100 
5101 	cmdsize = 2 * sizeof(uint32_t);
5102 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5103 				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
5104 	if (!elsiocb)
5105 		return 1;
5106 
5107 	icmd = &elsiocb->iocb;
5108 	oldcmd = &oldiocb->iocb;
5109 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
5110 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5111 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5112 
5113 	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5114 	pcmd += sizeof(uint32_t);
5115 	*((uint32_t *) (pcmd)) = rejectError;
5116 
5117 	if (mbox)
5118 		elsiocb->context_un.mbox = mbox;
5119 
5120 	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
5121 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5122 			 "0129 Xmit ELS RJT x%x response tag x%x "
5123 			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
5124 			 "rpi x%x\n",
5125 			 rejectError, elsiocb->iotag,
5126 			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
5127 			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
5128 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5129 		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
5130 		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
5131 
5132 	phba->fc_stat.elsXmitLSRJT++;
5133 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5134 	elsiocb->context1 = lpfc_nlp_get(ndlp);
5135 	if (!elsiocb->context1) {
5136 		lpfc_els_free_iocb(phba, elsiocb);
5137 		return 1;
5138 	}
5139 
5140 	/* The NPIV instance is rejecting this unsolicited ELS. Make sure the
5141 	 * node's assigned RPI needs to be released as this node will get
5142 	 * freed.
5143 	 */
5144 	if (phba->sli_rev == LPFC_SLI_REV4 &&
5145 	    vport->port_type == LPFC_NPIV_PORT) {
5146 		spin_lock_irq(&ndlp->lock);
5147 		ndlp->nlp_flag |= NLP_RELEASE_RPI;
5148 		spin_unlock_irq(&ndlp->lock);
5149 	}
5150 
5151 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5152 	if (rc == IOCB_ERROR) {
5153 		lpfc_els_free_iocb(phba, elsiocb);
5154 		lpfc_nlp_put(ndlp);
5155 		return 1;
5156 	}
5157 
5158 	return 0;
5159 }
5160 
5161 /**
5162  * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
5163  * @vport: pointer to a virtual N_Port data structure.
5164  * @oldiocb: pointer to the original lpfc command iocb data structure.
5165  * @ndlp: pointer to a node-list data structure.
5166  *
5167  * This routine prepares and issues an Accept (ACC) response to Address
5168  * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
5169  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
5170  *
5171  * Note that the ndlp reference count will be incremented by 1 for holding the
5172  * ndlp and the reference to ndlp will be stored into the context1 field of
5173  * the IOCB for the completion callback function to the ADISC Accept response
5174  * ELS IOCB command.
5175  *
5176  * Return code
5177  *   0 - Successfully issued acc adisc response
5178  *   1 - Failed to issue adisc acc response
5179  **/
5180 int
5181 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5182 		       struct lpfc_nodelist *ndlp)
5183 {
5184 	struct lpfc_hba  *phba = vport->phba;
5185 	ADISC *ap;
5186 	IOCB_t *icmd, *oldcmd;
5187 	struct lpfc_iocbq *elsiocb;
5188 	uint8_t *pcmd;
5189 	uint16_t cmdsize;
5190 	int rc;
5191 
5192 	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
5193 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5194 				     ndlp->nlp_DID, ELS_CMD_ACC);
5195 	if (!elsiocb)
5196 		return 1;
5197 
5198 	icmd = &elsiocb->iocb;
5199 	oldcmd = &oldiocb->iocb;
5200 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
5201 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5202 
5203 	/* Xmit ADISC ACC response tag <ulpIoTag> */
5204 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5205 			 "0130 Xmit ADISC ACC response iotag x%x xri: "
5206 			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
5207 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
5208 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5209 			 ndlp->nlp_rpi);
5210 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5211 
5212 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5213 	pcmd += sizeof(uint32_t);
5214 
5215 	ap = (ADISC *) (pcmd);
5216 	ap->hardAL_PA = phba->fc_pref_ALPA;
5217 	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
5218 	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
5219 	ap->DID = be32_to_cpu(vport->fc_myDID);
5220 
5221 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5222 		      "Issue ACC ADISC: did:x%x flg:x%x refcnt %d",
5223 		      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
5224 
5225 	phba->fc_stat.elsXmitACC++;
5226 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5227 	elsiocb->context1 = lpfc_nlp_get(ndlp);
5228 	if (!elsiocb->context1) {
5229 		lpfc_els_free_iocb(phba, elsiocb);
5230 		return 1;
5231 	}
5232 
5233 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5234 	if (rc == IOCB_ERROR) {
5235 		lpfc_els_free_iocb(phba, elsiocb);
5236 		lpfc_nlp_put(ndlp);
5237 		return 1;
5238 	}
5239 
5240 	/* Xmit ELS ACC response tag <ulpIoTag> */
5241 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5242 			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
5243 			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
5244 			 "RPI: x%x, fc_flag x%x\n",
5245 			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
5246 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5247 			 ndlp->nlp_rpi, vport->fc_flag);
5248 	return 0;
5249 }
5250 
5251 /**
5252  * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
5253  * @vport: pointer to a virtual N_Port data structure.
5254  * @oldiocb: pointer to the original lpfc command iocb data structure.
5255  * @ndlp: pointer to a node-list data structure.
5256  *
5257  * This routine prepares and issues an Accept (ACC) response to Process
5258  * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
5259  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
5260  *
5261  * Note that the ndlp reference count will be incremented by 1 for holding the
5262  * ndlp and the reference to ndlp will be stored into the context1 field of
5263  * the IOCB for the completion callback function to the PRLI Accept response
5264  * ELS IOCB command.
5265  *
5266  * Return code
5267  *   0 - Successfully issued acc prli response
5268  *   1 - Failed to issue acc prli response
5269  **/
5270 int
5271 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
5272 		      struct lpfc_nodelist *ndlp)
5273 {
5274 	struct lpfc_hba  *phba = vport->phba;
5275 	PRLI *npr;
5276 	struct lpfc_nvme_prli *npr_nvme;
5277 	lpfc_vpd_t *vpd;
5278 	IOCB_t *icmd;
5279 	IOCB_t *oldcmd;
5280 	struct lpfc_iocbq *elsiocb;
5281 	uint8_t *pcmd;
5282 	uint16_t cmdsize;
5283 	uint32_t prli_fc4_req, *req_payload;
5284 	struct lpfc_dmabuf *req_buf;
5285 	int rc;
5286 	u32 elsrspcmd;
5287 
5288 	/* Need the incoming PRLI payload to determine if the ACC is for an
5289 	 * FC4 or NVME PRLI type.  The PRLI type is at word 1.
5290 	 */
5291 	req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
5292 	req_payload = (((uint32_t *)req_buf->virt) + 1);
5293 
5294 	/* PRLI type payload is at byte 3 for FCP or NVME. */
5295 	prli_fc4_req = be32_to_cpu(*req_payload);
5296 	prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
5297 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5298 			 "6127 PRLI_ACC:  Req Type x%x, Word1 x%08x\n",
5299 			 prli_fc4_req, *((uint32_t *)req_payload));
5300 
5301 	if (prli_fc4_req == PRLI_FCP_TYPE) {
5302 		cmdsize = sizeof(uint32_t) + sizeof(PRLI);
5303 		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
5304 	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
5305 		cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
5306 		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
5307 	} else {
5308 		return 1;
5309 	}
5310 
5311 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5312 		ndlp->nlp_DID, elsrspcmd);
5313 	if (!elsiocb)
5314 		return 1;
5315 
5316 	icmd = &elsiocb->iocb;
5317 	oldcmd = &oldiocb->iocb;
5318 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
5319 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5320 
5321 	/* Xmit PRLI ACC response tag <ulpIoTag> */
5322 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5323 			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
5324 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
5325 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
5326 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5327 			 ndlp->nlp_rpi);
5328 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5329 	memset(pcmd, 0, cmdsize);
5330 
5331 	*((uint32_t *)(pcmd)) = elsrspcmd;
5332 	pcmd += sizeof(uint32_t);
5333 
5334 	/* For PRLI, remainder of payload is PRLI parameter page */
5335 	vpd = &phba->vpd;
5336 
5337 	if (prli_fc4_req == PRLI_FCP_TYPE) {
5338 		/*
5339 		 * If the remote port is a target and our firmware version
5340 		 * is 3.20 or later, set the following bits for FC-TAPE
5341 		 * support.
5342 		 */
5343 		npr = (PRLI *) pcmd;
5344 		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
5345 		    (vpd->rev.feaLevelHigh >= 0x02)) {
5346 			npr->ConfmComplAllowed = 1;
5347 			npr->Retry = 1;
5348 			npr->TaskRetryIdReq = 1;
5349 		}
5350 		npr->acceptRspCode = PRLI_REQ_EXECUTED;
5351 		npr->estabImagePair = 1;
5352 		npr->readXferRdyDis = 1;
5353 		npr->ConfmComplAllowed = 1;
5354 		npr->prliType = PRLI_FCP_TYPE;
5355 		npr->initiatorFunc = 1;
5356 	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
5357 		/* Respond with an NVME PRLI Type */
5358 		npr_nvme = (struct lpfc_nvme_prli *) pcmd;
5359 		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
5360 		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
5361 		bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
5362 		if (phba->nvmet_support) {
5363 			bf_set(prli_tgt, npr_nvme, 1);
5364 			bf_set(prli_disc, npr_nvme, 1);
5365 			if (phba->cfg_nvme_enable_fb) {
5366 				bf_set(prli_fba, npr_nvme, 1);
5367 
5368 				/* TBD.  Target mode needs to post buffers
5369 				 * that support the configured first burst
5370 				 * byte size.
5371 				 */
5372 				bf_set(prli_fb_sz, npr_nvme,
5373 				       phba->cfg_nvmet_fb_size);
5374 			}
5375 		} else {
5376 			bf_set(prli_init, npr_nvme, 1);
5377 		}
5378 
5379 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
5380 				 "6015 NVME issue PRLI ACC word1 x%08x "
5381 				 "word4 x%08x word5 x%08x flag x%x, "
5382 				 "fcp_info x%x nlp_type x%x\n",
5383 				 npr_nvme->word1, npr_nvme->word4,
5384 				 npr_nvme->word5, ndlp->nlp_flag,
5385 				 ndlp->nlp_fcp_info, ndlp->nlp_type);
5386 		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
5387 		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
5388 		npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
5389 	} else
5390 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5391 				 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
5392 				 prli_fc4_req, ndlp->nlp_fc4_type,
5393 				 ndlp->nlp_DID);
5394 
5395 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5396 		      "Issue ACC PRLI:  did:x%x flg:x%x",
5397 		      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
5398 
5399 	phba->fc_stat.elsXmitACC++;
5400 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5401 	elsiocb->context1 =  lpfc_nlp_get(ndlp);
5402 	if (!elsiocb->context1) {
5403 		lpfc_els_free_iocb(phba, elsiocb);
5404 		return 1;
5405 	}
5406 
5407 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5408 	if (rc == IOCB_ERROR) {
5409 		lpfc_els_free_iocb(phba, elsiocb);
5410 		lpfc_nlp_put(ndlp);
5411 		return 1;
5412 	}
5413 
5414 	return 0;
5415 }
5416 
5417 /**
5418  * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
5419  * @vport: pointer to a virtual N_Port data structure.
5420  * @format: rnid command format.
5421  * @oldiocb: pointer to the original lpfc command iocb data structure.
5422  * @ndlp: pointer to a node-list data structure.
5423  *
5424  * This routine issues a Request Node Identification Data (RNID) Accept
5425  * (ACC) response. It constructs the RNID ACC response command according to
5426  * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
5427  * issue the response.
5428  *
5429  * Note that the ndlp reference count will be incremented by 1 for holding the
5430  * ndlp and the reference to ndlp will be stored into the context1 field of
5431  * the IOCB for the completion callback function.
5432  *
5433  * Return code
5434  *   0 - Successfully issued acc rnid response
5435  *   1 - Failed to issue acc rnid response
5436  **/
5437 static int
5438 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
5439 		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
5440 {
5441 	struct lpfc_hba  *phba = vport->phba;
5442 	RNID *rn;
5443 	IOCB_t *icmd, *oldcmd;
5444 	struct lpfc_iocbq *elsiocb;
5445 	uint8_t *pcmd;
5446 	uint16_t cmdsize;
5447 	int rc;
5448 
5449 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
5450 					+ (2 * sizeof(struct lpfc_name));
5451 	if (format)
5452 		cmdsize += sizeof(RNID_TOP_DISC);
5453 
5454 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5455 				     ndlp->nlp_DID, ELS_CMD_ACC);
5456 	if (!elsiocb)
5457 		return 1;
5458 
5459 	icmd = &elsiocb->iocb;
5460 	oldcmd = &oldiocb->iocb;
5461 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
5462 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
5463 
5464 	/* Xmit RNID ACC response tag <ulpIoTag> */
5465 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5466 			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
5467 			 elsiocb->iotag, elsiocb->iocb.ulpContext);
5468 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5469 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5470 	pcmd += sizeof(uint32_t);
5471 
5472 	memset(pcmd, 0, sizeof(RNID));
5473 	rn = (RNID *) (pcmd);
5474 	rn->Format = format;
5475 	rn->CommonLen = (2 * sizeof(struct lpfc_name));
5476 	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
5477 	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
5478 	switch (format) {
5479 	case 0:
5480 		rn->SpecificLen = 0;
5481 		break;
5482 	case RNID_TOPOLOGY_DISC:
5483 		rn->SpecificLen = sizeof(RNID_TOP_DISC);
5484 		memcpy(&rn->un.topologyDisc.portName,
5485 		       &vport->fc_portname, sizeof(struct lpfc_name));
5486 		rn->un.topologyDisc.unitType = RNID_HBA;
5487 		rn->un.topologyDisc.physPort = 0;
5488 		rn->un.topologyDisc.attachedNodes = 0;
5489 		break;
5490 	default:
5491 		rn->CommonLen = 0;
5492 		rn->SpecificLen = 0;
5493 		break;
5494 	}
5495 
5496 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5497 		      "Issue ACC RNID:  did:x%x flg:x%x refcnt %d",
5498 		      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
5499 
5500 	phba->fc_stat.elsXmitACC++;
5501 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5502 	elsiocb->context1 = lpfc_nlp_get(ndlp);
5503 	if (!elsiocb->context1) {
5504 		lpfc_els_free_iocb(phba, elsiocb);
5505 		return 1;
5506 	}
5507 
5508 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5509 	if (rc == IOCB_ERROR) {
5510 		lpfc_els_free_iocb(phba, elsiocb);
5511 		lpfc_nlp_put(ndlp);
5512 		return 1;
5513 	}
5514 
5515 	return 0;
5516 }
5517 
5518 /**
5519  * lpfc_els_clear_rrq - Clear the rq that this rrq describes.
5520  * @vport: pointer to a virtual N_Port data structure.
5521  * @iocb: pointer to the lpfc command iocb data structure.
5522  * @ndlp: pointer to a node-list data structure.
5523  *
5524  * Return
5525  **/
5526 static void
5527 lpfc_els_clear_rrq(struct lpfc_vport *vport,
5528 		   struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
5529 {
5530 	struct lpfc_hba  *phba = vport->phba;
5531 	uint8_t *pcmd;
5532 	struct RRQ *rrq;
5533 	uint16_t rxid;
5534 	uint16_t xri;
5535 	struct lpfc_node_rrq *prrq;
5536 
5537 
5538 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
5539 	pcmd += sizeof(uint32_t);
5540 	rrq = (struct RRQ *)pcmd;
5541 	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
5542 	rxid = bf_get(rrq_rxid, rrq);
5543 
5544 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5545 			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
5546 			" x%x x%x\n",
5547 			be32_to_cpu(bf_get(rrq_did, rrq)),
5548 			bf_get(rrq_oxid, rrq),
5549 			rxid,
5550 			iocb->iotag, iocb->iocb.ulpContext);
5551 
5552 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5553 		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
5554 		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
5555 	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
5556 		xri = bf_get(rrq_oxid, rrq);
5557 	else
5558 		xri = rxid;
5559 	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
5560 	if (prrq)
5561 		lpfc_clr_rrq_active(phba, xri, prrq);
5562 	return;
5563 }
5564 
5565 /**
5566  * lpfc_els_rsp_echo_acc - Issue echo acc response
5567  * @vport: pointer to a virtual N_Port data structure.
5568  * @data: pointer to echo data to return in the accept.
5569  * @oldiocb: pointer to the original lpfc command iocb data structure.
5570  * @ndlp: pointer to a node-list data structure.
5571  *
5572  * Return code
5573  *   0 - Successfully issued acc echo response
5574  *   1 - Failed to issue acc echo response
5575  **/
5576 static int
5577 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
5578 		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
5579 {
5580 	struct lpfc_hba  *phba = vport->phba;
5581 	struct lpfc_iocbq *elsiocb;
5582 	uint8_t *pcmd;
5583 	uint16_t cmdsize;
5584 	int rc;
5585 
5586 	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
5587 
5588 	/* The accumulated length can exceed the BPL_SIZE.  For
5589 	 * now, use this as the limit
5590 	 */
5591 	if (cmdsize > LPFC_BPL_SIZE)
5592 		cmdsize = LPFC_BPL_SIZE;
5593 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5594 				     ndlp->nlp_DID, ELS_CMD_ACC);
5595 	if (!elsiocb)
5596 		return 1;
5597 
5598 	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;  /* Xri / rx_id */
5599 	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
5600 
5601 	/* Xmit ECHO ACC response tag <ulpIoTag> */
5602 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5603 			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
5604 			 elsiocb->iotag, elsiocb->iocb.ulpContext);
5605 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5606 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5607 	pcmd += sizeof(uint32_t);
5608 	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
5609 
5610 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5611 		      "Issue ACC ECHO:  did:x%x flg:x%x refcnt %d",
5612 		      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
5613 
5614 	phba->fc_stat.elsXmitACC++;
5615 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5616 	elsiocb->context1 =  lpfc_nlp_get(ndlp);
5617 	if (!elsiocb->context1) {
5618 		lpfc_els_free_iocb(phba, elsiocb);
5619 		return 1;
5620 	}
5621 
5622 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5623 	if (rc == IOCB_ERROR) {
5624 		lpfc_els_free_iocb(phba, elsiocb);
5625 		lpfc_nlp_put(ndlp);
5626 		return 1;
5627 	}
5628 
5629 	return 0;
5630 }
5631 
5632 /**
5633  * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
5634  * @vport: pointer to a host virtual N_Port data structure.
5635  *
5636  * This routine issues Address Discover (ADISC) ELS commands to those
5637  * N_Ports which are in node port recovery state and ADISC has not been issued
5638  * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
5639  * lpfc_issue_els_adisc() routine, the per @vport number of discover count
5640  * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
5641  * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
5642  * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC
5643  * IOCBs quit for later pick up. On the other hand, after walking through
5644  * all the ndlps with the @vport and there is none ADISC IOCB issued, the
5645  * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is
5646  * no more ADISC need to be sent.
5647  *
5648  * Return code
5649  *    The number of N_Ports with adisc issued.
5650  **/
5651 int
5652 lpfc_els_disc_adisc(struct lpfc_vport *vport)
5653 {
5654 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5655 	struct lpfc_nodelist *ndlp, *next_ndlp;
5656 	int sentadisc = 0;
5657 
5658 	/* go thru NPR nodes and issue any remaining ELS ADISCs */
5659 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5660 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5661 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5662 		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
5663 			spin_lock_irq(&ndlp->lock);
5664 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5665 			spin_unlock_irq(&ndlp->lock);
5666 			ndlp->nlp_prev_state = ndlp->nlp_state;
5667 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5668 			lpfc_issue_els_adisc(vport, ndlp, 0);
5669 			sentadisc++;
5670 			vport->num_disc_nodes++;
5671 			if (vport->num_disc_nodes >=
5672 			    vport->cfg_discovery_threads) {
5673 				spin_lock_irq(shost->host_lock);
5674 				vport->fc_flag |= FC_NLP_MORE;
5675 				spin_unlock_irq(shost->host_lock);
5676 				break;
5677 			}
5678 		}
5679 	}
5680 	if (sentadisc == 0) {
5681 		spin_lock_irq(shost->host_lock);
5682 		vport->fc_flag &= ~FC_NLP_MORE;
5683 		spin_unlock_irq(shost->host_lock);
5684 	}
5685 	return sentadisc;
5686 }
5687 
5688 /**
5689  * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
5690  * @vport: pointer to a host virtual N_Port data structure.
5691  *
5692  * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
5693  * which are in node port recovery state, with a @vport. Each time an ELS
5694  * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
5695  * the per @vport number of discover count (num_disc_nodes) shall be
5696  * incremented. If the num_disc_nodes reaches a pre-configured threshold
5697  * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE
5698  * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for
5699  * later pick up. On the other hand, after walking through all the ndlps with
5700  * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag
5701  * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC
5702  * PLOGI need to be sent.
5703  *
5704  * Return code
5705  *   The number of N_Ports with plogi issued.
5706  **/
5707 int
5708 lpfc_els_disc_plogi(struct lpfc_vport *vport)
5709 {
5710 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5711 	struct lpfc_nodelist *ndlp, *next_ndlp;
5712 	int sentplogi = 0;
5713 
5714 	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
5715 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5716 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5717 				(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5718 				(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
5719 				(ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
5720 			ndlp->nlp_prev_state = ndlp->nlp_state;
5721 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
5722 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5723 			sentplogi++;
5724 			vport->num_disc_nodes++;
5725 			if (vport->num_disc_nodes >=
5726 					vport->cfg_discovery_threads) {
5727 				spin_lock_irq(shost->host_lock);
5728 				vport->fc_flag |= FC_NLP_MORE;
5729 				spin_unlock_irq(shost->host_lock);
5730 				break;
5731 			}
5732 		}
5733 	}
5734 
5735 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5736 			 "6452 Discover PLOGI %d flag x%x\n",
5737 			 sentplogi, vport->fc_flag);
5738 
5739 	if (sentplogi) {
5740 		lpfc_set_disctmo(vport);
5741 	}
5742 	else {
5743 		spin_lock_irq(shost->host_lock);
5744 		vport->fc_flag &= ~FC_NLP_MORE;
5745 		spin_unlock_irq(shost->host_lock);
5746 	}
5747 	return sentplogi;
5748 }
5749 
5750 static uint32_t
5751 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
5752 		uint32_t word0)
5753 {
5754 
5755 	desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
5756 	desc->payload.els_req = word0;
5757 	desc->length = cpu_to_be32(sizeof(desc->payload));
5758 
5759 	return sizeof(struct fc_rdp_link_service_desc);
5760 }
5761 
5762 static uint32_t
5763 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
5764 		uint8_t *page_a0, uint8_t *page_a2)
5765 {
5766 	uint16_t wavelength;
5767 	uint16_t temperature;
5768 	uint16_t rx_power;
5769 	uint16_t tx_bias;
5770 	uint16_t tx_power;
5771 	uint16_t vcc;
5772 	uint16_t flag = 0;
5773 	struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
5774 	struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
5775 
5776 	desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
5777 
5778 	trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
5779 			&page_a0[SSF_TRANSCEIVER_CODE_B4];
5780 	trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
5781 			&page_a0[SSF_TRANSCEIVER_CODE_B5];
5782 
5783 	if ((trasn_code_byte4->fc_sw_laser) ||
5784 	    (trasn_code_byte5->fc_sw_laser_sl) ||
5785 	    (trasn_code_byte5->fc_sw_laser_sn)) {  /* check if its short WL */
5786 		flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
5787 	} else if (trasn_code_byte4->fc_lw_laser) {
5788 		wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
5789 			page_a0[SSF_WAVELENGTH_B0];
5790 		if (wavelength == SFP_WAVELENGTH_LC1310)
5791 			flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
5792 		if (wavelength == SFP_WAVELENGTH_LL1550)
5793 			flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
5794 	}
5795 	/* check if its SFP+ */
5796 	flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
5797 			SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
5798 					<< SFP_FLAG_CT_SHIFT;
5799 
5800 	/* check if its OPTICAL */
5801 	flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
5802 			SFP_FLAG_IS_OPTICAL_PORT : 0)
5803 					<< SFP_FLAG_IS_OPTICAL_SHIFT;
5804 
5805 	temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
5806 		page_a2[SFF_TEMPERATURE_B0]);
5807 	vcc = (page_a2[SFF_VCC_B1] << 8 |
5808 		page_a2[SFF_VCC_B0]);
5809 	tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
5810 		page_a2[SFF_TXPOWER_B0]);
5811 	tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
5812 		page_a2[SFF_TX_BIAS_CURRENT_B0]);
5813 	rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
5814 		page_a2[SFF_RXPOWER_B0]);
5815 	desc->sfp_info.temperature = cpu_to_be16(temperature);
5816 	desc->sfp_info.rx_power = cpu_to_be16(rx_power);
5817 	desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
5818 	desc->sfp_info.tx_power = cpu_to_be16(tx_power);
5819 	desc->sfp_info.vcc = cpu_to_be16(vcc);
5820 
5821 	desc->sfp_info.flags = cpu_to_be16(flag);
5822 	desc->length = cpu_to_be32(sizeof(desc->sfp_info));
5823 
5824 	return sizeof(struct fc_rdp_sfp_desc);
5825 }
5826 
5827 static uint32_t
5828 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
5829 		READ_LNK_VAR *stat)
5830 {
5831 	uint32_t type;
5832 
5833 	desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
5834 
5835 	type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
5836 
5837 	desc->info.port_type = cpu_to_be32(type);
5838 
5839 	desc->info.link_status.link_failure_cnt =
5840 		cpu_to_be32(stat->linkFailureCnt);
5841 	desc->info.link_status.loss_of_synch_cnt =
5842 		cpu_to_be32(stat->lossSyncCnt);
5843 	desc->info.link_status.loss_of_signal_cnt =
5844 		cpu_to_be32(stat->lossSignalCnt);
5845 	desc->info.link_status.primitive_seq_proto_err =
5846 		cpu_to_be32(stat->primSeqErrCnt);
5847 	desc->info.link_status.invalid_trans_word =
5848 		cpu_to_be32(stat->invalidXmitWord);
5849 	desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
5850 
5851 	desc->length = cpu_to_be32(sizeof(desc->info));
5852 
5853 	return sizeof(struct fc_rdp_link_error_status_desc);
5854 }
5855 
5856 static uint32_t
5857 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
5858 		      struct lpfc_vport *vport)
5859 {
5860 	uint32_t bbCredit;
5861 
5862 	desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
5863 
5864 	bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
5865 			(vport->fc_sparam.cmn.bbCreditMsb << 8);
5866 	desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
5867 	if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
5868 		bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
5869 			(vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
5870 		desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
5871 	} else {
5872 		desc->bbc_info.attached_port_bbc = 0;
5873 	}
5874 
5875 	desc->bbc_info.rtt = 0;
5876 	desc->length = cpu_to_be32(sizeof(desc->bbc_info));
5877 
5878 	return sizeof(struct fc_rdp_bbc_desc);
5879 }
5880 
5881 static uint32_t
5882 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
5883 			   struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
5884 {
5885 	uint32_t flags = 0;
5886 
5887 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5888 
5889 	desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
5890 	desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
5891 	desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
5892 	desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
5893 
5894 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
5895 		flags |= RDP_OET_HIGH_ALARM;
5896 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
5897 		flags |= RDP_OET_LOW_ALARM;
5898 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
5899 		flags |= RDP_OET_HIGH_WARNING;
5900 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
5901 		flags |= RDP_OET_LOW_WARNING;
5902 
5903 	flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
5904 	desc->oed_info.function_flags = cpu_to_be32(flags);
5905 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5906 	return sizeof(struct fc_rdp_oed_sfp_desc);
5907 }
5908 
5909 static uint32_t
5910 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
5911 			      struct fc_rdp_oed_sfp_desc *desc,
5912 			      uint8_t *page_a2)
5913 {
5914 	uint32_t flags = 0;
5915 
5916 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5917 
5918 	desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
5919 	desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
5920 	desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
5921 	desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
5922 
5923 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
5924 		flags |= RDP_OET_HIGH_ALARM;
5925 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
5926 		flags |= RDP_OET_LOW_ALARM;
5927 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
5928 		flags |= RDP_OET_HIGH_WARNING;
5929 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
5930 		flags |= RDP_OET_LOW_WARNING;
5931 
5932 	flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
5933 	desc->oed_info.function_flags = cpu_to_be32(flags);
5934 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5935 	return sizeof(struct fc_rdp_oed_sfp_desc);
5936 }
5937 
5938 static uint32_t
5939 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
5940 			     struct fc_rdp_oed_sfp_desc *desc,
5941 			     uint8_t *page_a2)
5942 {
5943 	uint32_t flags = 0;
5944 
5945 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5946 
5947 	desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
5948 	desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
5949 	desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
5950 	desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
5951 
5952 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
5953 		flags |= RDP_OET_HIGH_ALARM;
5954 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
5955 		flags |= RDP_OET_LOW_ALARM;
5956 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
5957 		flags |= RDP_OET_HIGH_WARNING;
5958 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
5959 		flags |= RDP_OET_LOW_WARNING;
5960 
5961 	flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
5962 	desc->oed_info.function_flags = cpu_to_be32(flags);
5963 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5964 	return sizeof(struct fc_rdp_oed_sfp_desc);
5965 }
5966 
5967 static uint32_t
5968 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
5969 			      struct fc_rdp_oed_sfp_desc *desc,
5970 			      uint8_t *page_a2)
5971 {
5972 	uint32_t flags = 0;
5973 
5974 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5975 
5976 	desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
5977 	desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
5978 	desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
5979 	desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
5980 
5981 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
5982 		flags |= RDP_OET_HIGH_ALARM;
5983 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
5984 		flags |= RDP_OET_LOW_ALARM;
5985 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
5986 		flags |= RDP_OET_HIGH_WARNING;
5987 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
5988 		flags |= RDP_OET_LOW_WARNING;
5989 
5990 	flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
5991 	desc->oed_info.function_flags = cpu_to_be32(flags);
5992 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5993 	return sizeof(struct fc_rdp_oed_sfp_desc);
5994 }
5995 
5996 
5997 static uint32_t
5998 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
5999 			      struct fc_rdp_oed_sfp_desc *desc,
6000 			      uint8_t *page_a2)
6001 {
6002 	uint32_t flags = 0;
6003 
6004 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
6005 
6006 	desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
6007 	desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
6008 	desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
6009 	desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
6010 
6011 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6012 		flags |= RDP_OET_HIGH_ALARM;
6013 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
6014 		flags |= RDP_OET_LOW_ALARM;
6015 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
6016 		flags |= RDP_OET_HIGH_WARNING;
6017 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
6018 		flags |= RDP_OET_LOW_WARNING;
6019 
6020 	flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
6021 	desc->oed_info.function_flags = cpu_to_be32(flags);
6022 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
6023 	return sizeof(struct fc_rdp_oed_sfp_desc);
6024 }
6025 
6026 static uint32_t
6027 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
6028 		      uint8_t *page_a0, struct lpfc_vport *vport)
6029 {
6030 	desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
6031 	memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
6032 	memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
6033 	memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
6034 	memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
6035 	memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
6036 	desc->length = cpu_to_be32(sizeof(desc->opd_info));
6037 	return sizeof(struct fc_rdp_opd_sfp_desc);
6038 }
6039 
6040 static uint32_t
6041 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
6042 {
6043 	if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
6044 		return 0;
6045 	desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
6046 
6047 	desc->info.CorrectedBlocks =
6048 		cpu_to_be32(stat->fecCorrBlkCount);
6049 	desc->info.UncorrectableBlocks =
6050 		cpu_to_be32(stat->fecUncorrBlkCount);
6051 
6052 	desc->length = cpu_to_be32(sizeof(desc->info));
6053 
6054 	return sizeof(struct fc_fec_rdp_desc);
6055 }
6056 
6057 static uint32_t
6058 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
6059 {
6060 	uint16_t rdp_cap = 0;
6061 	uint16_t rdp_speed;
6062 
6063 	desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
6064 
6065 	switch (phba->fc_linkspeed) {
6066 	case LPFC_LINK_SPEED_1GHZ:
6067 		rdp_speed = RDP_PS_1GB;
6068 		break;
6069 	case LPFC_LINK_SPEED_2GHZ:
6070 		rdp_speed = RDP_PS_2GB;
6071 		break;
6072 	case LPFC_LINK_SPEED_4GHZ:
6073 		rdp_speed = RDP_PS_4GB;
6074 		break;
6075 	case LPFC_LINK_SPEED_8GHZ:
6076 		rdp_speed = RDP_PS_8GB;
6077 		break;
6078 	case LPFC_LINK_SPEED_10GHZ:
6079 		rdp_speed = RDP_PS_10GB;
6080 		break;
6081 	case LPFC_LINK_SPEED_16GHZ:
6082 		rdp_speed = RDP_PS_16GB;
6083 		break;
6084 	case LPFC_LINK_SPEED_32GHZ:
6085 		rdp_speed = RDP_PS_32GB;
6086 		break;
6087 	case LPFC_LINK_SPEED_64GHZ:
6088 		rdp_speed = RDP_PS_64GB;
6089 		break;
6090 	default:
6091 		rdp_speed = RDP_PS_UNKNOWN;
6092 		break;
6093 	}
6094 
6095 	desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
6096 
6097 	if (phba->lmt & LMT_128Gb)
6098 		rdp_cap |= RDP_PS_128GB;
6099 	if (phba->lmt & LMT_64Gb)
6100 		rdp_cap |= RDP_PS_64GB;
6101 	if (phba->lmt & LMT_32Gb)
6102 		rdp_cap |= RDP_PS_32GB;
6103 	if (phba->lmt & LMT_16Gb)
6104 		rdp_cap |= RDP_PS_16GB;
6105 	if (phba->lmt & LMT_10Gb)
6106 		rdp_cap |= RDP_PS_10GB;
6107 	if (phba->lmt & LMT_8Gb)
6108 		rdp_cap |= RDP_PS_8GB;
6109 	if (phba->lmt & LMT_4Gb)
6110 		rdp_cap |= RDP_PS_4GB;
6111 	if (phba->lmt & LMT_2Gb)
6112 		rdp_cap |= RDP_PS_2GB;
6113 	if (phba->lmt & LMT_1Gb)
6114 		rdp_cap |= RDP_PS_1GB;
6115 
6116 	if (rdp_cap == 0)
6117 		rdp_cap = RDP_CAP_UNKNOWN;
6118 	if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
6119 		rdp_cap |= RDP_CAP_USER_CONFIGURED;
6120 
6121 	desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
6122 	desc->length = cpu_to_be32(sizeof(desc->info));
6123 	return sizeof(struct fc_rdp_port_speed_desc);
6124 }
6125 
6126 static uint32_t
6127 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
6128 		struct lpfc_vport *vport)
6129 {
6130 
6131 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
6132 
6133 	memcpy(desc->port_names.wwnn, &vport->fc_nodename,
6134 			sizeof(desc->port_names.wwnn));
6135 
6136 	memcpy(desc->port_names.wwpn, &vport->fc_portname,
6137 			sizeof(desc->port_names.wwpn));
6138 
6139 	desc->length = cpu_to_be32(sizeof(desc->port_names));
6140 	return sizeof(struct fc_rdp_port_name_desc);
6141 }
6142 
6143 static uint32_t
6144 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
6145 		struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6146 {
6147 
6148 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
6149 	if (vport->fc_flag & FC_FABRIC) {
6150 		memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
6151 		       sizeof(desc->port_names.wwnn));
6152 
6153 		memcpy(desc->port_names.wwpn, &vport->fabric_portname,
6154 		       sizeof(desc->port_names.wwpn));
6155 	} else {  /* Point to Point */
6156 		memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
6157 		       sizeof(desc->port_names.wwnn));
6158 
6159 		memcpy(desc->port_names.wwpn, &ndlp->nlp_portname,
6160 		       sizeof(desc->port_names.wwpn));
6161 	}
6162 
6163 	desc->length = cpu_to_be32(sizeof(desc->port_names));
6164 	return sizeof(struct fc_rdp_port_name_desc);
6165 }
6166 
6167 static void
6168 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
6169 		int status)
6170 {
6171 	struct lpfc_nodelist *ndlp = rdp_context->ndlp;
6172 	struct lpfc_vport *vport = ndlp->vport;
6173 	struct lpfc_iocbq *elsiocb;
6174 	struct ulp_bde64 *bpl;
6175 	IOCB_t *icmd;
6176 	uint8_t *pcmd;
6177 	struct ls_rjt *stat;
6178 	struct fc_rdp_res_frame *rdp_res;
6179 	uint32_t cmdsize, len;
6180 	uint16_t *flag_ptr;
6181 	int rc;
6182 
6183 	if (status != SUCCESS)
6184 		goto error;
6185 
6186 	/* This will change once we know the true size of the RDP payload */
6187 	cmdsize = sizeof(struct fc_rdp_res_frame);
6188 
6189 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
6190 			lpfc_max_els_tries, rdp_context->ndlp,
6191 			rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
6192 	if (!elsiocb)
6193 		goto free_rdp_context;
6194 
6195 	icmd = &elsiocb->iocb;
6196 	icmd->ulpContext = rdp_context->rx_id;
6197 	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
6198 
6199 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6200 			"2171 Xmit RDP response tag x%x xri x%x, "
6201 			"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
6202 			elsiocb->iotag, elsiocb->iocb.ulpContext,
6203 			ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6204 			ndlp->nlp_rpi);
6205 	rdp_res = (struct fc_rdp_res_frame *)
6206 		(((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6207 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6208 	memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
6209 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6210 
6211 	/* Update Alarm and Warning */
6212 	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
6213 	phba->sfp_alarm |= *flag_ptr;
6214 	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
6215 	phba->sfp_warning |= *flag_ptr;
6216 
6217 	/* For RDP payload */
6218 	len = 8;
6219 	len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
6220 					 (len + pcmd), ELS_CMD_RDP);
6221 
6222 	len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
6223 			rdp_context->page_a0, rdp_context->page_a2);
6224 	len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
6225 				  phba);
6226 	len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
6227 				       (len + pcmd), &rdp_context->link_stat);
6228 	len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
6229 					     (len + pcmd), vport);
6230 	len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
6231 					(len + pcmd), vport, ndlp);
6232 	len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
6233 			&rdp_context->link_stat);
6234 	len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
6235 				     &rdp_context->link_stat, vport);
6236 	len += lpfc_rdp_res_oed_temp_desc(phba,
6237 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6238 				rdp_context->page_a2);
6239 	len += lpfc_rdp_res_oed_voltage_desc(phba,
6240 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6241 				rdp_context->page_a2);
6242 	len += lpfc_rdp_res_oed_txbias_desc(phba,
6243 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6244 				rdp_context->page_a2);
6245 	len += lpfc_rdp_res_oed_txpower_desc(phba,
6246 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6247 				rdp_context->page_a2);
6248 	len += lpfc_rdp_res_oed_rxpower_desc(phba,
6249 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
6250 				rdp_context->page_a2);
6251 	len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
6252 				     rdp_context->page_a0, vport);
6253 
6254 	rdp_res->length = cpu_to_be32(len - 8);
6255 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6256 
6257 	/* Now that we know the true size of the payload, update the BPL */
6258 	bpl = (struct ulp_bde64 *)
6259 		(((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
6260 	bpl->tus.f.bdeSize = len;
6261 	bpl->tus.f.bdeFlags = 0;
6262 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
6263 
6264 	phba->fc_stat.elsXmitACC++;
6265 	elsiocb->context1 = lpfc_nlp_get(ndlp);
6266 	if (!elsiocb->context1) {
6267 		lpfc_els_free_iocb(phba, elsiocb);
6268 		goto free_rdp_context;
6269 	}
6270 
6271 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6272 	if (rc == IOCB_ERROR) {
6273 		lpfc_els_free_iocb(phba, elsiocb);
6274 		lpfc_nlp_put(ndlp);
6275 	}
6276 
6277 	goto free_rdp_context;
6278 
6279 error:
6280 	cmdsize = 2 * sizeof(uint32_t);
6281 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
6282 			ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
6283 	if (!elsiocb)
6284 		goto free_rdp_context;
6285 
6286 	icmd = &elsiocb->iocb;
6287 	icmd->ulpContext = rdp_context->rx_id;
6288 	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
6289 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6290 
6291 	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
6292 	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
6293 	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6294 
6295 	phba->fc_stat.elsXmitLSRJT++;
6296 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6297 	elsiocb->context1 = lpfc_nlp_get(ndlp);
6298 	if (!elsiocb->context1) {
6299 		lpfc_els_free_iocb(phba, elsiocb);
6300 		goto free_rdp_context;
6301 	}
6302 
6303 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6304 	if (rc == IOCB_ERROR) {
6305 		lpfc_els_free_iocb(phba, elsiocb);
6306 		lpfc_nlp_put(ndlp);
6307 	}
6308 
6309 free_rdp_context:
6310 	/* This reference put is for the original unsolicited RDP. If the
6311 	 * iocb prep failed, there is no reference to remove.
6312 	 */
6313 	lpfc_nlp_put(ndlp);
6314 	kfree(rdp_context);
6315 }
6316 
6317 static int
6318 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
6319 {
6320 	LPFC_MBOXQ_t *mbox = NULL;
6321 	int rc;
6322 
6323 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6324 	if (!mbox) {
6325 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
6326 				"7105 failed to allocate mailbox memory");
6327 		return 1;
6328 	}
6329 
6330 	if (lpfc_sli4_dump_page_a0(phba, mbox))
6331 		goto prep_mbox_fail;
6332 	mbox->vport = rdp_context->ndlp->vport;
6333 	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
6334 	mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
6335 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6336 	if (rc == MBX_NOT_FINISHED)
6337 		goto issue_mbox_fail;
6338 
6339 	return 0;
6340 
6341 prep_mbox_fail:
6342 issue_mbox_fail:
6343 	mempool_free(mbox, phba->mbox_mem_pool);
6344 	return 1;
6345 }
6346 
6347 /*
6348  * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
6349  * @vport: pointer to a host virtual N_Port data structure.
6350  * @cmdiocb: pointer to lpfc command iocb data structure.
6351  * @ndlp: pointer to a node-list data structure.
6352  *
6353  * This routine processes an unsolicited RDP(Read Diagnostic Parameters)
6354  * IOCB. First, the payload of the unsolicited RDP is checked.
6355  * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
6356  * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
6357  * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl
6358  * gather all data and send RDP response.
6359  *
6360  * Return code
6361  *   0 - Sent the acc response
6362  *   1 - Sent the reject response.
6363  */
6364 static int
6365 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6366 		struct lpfc_nodelist *ndlp)
6367 {
6368 	struct lpfc_hba *phba = vport->phba;
6369 	struct lpfc_dmabuf *pcmd;
6370 	uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
6371 	struct fc_rdp_req_frame *rdp_req;
6372 	struct lpfc_rdp_context *rdp_context;
6373 	IOCB_t *cmd = NULL;
6374 	struct ls_rjt stat;
6375 
6376 	if (phba->sli_rev < LPFC_SLI_REV4 ||
6377 	    bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
6378 						LPFC_SLI_INTF_IF_TYPE_2) {
6379 		rjt_err = LSRJT_UNABLE_TPC;
6380 		rjt_expl = LSEXP_REQ_UNSUPPORTED;
6381 		goto error;
6382 	}
6383 
6384 	if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
6385 		rjt_err = LSRJT_UNABLE_TPC;
6386 		rjt_expl = LSEXP_REQ_UNSUPPORTED;
6387 		goto error;
6388 	}
6389 
6390 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6391 	rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
6392 
6393 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6394 			 "2422 ELS RDP Request "
6395 			 "dec len %d tag x%x port_id %d len %d\n",
6396 			 be32_to_cpu(rdp_req->rdp_des_length),
6397 			 be32_to_cpu(rdp_req->nport_id_desc.tag),
6398 			 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
6399 			 be32_to_cpu(rdp_req->nport_id_desc.length));
6400 
6401 	if (sizeof(struct fc_rdp_nport_desc) !=
6402 			be32_to_cpu(rdp_req->rdp_des_length))
6403 		goto rjt_logerr;
6404 	if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
6405 		goto rjt_logerr;
6406 	if (RDP_NPORT_ID_SIZE !=
6407 			be32_to_cpu(rdp_req->nport_id_desc.length))
6408 		goto rjt_logerr;
6409 	rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
6410 	if (!rdp_context) {
6411 		rjt_err = LSRJT_UNABLE_TPC;
6412 		goto error;
6413 	}
6414 
6415 	cmd = &cmdiocb->iocb;
6416 	rdp_context->ndlp = lpfc_nlp_get(ndlp);
6417 	if (!rdp_context->ndlp) {
6418 		kfree(rdp_context);
6419 		rjt_err = LSRJT_UNABLE_TPC;
6420 		goto error;
6421 	}
6422 	rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
6423 	rdp_context->rx_id = cmd->ulpContext;
6424 	rdp_context->cmpl = lpfc_els_rdp_cmpl;
6425 	if (lpfc_get_rdp_info(phba, rdp_context)) {
6426 		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
6427 				 "2423 Unable to send mailbox");
6428 		kfree(rdp_context);
6429 		rjt_err = LSRJT_UNABLE_TPC;
6430 		lpfc_nlp_put(ndlp);
6431 		goto error;
6432 	}
6433 
6434 	return 0;
6435 
6436 rjt_logerr:
6437 	rjt_err = LSRJT_LOGICAL_ERR;
6438 
6439 error:
6440 	memset(&stat, 0, sizeof(stat));
6441 	stat.un.b.lsRjtRsnCode = rjt_err;
6442 	stat.un.b.lsRjtRsnCodeExp = rjt_expl;
6443 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6444 	return 1;
6445 }
6446 
6447 
6448 static void
6449 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6450 {
6451 	MAILBOX_t *mb;
6452 	IOCB_t *icmd;
6453 	uint8_t *pcmd;
6454 	struct lpfc_iocbq *elsiocb;
6455 	struct lpfc_nodelist *ndlp;
6456 	struct ls_rjt *stat;
6457 	union lpfc_sli4_cfg_shdr *shdr;
6458 	struct lpfc_lcb_context *lcb_context;
6459 	struct fc_lcb_res_frame *lcb_res;
6460 	uint32_t cmdsize, shdr_status, shdr_add_status;
6461 	int rc;
6462 
6463 	mb = &pmb->u.mb;
6464 	lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
6465 	ndlp = lcb_context->ndlp;
6466 	pmb->ctx_ndlp = NULL;
6467 	pmb->ctx_buf = NULL;
6468 
6469 	shdr = (union lpfc_sli4_cfg_shdr *)
6470 			&pmb->u.mqe.un.beacon_config.header.cfg_shdr;
6471 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6472 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6473 
6474 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
6475 				"0194 SET_BEACON_CONFIG mailbox "
6476 				"completed with status x%x add_status x%x,"
6477 				" mbx status x%x\n",
6478 				shdr_status, shdr_add_status, mb->mbxStatus);
6479 
6480 	if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
6481 	    (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
6482 	    (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
6483 		mempool_free(pmb, phba->mbox_mem_pool);
6484 		goto error;
6485 	}
6486 
6487 	mempool_free(pmb, phba->mbox_mem_pool);
6488 	cmdsize = sizeof(struct fc_lcb_res_frame);
6489 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6490 			lpfc_max_els_tries, ndlp,
6491 			ndlp->nlp_DID, ELS_CMD_ACC);
6492 
6493 	/* Decrement the ndlp reference count from previous mbox command */
6494 	lpfc_nlp_put(ndlp);
6495 
6496 	if (!elsiocb)
6497 		goto free_lcb_context;
6498 
6499 	lcb_res = (struct fc_lcb_res_frame *)
6500 		(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
6501 
6502 	memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
6503 	icmd = &elsiocb->iocb;
6504 	icmd->ulpContext = lcb_context->rx_id;
6505 	icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
6506 
6507 	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
6508 	*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
6509 	lcb_res->lcb_sub_command = lcb_context->sub_command;
6510 	lcb_res->lcb_type = lcb_context->type;
6511 	lcb_res->capability = lcb_context->capability;
6512 	lcb_res->lcb_frequency = lcb_context->frequency;
6513 	lcb_res->lcb_duration = lcb_context->duration;
6514 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6515 	phba->fc_stat.elsXmitACC++;
6516 
6517 	elsiocb->context1 = lpfc_nlp_get(ndlp);
6518 	if (!elsiocb->context1) {
6519 		lpfc_els_free_iocb(phba, elsiocb);
6520 		goto out;
6521 	}
6522 
6523 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6524 	if (rc == IOCB_ERROR) {
6525 		lpfc_els_free_iocb(phba, elsiocb);
6526 		lpfc_nlp_put(ndlp);
6527 	}
6528  out:
6529 	kfree(lcb_context);
6530 	return;
6531 
6532 error:
6533 	cmdsize = sizeof(struct fc_lcb_res_frame);
6534 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6535 			lpfc_max_els_tries, ndlp,
6536 			ndlp->nlp_DID, ELS_CMD_LS_RJT);
6537 	lpfc_nlp_put(ndlp);
6538 	if (!elsiocb)
6539 		goto free_lcb_context;
6540 
6541 	icmd = &elsiocb->iocb;
6542 	icmd->ulpContext = lcb_context->rx_id;
6543 	icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
6544 	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
6545 
6546 	*((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
6547 	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
6548 	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6549 
6550 	if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
6551 		stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
6552 
6553 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6554 	phba->fc_stat.elsXmitLSRJT++;
6555 	elsiocb->context1 = lpfc_nlp_get(ndlp);
6556 	if (!elsiocb->context1) {
6557 		lpfc_els_free_iocb(phba, elsiocb);
6558 		goto free_lcb_context;
6559 	}
6560 
6561 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6562 	if (rc == IOCB_ERROR) {
6563 		lpfc_els_free_iocb(phba, elsiocb);
6564 		lpfc_nlp_put(ndlp);
6565 	}
6566 free_lcb_context:
6567 	kfree(lcb_context);
6568 }
6569 
6570 static int
6571 lpfc_sli4_set_beacon(struct lpfc_vport *vport,
6572 		     struct lpfc_lcb_context *lcb_context,
6573 		     uint32_t beacon_state)
6574 {
6575 	struct lpfc_hba *phba = vport->phba;
6576 	union lpfc_sli4_cfg_shdr *cfg_shdr;
6577 	LPFC_MBOXQ_t *mbox = NULL;
6578 	uint32_t len;
6579 	int rc;
6580 
6581 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6582 	if (!mbox)
6583 		return 1;
6584 
6585 	cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
6586 	len = sizeof(struct lpfc_mbx_set_beacon_config) -
6587 		sizeof(struct lpfc_sli4_cfg_mhdr);
6588 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6589 			 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
6590 			 LPFC_SLI4_MBX_EMBED);
6591 	mbox->ctx_ndlp = (void *)lcb_context;
6592 	mbox->vport = phba->pport;
6593 	mbox->mbox_cmpl = lpfc_els_lcb_rsp;
6594 	bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
6595 	       phba->sli4_hba.physical_port);
6596 	bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
6597 	       beacon_state);
6598 	mbox->u.mqe.un.beacon_config.word5 = 0;		/* Reserved */
6599 
6600 	/*
6601 	 *	Check bv1s bit before issuing the mailbox
6602 	 *	if bv1s == 1, LCB V1 supported
6603 	 *	else, LCB V0 supported
6604 	 */
6605 
6606 	if (phba->sli4_hba.pc_sli4_params.bv1s) {
6607 		/* COMMON_SET_BEACON_CONFIG_V1 */
6608 		cfg_shdr->request.word9 = BEACON_VERSION_V1;
6609 		lcb_context->capability |= LCB_CAPABILITY_DURATION;
6610 		bf_set(lpfc_mbx_set_beacon_port_type,
6611 		       &mbox->u.mqe.un.beacon_config, 0);
6612 		bf_set(lpfc_mbx_set_beacon_duration_v1,
6613 		       &mbox->u.mqe.un.beacon_config,
6614 		       be16_to_cpu(lcb_context->duration));
6615 	} else {
6616 		/* COMMON_SET_BEACON_CONFIG_V0 */
6617 		if (be16_to_cpu(lcb_context->duration) != 0) {
6618 			mempool_free(mbox, phba->mbox_mem_pool);
6619 			return 1;
6620 		}
6621 		cfg_shdr->request.word9 = BEACON_VERSION_V0;
6622 		lcb_context->capability &=  ~(LCB_CAPABILITY_DURATION);
6623 		bf_set(lpfc_mbx_set_beacon_state,
6624 		       &mbox->u.mqe.un.beacon_config, beacon_state);
6625 		bf_set(lpfc_mbx_set_beacon_port_type,
6626 		       &mbox->u.mqe.un.beacon_config, 1);
6627 		bf_set(lpfc_mbx_set_beacon_duration,
6628 		       &mbox->u.mqe.un.beacon_config,
6629 		       be16_to_cpu(lcb_context->duration));
6630 	}
6631 
6632 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6633 	if (rc == MBX_NOT_FINISHED) {
6634 		mempool_free(mbox, phba->mbox_mem_pool);
6635 		return 1;
6636 	}
6637 
6638 	return 0;
6639 }
6640 
6641 
6642 /**
6643  * lpfc_els_rcv_lcb - Process an unsolicited LCB
6644  * @vport: pointer to a host virtual N_Port data structure.
6645  * @cmdiocb: pointer to lpfc command iocb data structure.
6646  * @ndlp: pointer to a node-list data structure.
6647  *
6648  * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB.
6649  * First, the payload of the unsolicited LCB is checked.
6650  * Then based on Subcommand beacon will either turn on or off.
6651  *
6652  * Return code
6653  * 0 - Sent the acc response
6654  * 1 - Sent the reject response.
6655  **/
6656 static int
6657 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6658 		 struct lpfc_nodelist *ndlp)
6659 {
6660 	struct lpfc_hba *phba = vport->phba;
6661 	struct lpfc_dmabuf *pcmd;
6662 	uint8_t *lp;
6663 	struct fc_lcb_request_frame *beacon;
6664 	struct lpfc_lcb_context *lcb_context;
6665 	u8 state, rjt_err = 0;
6666 	struct ls_rjt stat;
6667 
6668 	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
6669 	lp = (uint8_t *)pcmd->virt;
6670 	beacon = (struct fc_lcb_request_frame *)pcmd->virt;
6671 
6672 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6673 			"0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
6674 			"type x%x frequency %x duration x%x\n",
6675 			lp[0], lp[1], lp[2],
6676 			beacon->lcb_command,
6677 			beacon->lcb_sub_command,
6678 			beacon->lcb_type,
6679 			beacon->lcb_frequency,
6680 			be16_to_cpu(beacon->lcb_duration));
6681 
6682 	if (beacon->lcb_sub_command != LPFC_LCB_ON &&
6683 	    beacon->lcb_sub_command != LPFC_LCB_OFF) {
6684 		rjt_err = LSRJT_CMD_UNSUPPORTED;
6685 		goto rjt;
6686 	}
6687 
6688 	if (phba->sli_rev < LPFC_SLI_REV4  ||
6689 	    phba->hba_flag & HBA_FCOE_MODE ||
6690 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
6691 	    LPFC_SLI_INTF_IF_TYPE_2)) {
6692 		rjt_err = LSRJT_CMD_UNSUPPORTED;
6693 		goto rjt;
6694 	}
6695 
6696 	lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
6697 	if (!lcb_context) {
6698 		rjt_err = LSRJT_UNABLE_TPC;
6699 		goto rjt;
6700 	}
6701 
6702 	state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
6703 	lcb_context->sub_command = beacon->lcb_sub_command;
6704 	lcb_context->capability	= 0;
6705 	lcb_context->type = beacon->lcb_type;
6706 	lcb_context->frequency = beacon->lcb_frequency;
6707 	lcb_context->duration = beacon->lcb_duration;
6708 	lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
6709 	lcb_context->rx_id = cmdiocb->iocb.ulpContext;
6710 	lcb_context->ndlp = lpfc_nlp_get(ndlp);
6711 	if (!lcb_context->ndlp) {
6712 		rjt_err = LSRJT_UNABLE_TPC;
6713 		goto rjt_free;
6714 	}
6715 
6716 	if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
6717 		lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT,
6718 				 "0193 failed to send mail box");
6719 		lpfc_nlp_put(ndlp);
6720 		rjt_err = LSRJT_UNABLE_TPC;
6721 		goto rjt_free;
6722 	}
6723 	return 0;
6724 
6725 rjt_free:
6726 	kfree(lcb_context);
6727 rjt:
6728 	memset(&stat, 0, sizeof(stat));
6729 	stat.un.b.lsRjtRsnCode = rjt_err;
6730 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6731 	return 1;
6732 }
6733 
6734 
6735 /**
6736  * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
6737  * @vport: pointer to a host virtual N_Port data structure.
6738  *
6739  * This routine cleans up any Registration State Change Notification
6740  * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
6741  * @vport together with the host_lock is used to prevent multiple thread
6742  * trying to access the RSCN array on a same @vport at the same time.
6743  **/
6744 void
6745 lpfc_els_flush_rscn(struct lpfc_vport *vport)
6746 {
6747 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6748 	struct lpfc_hba  *phba = vport->phba;
6749 	int i;
6750 
6751 	spin_lock_irq(shost->host_lock);
6752 	if (vport->fc_rscn_flush) {
6753 		/* Another thread is walking fc_rscn_id_list on this vport */
6754 		spin_unlock_irq(shost->host_lock);
6755 		return;
6756 	}
6757 	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
6758 	vport->fc_rscn_flush = 1;
6759 	spin_unlock_irq(shost->host_lock);
6760 
6761 	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6762 		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
6763 		vport->fc_rscn_id_list[i] = NULL;
6764 	}
6765 	spin_lock_irq(shost->host_lock);
6766 	vport->fc_rscn_id_cnt = 0;
6767 	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
6768 	spin_unlock_irq(shost->host_lock);
6769 	lpfc_can_disctmo(vport);
6770 	/* Indicate we are done walking this fc_rscn_id_list */
6771 	vport->fc_rscn_flush = 0;
6772 }
6773 
6774 /**
6775  * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
6776  * @vport: pointer to a host virtual N_Port data structure.
6777  * @did: remote destination port identifier.
6778  *
6779  * This routine checks whether there is any pending Registration State
6780  * Configuration Notification (RSCN) to a @did on @vport.
6781  *
6782  * Return code
6783  *   None zero - The @did matched with a pending rscn
6784  *   0 - not able to match @did with a pending rscn
6785  **/
6786 int
6787 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
6788 {
6789 	D_ID ns_did;
6790 	D_ID rscn_did;
6791 	uint32_t *lp;
6792 	uint32_t payload_len, i;
6793 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6794 
6795 	ns_did.un.word = did;
6796 
6797 	/* Never match fabric nodes for RSCNs */
6798 	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6799 		return 0;
6800 
6801 	/* If we are doing a FULL RSCN rediscovery, match everything */
6802 	if (vport->fc_flag & FC_RSCN_DISCOVERY)
6803 		return did;
6804 
6805 	spin_lock_irq(shost->host_lock);
6806 	if (vport->fc_rscn_flush) {
6807 		/* Another thread is walking fc_rscn_id_list on this vport */
6808 		spin_unlock_irq(shost->host_lock);
6809 		return 0;
6810 	}
6811 	/* Indicate we are walking fc_rscn_id_list on this vport */
6812 	vport->fc_rscn_flush = 1;
6813 	spin_unlock_irq(shost->host_lock);
6814 	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6815 		lp = vport->fc_rscn_id_list[i]->virt;
6816 		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
6817 		payload_len -= sizeof(uint32_t);	/* take off word 0 */
6818 		while (payload_len) {
6819 			rscn_did.un.word = be32_to_cpu(*lp++);
6820 			payload_len -= sizeof(uint32_t);
6821 			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
6822 			case RSCN_ADDRESS_FORMAT_PORT:
6823 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6824 				    && (ns_did.un.b.area == rscn_did.un.b.area)
6825 				    && (ns_did.un.b.id == rscn_did.un.b.id))
6826 					goto return_did_out;
6827 				break;
6828 			case RSCN_ADDRESS_FORMAT_AREA:
6829 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6830 				    && (ns_did.un.b.area == rscn_did.un.b.area))
6831 					goto return_did_out;
6832 				break;
6833 			case RSCN_ADDRESS_FORMAT_DOMAIN:
6834 				if (ns_did.un.b.domain == rscn_did.un.b.domain)
6835 					goto return_did_out;
6836 				break;
6837 			case RSCN_ADDRESS_FORMAT_FABRIC:
6838 				goto return_did_out;
6839 			}
6840 		}
6841 	}
6842 	/* Indicate we are done with walking fc_rscn_id_list on this vport */
6843 	vport->fc_rscn_flush = 0;
6844 	return 0;
6845 return_did_out:
6846 	/* Indicate we are done with walking fc_rscn_id_list on this vport */
6847 	vport->fc_rscn_flush = 0;
6848 	return did;
6849 }
6850 
6851 /**
6852  * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
6853  * @vport: pointer to a host virtual N_Port data structure.
6854  *
6855  * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
6856  * state machine for a @vport's nodes that are with pending RSCN (Registration
6857  * State Change Notification).
6858  *
6859  * Return code
6860  *   0 - Successful (currently alway return 0)
6861  **/
6862 static int
6863 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
6864 {
6865 	struct lpfc_nodelist *ndlp = NULL;
6866 
6867 	/* Move all affected nodes by pending RSCNs to NPR state. */
6868 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6869 		if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
6870 		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
6871 			continue;
6872 
6873 		/* NVME Target mode does not do RSCN Recovery. */
6874 		if (vport->phba->nvmet_support)
6875 			continue;
6876 
6877 		/* If we are in the process of doing discovery on this
6878 		 * NPort, let it continue on its own.
6879 		 */
6880 		switch (ndlp->nlp_state) {
6881 		case  NLP_STE_PLOGI_ISSUE:
6882 		case  NLP_STE_ADISC_ISSUE:
6883 		case  NLP_STE_REG_LOGIN_ISSUE:
6884 		case  NLP_STE_PRLI_ISSUE:
6885 		case  NLP_STE_LOGO_ISSUE:
6886 			continue;
6887 		}
6888 
6889 		/* Check to see if we need to NVME rescan this target
6890 		 * remoteport.
6891 		 */
6892 		if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
6893 		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
6894 			lpfc_nvme_rescan_port(vport, ndlp);
6895 
6896 		lpfc_disc_state_machine(vport, ndlp, NULL,
6897 					NLP_EVT_DEVICE_RECOVERY);
6898 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
6899 	}
6900 	return 0;
6901 }
6902 
6903 /**
6904  * lpfc_send_rscn_event - Send an RSCN event to management application
6905  * @vport: pointer to a host virtual N_Port data structure.
6906  * @cmdiocb: pointer to lpfc command iocb data structure.
6907  *
6908  * lpfc_send_rscn_event sends an RSCN netlink event to management
6909  * applications.
6910  */
6911 static void
6912 lpfc_send_rscn_event(struct lpfc_vport *vport,
6913 		struct lpfc_iocbq *cmdiocb)
6914 {
6915 	struct lpfc_dmabuf *pcmd;
6916 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6917 	uint32_t *payload_ptr;
6918 	uint32_t payload_len;
6919 	struct lpfc_rscn_event_header *rscn_event_data;
6920 
6921 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6922 	payload_ptr = (uint32_t *) pcmd->virt;
6923 	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
6924 
6925 	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
6926 		payload_len, GFP_KERNEL);
6927 	if (!rscn_event_data) {
6928 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6929 			"0147 Failed to allocate memory for RSCN event\n");
6930 		return;
6931 	}
6932 	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
6933 	rscn_event_data->payload_length = payload_len;
6934 	memcpy(rscn_event_data->rscn_payload, payload_ptr,
6935 		payload_len);
6936 
6937 	fc_host_post_vendor_event(shost,
6938 		fc_get_event_number(),
6939 		sizeof(struct lpfc_rscn_event_header) + payload_len,
6940 		(char *)rscn_event_data,
6941 		LPFC_NL_VENDOR_ID);
6942 
6943 	kfree(rscn_event_data);
6944 }
6945 
6946 /**
6947  * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
6948  * @vport: pointer to a host virtual N_Port data structure.
6949  * @cmdiocb: pointer to lpfc command iocb data structure.
6950  * @ndlp: pointer to a node-list data structure.
6951  *
6952  * This routine processes an unsolicited RSCN (Registration State Change
6953  * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
6954  * to invoke fc_host_post_event() routine to the FC transport layer. If the
6955  * discover state machine is about to begin discovery, it just accepts the
6956  * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
6957  * contains N_Port IDs for other vports on this HBA, it just accepts the
6958  * RSCN and ignore processing it. If the state machine is in the recovery
6959  * state, the fc_rscn_id_list of this @vport is walked and the
6960  * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
6961  * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
6962  * routine is invoked to handle the RSCN event.
6963  *
6964  * Return code
6965  *   0 - Just sent the acc response
6966  *   1 - Sent the acc response and waited for name server completion
6967  **/
6968 static int
6969 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6970 		  struct lpfc_nodelist *ndlp)
6971 {
6972 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6973 	struct lpfc_hba  *phba = vport->phba;
6974 	struct lpfc_dmabuf *pcmd;
6975 	uint32_t *lp, *datap;
6976 	uint32_t payload_len, length, nportid, *cmd;
6977 	int rscn_cnt;
6978 	int rscn_id = 0, hba_id = 0;
6979 	int i, tmo;
6980 
6981 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6982 	lp = (uint32_t *) pcmd->virt;
6983 
6984 	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
6985 	payload_len -= sizeof(uint32_t);	/* take off word 0 */
6986 	/* RSCN received */
6987 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6988 			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
6989 			 vport->fc_flag, payload_len, *lp,
6990 			 vport->fc_rscn_id_cnt);
6991 
6992 	/* Send an RSCN event to the management application */
6993 	lpfc_send_rscn_event(vport, cmdiocb);
6994 
6995 	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
6996 		fc_host_post_event(shost, fc_get_event_number(),
6997 			FCH_EVT_RSCN, lp[i]);
6998 
6999 	/* Check if RSCN is coming from a direct-connected remote NPort */
7000 	if (vport->fc_flag & FC_PT2PT) {
7001 		/* If so, just ACC it, no other action needed for now */
7002 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7003 				 "2024 pt2pt RSCN %08x Data: x%x x%x\n",
7004 				 *lp, vport->fc_flag, payload_len);
7005 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7006 
7007 		/* Check to see if we need to NVME rescan this target
7008 		 * remoteport.
7009 		 */
7010 		if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
7011 		    ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
7012 			lpfc_nvme_rescan_port(vport, ndlp);
7013 		return 0;
7014 	}
7015 
7016 	/* If we are about to begin discovery, just ACC the RSCN.
7017 	 * Discovery processing will satisfy it.
7018 	 */
7019 	if (vport->port_state <= LPFC_NS_QRY) {
7020 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7021 			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
7022 			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
7023 
7024 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7025 		return 0;
7026 	}
7027 
7028 	/* If this RSCN just contains NPortIDs for other vports on this HBA,
7029 	 * just ACC and ignore it.
7030 	 */
7031 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
7032 		!(vport->cfg_peer_port_login)) {
7033 		i = payload_len;
7034 		datap = lp;
7035 		while (i > 0) {
7036 			nportid = *datap++;
7037 			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
7038 			i -= sizeof(uint32_t);
7039 			rscn_id++;
7040 			if (lpfc_find_vport_by_did(phba, nportid))
7041 				hba_id++;
7042 		}
7043 		if (rscn_id == hba_id) {
7044 			/* ALL NPortIDs in RSCN are on HBA */
7045 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
7046 					 "0219 Ignore RSCN "
7047 					 "Data: x%x x%x x%x x%x\n",
7048 					 vport->fc_flag, payload_len,
7049 					 *lp, vport->fc_rscn_id_cnt);
7050 			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7051 				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
7052 				ndlp->nlp_DID, vport->port_state,
7053 				ndlp->nlp_flag);
7054 
7055 			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
7056 				ndlp, NULL);
7057 			return 0;
7058 		}
7059 	}
7060 
7061 	spin_lock_irq(shost->host_lock);
7062 	if (vport->fc_rscn_flush) {
7063 		/* Another thread is walking fc_rscn_id_list on this vport */
7064 		vport->fc_flag |= FC_RSCN_DISCOVERY;
7065 		spin_unlock_irq(shost->host_lock);
7066 		/* Send back ACC */
7067 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7068 		return 0;
7069 	}
7070 	/* Indicate we are walking fc_rscn_id_list on this vport */
7071 	vport->fc_rscn_flush = 1;
7072 	spin_unlock_irq(shost->host_lock);
7073 	/* Get the array count after successfully have the token */
7074 	rscn_cnt = vport->fc_rscn_id_cnt;
7075 	/* If we are already processing an RSCN, save the received
7076 	 * RSCN payload buffer, cmdiocb->context2 to process later.
7077 	 */
7078 	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
7079 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7080 			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
7081 			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
7082 
7083 		spin_lock_irq(shost->host_lock);
7084 		vport->fc_flag |= FC_RSCN_DEFERRED;
7085 
7086 		/* Restart disctmo if its already running */
7087 		if (vport->fc_flag & FC_DISC_TMO) {
7088 			tmo = ((phba->fc_ratov * 3) + 3);
7089 			mod_timer(&vport->fc_disctmo,
7090 				  jiffies + msecs_to_jiffies(1000 * tmo));
7091 		}
7092 		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
7093 		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
7094 			vport->fc_flag |= FC_RSCN_MODE;
7095 			spin_unlock_irq(shost->host_lock);
7096 			if (rscn_cnt) {
7097 				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
7098 				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
7099 			}
7100 			if ((rscn_cnt) &&
7101 			    (payload_len + length <= LPFC_BPL_SIZE)) {
7102 				*cmd &= ELS_CMD_MASK;
7103 				*cmd |= cpu_to_be32(payload_len + length);
7104 				memcpy(((uint8_t *)cmd) + length, lp,
7105 				       payload_len);
7106 			} else {
7107 				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
7108 				vport->fc_rscn_id_cnt++;
7109 				/* If we zero, cmdiocb->context2, the calling
7110 				 * routine will not try to free it.
7111 				 */
7112 				cmdiocb->context2 = NULL;
7113 			}
7114 			/* Deferred RSCN */
7115 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
7116 					 "0235 Deferred RSCN "
7117 					 "Data: x%x x%x x%x\n",
7118 					 vport->fc_rscn_id_cnt, vport->fc_flag,
7119 					 vport->port_state);
7120 		} else {
7121 			vport->fc_flag |= FC_RSCN_DISCOVERY;
7122 			spin_unlock_irq(shost->host_lock);
7123 			/* ReDiscovery RSCN */
7124 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
7125 					 "0234 ReDiscovery RSCN "
7126 					 "Data: x%x x%x x%x\n",
7127 					 vport->fc_rscn_id_cnt, vport->fc_flag,
7128 					 vport->port_state);
7129 		}
7130 		/* Indicate we are done walking fc_rscn_id_list on this vport */
7131 		vport->fc_rscn_flush = 0;
7132 		/* Send back ACC */
7133 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7134 		/* send RECOVERY event for ALL nodes that match RSCN payload */
7135 		lpfc_rscn_recovery_check(vport);
7136 		return 0;
7137 	}
7138 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7139 		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
7140 		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
7141 
7142 	spin_lock_irq(shost->host_lock);
7143 	vport->fc_flag |= FC_RSCN_MODE;
7144 	spin_unlock_irq(shost->host_lock);
7145 	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
7146 	/* Indicate we are done walking fc_rscn_id_list on this vport */
7147 	vport->fc_rscn_flush = 0;
7148 	/*
7149 	 * If we zero, cmdiocb->context2, the calling routine will
7150 	 * not try to free it.
7151 	 */
7152 	cmdiocb->context2 = NULL;
7153 	lpfc_set_disctmo(vport);
7154 	/* Send back ACC */
7155 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7156 	/* send RECOVERY event for ALL nodes that match RSCN payload */
7157 	lpfc_rscn_recovery_check(vport);
7158 	return lpfc_els_handle_rscn(vport);
7159 }
7160 
7161 /**
7162  * lpfc_els_handle_rscn - Handle rscn for a vport
7163  * @vport: pointer to a host virtual N_Port data structure.
7164  *
7165  * This routine handles the Registration State Configuration Notification
7166  * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
7167  * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
7168  * if the ndlp to NameServer exists, a Common Transport (CT) command to the
7169  * NameServer shall be issued. If CT command to the NameServer fails to be
7170  * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
7171  * RSCN activities with the @vport.
7172  *
7173  * Return code
7174  *   0 - Cleaned up rscn on the @vport
7175  *   1 - Wait for plogi to name server before proceed
7176  **/
7177 int
7178 lpfc_els_handle_rscn(struct lpfc_vport *vport)
7179 {
7180 	struct lpfc_nodelist *ndlp;
7181 	struct lpfc_hba  *phba = vport->phba;
7182 
7183 	/* Ignore RSCN if the port is being torn down. */
7184 	if (vport->load_flag & FC_UNLOADING) {
7185 		lpfc_els_flush_rscn(vport);
7186 		return 0;
7187 	}
7188 
7189 	/* Start timer for RSCN processing */
7190 	lpfc_set_disctmo(vport);
7191 
7192 	/* RSCN processed */
7193 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
7194 			 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n",
7195 			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
7196 			 vport->port_state, vport->num_disc_nodes,
7197 			 vport->gidft_inp);
7198 
7199 	/* To process RSCN, first compare RSCN data with NameServer */
7200 	vport->fc_ns_retry = 0;
7201 	vport->num_disc_nodes = 0;
7202 
7203 	ndlp = lpfc_findnode_did(vport, NameServer_DID);
7204 	if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
7205 		/* Good ndlp, issue CT Request to NameServer.  Need to
7206 		 * know how many gidfts were issued.  If none, then just
7207 		 * flush the RSCN.  Otherwise, the outstanding requests
7208 		 * need to complete.
7209 		 */
7210 		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
7211 			if (lpfc_issue_gidft(vport) > 0)
7212 				return 1;
7213 		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
7214 			if (lpfc_issue_gidpt(vport) > 0)
7215 				return 1;
7216 		} else {
7217 			return 1;
7218 		}
7219 	} else {
7220 		/* Nameserver login in question.  Revalidate. */
7221 		if (ndlp) {
7222 			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
7223 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
7224 		} else {
7225 			ndlp = lpfc_nlp_init(vport, NameServer_DID);
7226 			if (!ndlp) {
7227 				lpfc_els_flush_rscn(vport);
7228 				return 0;
7229 			}
7230 			ndlp->nlp_prev_state = ndlp->nlp_state;
7231 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
7232 		}
7233 		ndlp->nlp_type |= NLP_FABRIC;
7234 		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
7235 		/* Wait for NameServer login cmpl before we can
7236 		 * continue
7237 		 */
7238 		return 1;
7239 	}
7240 
7241 	lpfc_els_flush_rscn(vport);
7242 	return 0;
7243 }
7244 
7245 /**
7246  * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
7247  * @vport: pointer to a host virtual N_Port data structure.
7248  * @cmdiocb: pointer to lpfc command iocb data structure.
7249  * @ndlp: pointer to a node-list data structure.
7250  *
7251  * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
7252  * unsolicited event. An unsolicited FLOGI can be received in a point-to-
7253  * point topology. As an unsolicited FLOGI should not be received in a loop
7254  * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
7255  * lpfc_check_sparm() routine is invoked to check the parameters in the
7256  * unsolicited FLOGI. If parameters validation failed, the routine
7257  * lpfc_els_rsp_reject() shall be called with reject reason code set to
7258  * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
7259  * FLOGI shall be compared with the Port WWN of the @vport to determine who
7260  * will initiate PLOGI. The higher lexicographical value party shall has
7261  * higher priority (as the winning port) and will initiate PLOGI and
7262  * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
7263  * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
7264  * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
7265  *
7266  * Return code
7267  *   0 - Successfully processed the unsolicited flogi
7268  *   1 - Failed to process the unsolicited flogi
7269  **/
7270 static int
7271 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7272 		   struct lpfc_nodelist *ndlp)
7273 {
7274 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7275 	struct lpfc_hba  *phba = vport->phba;
7276 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7277 	uint32_t *lp = (uint32_t *) pcmd->virt;
7278 	IOCB_t *icmd = &cmdiocb->iocb;
7279 	struct serv_parm *sp;
7280 	LPFC_MBOXQ_t *mbox;
7281 	uint32_t cmd, did;
7282 	int rc;
7283 	uint32_t fc_flag = 0;
7284 	uint32_t port_state = 0;
7285 
7286 	cmd = *lp++;
7287 	sp = (struct serv_parm *) lp;
7288 
7289 	/* FLOGI received */
7290 
7291 	lpfc_set_disctmo(vport);
7292 
7293 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
7294 		/* We should never receive a FLOGI in loop mode, ignore it */
7295 		did = icmd->un.elsreq64.remoteID;
7296 
7297 		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
7298 		   Loop Mode */
7299 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
7300 				 "0113 An FLOGI ELS command x%x was "
7301 				 "received from DID x%x in Loop Mode\n",
7302 				 cmd, did);
7303 		return 1;
7304 	}
7305 
7306 	(void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
7307 
7308 	/*
7309 	 * If our portname is greater than the remote portname,
7310 	 * then we initiate Nport login.
7311 	 */
7312 
7313 	rc = memcmp(&vport->fc_portname, &sp->portName,
7314 		    sizeof(struct lpfc_name));
7315 
7316 	if (!rc) {
7317 		if (phba->sli_rev < LPFC_SLI_REV4) {
7318 			mbox = mempool_alloc(phba->mbox_mem_pool,
7319 					     GFP_KERNEL);
7320 			if (!mbox)
7321 				return 1;
7322 			lpfc_linkdown(phba);
7323 			lpfc_init_link(phba, mbox,
7324 				       phba->cfg_topology,
7325 				       phba->cfg_link_speed);
7326 			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
7327 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
7328 			mbox->vport = vport;
7329 			rc = lpfc_sli_issue_mbox(phba, mbox,
7330 						 MBX_NOWAIT);
7331 			lpfc_set_loopback_flag(phba);
7332 			if (rc == MBX_NOT_FINISHED)
7333 				mempool_free(mbox, phba->mbox_mem_pool);
7334 			return 1;
7335 		}
7336 
7337 		/* abort the flogi coming back to ourselves
7338 		 * due to external loopback on the port.
7339 		 */
7340 		lpfc_els_abort_flogi(phba);
7341 		return 0;
7342 
7343 	} else if (rc > 0) {	/* greater than */
7344 		spin_lock_irq(shost->host_lock);
7345 		vport->fc_flag |= FC_PT2PT_PLOGI;
7346 		spin_unlock_irq(shost->host_lock);
7347 
7348 		/* If we have the high WWPN we can assign our own
7349 		 * myDID; otherwise, we have to WAIT for a PLOGI
7350 		 * from the remote NPort to find out what it
7351 		 * will be.
7352 		 */
7353 		vport->fc_myDID = PT2PT_LocalID;
7354 	} else {
7355 		vport->fc_myDID = PT2PT_RemoteID;
7356 	}
7357 
7358 	/*
7359 	 * The vport state should go to LPFC_FLOGI only
7360 	 * AFTER we issue a FLOGI, not receive one.
7361 	 */
7362 	spin_lock_irq(shost->host_lock);
7363 	fc_flag = vport->fc_flag;
7364 	port_state = vport->port_state;
7365 	vport->fc_flag |= FC_PT2PT;
7366 	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
7367 
7368 	/* Acking an unsol FLOGI.  Count 1 for link bounce
7369 	 * work-around.
7370 	 */
7371 	vport->rcv_flogi_cnt++;
7372 	spin_unlock_irq(shost->host_lock);
7373 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7374 			 "3311 Rcv Flogi PS x%x new PS x%x "
7375 			 "fc_flag x%x new fc_flag x%x\n",
7376 			 port_state, vport->port_state,
7377 			 fc_flag, vport->fc_flag);
7378 
7379 	/*
7380 	 * We temporarily set fc_myDID to make it look like we are
7381 	 * a Fabric. This is done just so we end up with the right
7382 	 * did / sid on the FLOGI ACC rsp.
7383 	 */
7384 	did = vport->fc_myDID;
7385 	vport->fc_myDID = Fabric_DID;
7386 
7387 	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
7388 
7389 	/* Defer ACC response until AFTER we issue a FLOGI */
7390 	if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
7391 		phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
7392 		phba->defer_flogi_acc_ox_id =
7393 					cmdiocb->iocb.unsli3.rcvsli3.ox_id;
7394 
7395 		vport->fc_myDID = did;
7396 
7397 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7398 				 "3344 Deferring FLOGI ACC: rx_id: x%x,"
7399 				 " ox_id: x%x, hba_flag x%x\n",
7400 				 phba->defer_flogi_acc_rx_id,
7401 				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
7402 
7403 		phba->defer_flogi_acc_flag = true;
7404 
7405 		return 0;
7406 	}
7407 
7408 	/* Send back ACC */
7409 	lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
7410 
7411 	/* Now lets put fc_myDID back to what its supposed to be */
7412 	vport->fc_myDID = did;
7413 
7414 	return 0;
7415 }
7416 
7417 /**
7418  * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
7419  * @vport: pointer to a host virtual N_Port data structure.
7420  * @cmdiocb: pointer to lpfc command iocb data structure.
7421  * @ndlp: pointer to a node-list data structure.
7422  *
7423  * This routine processes Request Node Identification Data (RNID) IOCB
7424  * received as an ELS unsolicited event. Only when the RNID specified format
7425  * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
7426  * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
7427  * Accept (ACC) the RNID ELS command. All the other RNID formats are
7428  * rejected by invoking the lpfc_els_rsp_reject() routine.
7429  *
7430  * Return code
7431  *   0 - Successfully processed rnid iocb (currently always return 0)
7432  **/
7433 static int
7434 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7435 		  struct lpfc_nodelist *ndlp)
7436 {
7437 	struct lpfc_dmabuf *pcmd;
7438 	uint32_t *lp;
7439 	RNID *rn;
7440 	struct ls_rjt stat;
7441 
7442 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7443 	lp = (uint32_t *) pcmd->virt;
7444 
7445 	lp++;
7446 	rn = (RNID *) lp;
7447 
7448 	/* RNID received */
7449 
7450 	switch (rn->Format) {
7451 	case 0:
7452 	case RNID_TOPOLOGY_DISC:
7453 		/* Send back ACC */
7454 		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
7455 		break;
7456 	default:
7457 		/* Reject this request because format not supported */
7458 		stat.un.b.lsRjtRsvd0 = 0;
7459 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7460 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7461 		stat.un.b.vendorUnique = 0;
7462 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
7463 			NULL);
7464 	}
7465 	return 0;
7466 }
7467 
7468 /**
7469  * lpfc_els_rcv_echo - Process an unsolicited echo iocb
7470  * @vport: pointer to a host virtual N_Port data structure.
7471  * @cmdiocb: pointer to lpfc command iocb data structure.
7472  * @ndlp: pointer to a node-list data structure.
7473  *
7474  * Return code
7475  *   0 - Successfully processed echo iocb (currently always return 0)
7476  **/
7477 static int
7478 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7479 		  struct lpfc_nodelist *ndlp)
7480 {
7481 	uint8_t *pcmd;
7482 
7483 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
7484 
7485 	/* skip over first word of echo command to find echo data */
7486 	pcmd += sizeof(uint32_t);
7487 
7488 	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
7489 	return 0;
7490 }
7491 
7492 /**
7493  * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
7494  * @vport: pointer to a host virtual N_Port data structure.
7495  * @cmdiocb: pointer to lpfc command iocb data structure.
7496  * @ndlp: pointer to a node-list data structure.
7497  *
7498  * This routine processes a Link Incident Report Registration(LIRR) IOCB
7499  * received as an ELS unsolicited event. Currently, this function just invokes
7500  * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
7501  *
7502  * Return code
7503  *   0 - Successfully processed lirr iocb (currently always return 0)
7504  **/
7505 static int
7506 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7507 		  struct lpfc_nodelist *ndlp)
7508 {
7509 	struct ls_rjt stat;
7510 
7511 	/* For now, unconditionally reject this command */
7512 	stat.un.b.lsRjtRsvd0 = 0;
7513 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7514 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7515 	stat.un.b.vendorUnique = 0;
7516 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7517 	return 0;
7518 }
7519 
7520 /**
7521  * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
7522  * @vport: pointer to a host virtual N_Port data structure.
7523  * @cmdiocb: pointer to lpfc command iocb data structure.
7524  * @ndlp: pointer to a node-list data structure.
7525  *
7526  * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
7527  * received as an ELS unsolicited event. A request to RRQ shall only
7528  * be accepted if the Originator Nx_Port N_Port_ID or the Responder
7529  * Nx_Port N_Port_ID of the target Exchange is the same as the
7530  * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
7531  * not accepted, an LS_RJT with reason code "Unable to perform
7532  * command request" and reason code explanation "Invalid Originator
7533  * S_ID" shall be returned. For now, we just unconditionally accept
7534  * RRQ from the target.
7535  **/
7536 static void
7537 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7538 		 struct lpfc_nodelist *ndlp)
7539 {
7540 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7541 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
7542 		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
7543 }
7544 
7545 /**
7546  * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
7547  * @phba: pointer to lpfc hba data structure.
7548  * @pmb: pointer to the driver internal queue element for mailbox command.
7549  *
7550  * This routine is the completion callback function for the MBX_READ_LNK_STAT
7551  * mailbox command. This callback function is to actually send the Accept
7552  * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
7553  * collects the link statistics from the completion of the MBX_READ_LNK_STAT
7554  * mailbox command, constructs the RLS response with the link statistics
7555  * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
7556  * response to the RLS.
7557  *
7558  * Note that the ndlp reference count will be incremented by 1 for holding the
7559  * ndlp and the reference to ndlp will be stored into the context1 field of
7560  * the IOCB for the completion callback function to the RLS Accept Response
7561  * ELS IOCB command.
7562  *
7563  **/
7564 static void
7565 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7566 {
7567 	int rc = 0;
7568 	MAILBOX_t *mb;
7569 	IOCB_t *icmd;
7570 	struct RLS_RSP *rls_rsp;
7571 	uint8_t *pcmd;
7572 	struct lpfc_iocbq *elsiocb;
7573 	struct lpfc_nodelist *ndlp;
7574 	uint16_t oxid;
7575 	uint16_t rxid;
7576 	uint32_t cmdsize;
7577 
7578 	mb = &pmb->u.mb;
7579 
7580 	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
7581 	rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
7582 	oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
7583 	pmb->ctx_buf = NULL;
7584 	pmb->ctx_ndlp = NULL;
7585 
7586 	if (mb->mbxStatus) {
7587 		mempool_free(pmb, phba->mbox_mem_pool);
7588 		return;
7589 	}
7590 
7591 	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
7592 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7593 				     lpfc_max_els_tries, ndlp,
7594 				     ndlp->nlp_DID, ELS_CMD_ACC);
7595 
7596 	/* Decrement the ndlp reference count from previous mbox command */
7597 	lpfc_nlp_put(ndlp);
7598 
7599 	if (!elsiocb) {
7600 		mempool_free(pmb, phba->mbox_mem_pool);
7601 		return;
7602 	}
7603 
7604 	icmd = &elsiocb->iocb;
7605 	icmd->ulpContext = rxid;
7606 	icmd->unsli3.rcvsli3.ox_id = oxid;
7607 
7608 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7609 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7610 	pcmd += sizeof(uint32_t); /* Skip past command */
7611 	rls_rsp = (struct RLS_RSP *)pcmd;
7612 
7613 	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
7614 	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
7615 	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
7616 	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
7617 	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
7618 	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7619 	mempool_free(pmb, phba->mbox_mem_pool);
7620 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
7621 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
7622 			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
7623 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
7624 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
7625 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7626 			 ndlp->nlp_rpi);
7627 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7628 	phba->fc_stat.elsXmitACC++;
7629 	elsiocb->context1 = lpfc_nlp_get(ndlp);
7630 	if (!elsiocb->context1) {
7631 		lpfc_els_free_iocb(phba, elsiocb);
7632 		return;
7633 	}
7634 
7635 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7636 	if (rc == IOCB_ERROR) {
7637 		lpfc_els_free_iocb(phba, elsiocb);
7638 		lpfc_nlp_put(ndlp);
7639 	}
7640 	return;
7641 }
7642 
7643 /**
7644  * lpfc_els_rcv_rls - Process an unsolicited rls iocb
7645  * @vport: pointer to a host virtual N_Port data structure.
7646  * @cmdiocb: pointer to lpfc command iocb data structure.
7647  * @ndlp: pointer to a node-list data structure.
7648  *
7649  * This routine processes Read Link Status (RLS) IOCB received as an
7650  * ELS unsolicited event. It first checks the remote port state. If the
7651  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7652  * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
7653  * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
7654  * for reading the HBA link statistics. It is for the callback function,
7655  * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
7656  * to actually sending out RPL Accept (ACC) response.
7657  *
7658  * Return codes
7659  *   0 - Successfully processed rls iocb (currently always return 0)
7660  **/
7661 static int
7662 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7663 		 struct lpfc_nodelist *ndlp)
7664 {
7665 	struct lpfc_hba *phba = vport->phba;
7666 	LPFC_MBOXQ_t *mbox;
7667 	struct ls_rjt stat;
7668 
7669 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7670 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7671 		/* reject the unsolicited RLS request and done with it */
7672 		goto reject_out;
7673 
7674 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
7675 	if (mbox) {
7676 		lpfc_read_lnk_stat(phba, mbox);
7677 		mbox->ctx_buf = (void *)((unsigned long)
7678 			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
7679 			cmdiocb->iocb.ulpContext)); /* rx_id */
7680 		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
7681 		if (!mbox->ctx_ndlp)
7682 			goto node_err;
7683 		mbox->vport = vport;
7684 		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
7685 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7686 			!= MBX_NOT_FINISHED)
7687 			/* Mbox completion will send ELS Response */
7688 			return 0;
7689 		/* Decrement reference count used for the failed mbox
7690 		 * command.
7691 		 */
7692 		lpfc_nlp_put(ndlp);
7693 node_err:
7694 		mempool_free(mbox, phba->mbox_mem_pool);
7695 	}
7696 reject_out:
7697 	/* issue rejection response */
7698 	stat.un.b.lsRjtRsvd0 = 0;
7699 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7700 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7701 	stat.un.b.vendorUnique = 0;
7702 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7703 	return 0;
7704 }
7705 
7706 /**
7707  * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
7708  * @vport: pointer to a host virtual N_Port data structure.
7709  * @cmdiocb: pointer to lpfc command iocb data structure.
7710  * @ndlp: pointer to a node-list data structure.
7711  *
7712  * This routine processes Read Timout Value (RTV) IOCB received as an
7713  * ELS unsolicited event. It first checks the remote port state. If the
7714  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7715  * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
7716  * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
7717  * Value (RTV) unsolicited IOCB event.
7718  *
7719  * Note that the ndlp reference count will be incremented by 1 for holding the
7720  * ndlp and the reference to ndlp will be stored into the context1 field of
7721  * the IOCB for the completion callback function to the RTV Accept Response
7722  * ELS IOCB command.
7723  *
7724  * Return codes
7725  *   0 - Successfully processed rtv iocb (currently always return 0)
7726  **/
7727 static int
7728 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7729 		 struct lpfc_nodelist *ndlp)
7730 {
7731 	int rc = 0;
7732 	struct lpfc_hba *phba = vport->phba;
7733 	struct ls_rjt stat;
7734 	struct RTV_RSP *rtv_rsp;
7735 	uint8_t *pcmd;
7736 	struct lpfc_iocbq *elsiocb;
7737 	uint32_t cmdsize;
7738 
7739 
7740 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7741 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7742 		/* reject the unsolicited RTV request and done with it */
7743 		goto reject_out;
7744 
7745 	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
7746 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7747 				     lpfc_max_els_tries, ndlp,
7748 				     ndlp->nlp_DID, ELS_CMD_ACC);
7749 
7750 	if (!elsiocb)
7751 		return 1;
7752 
7753 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7754 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7755 	pcmd += sizeof(uint32_t); /* Skip past command */
7756 
7757 	/* use the command's xri in the response */
7758 	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
7759 	elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
7760 
7761 	rtv_rsp = (struct RTV_RSP *)pcmd;
7762 
7763 	/* populate RTV payload */
7764 	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
7765 	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
7766 	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
7767 	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
7768 	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
7769 
7770 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
7771 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
7772 			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
7773 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
7774 			 "Data: x%x x%x x%x\n",
7775 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
7776 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7777 			 ndlp->nlp_rpi,
7778 			rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
7779 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7780 	phba->fc_stat.elsXmitACC++;
7781 	elsiocb->context1 = lpfc_nlp_get(ndlp);
7782 	if (!elsiocb->context1) {
7783 		lpfc_els_free_iocb(phba, elsiocb);
7784 		return 0;
7785 	}
7786 
7787 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7788 	if (rc == IOCB_ERROR) {
7789 		lpfc_els_free_iocb(phba, elsiocb);
7790 		lpfc_nlp_put(ndlp);
7791 	}
7792 	return 0;
7793 
7794 reject_out:
7795 	/* issue rejection response */
7796 	stat.un.b.lsRjtRsvd0 = 0;
7797 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7798 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7799 	stat.un.b.vendorUnique = 0;
7800 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7801 	return 0;
7802 }
7803 
7804 /* lpfc_issue_els_rrq - Process an unsolicited rrq iocb
7805  * @vport: pointer to a host virtual N_Port data structure.
7806  * @ndlp: pointer to a node-list data structure.
7807  * @did: DID of the target.
7808  * @rrq: Pointer to the rrq struct.
7809  *
7810  * Build a ELS RRQ command and send it to the target. If the issue_iocb is
7811  * Successful the the completion handler will clear the RRQ.
7812  *
7813  * Return codes
7814  *   0 - Successfully sent rrq els iocb.
7815  *   1 - Failed to send rrq els iocb.
7816  **/
7817 static int
7818 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7819 			uint32_t did, struct lpfc_node_rrq *rrq)
7820 {
7821 	struct lpfc_hba  *phba = vport->phba;
7822 	struct RRQ *els_rrq;
7823 	struct lpfc_iocbq *elsiocb;
7824 	uint8_t *pcmd;
7825 	uint16_t cmdsize;
7826 	int ret;
7827 
7828 	if (!ndlp)
7829 		return 1;
7830 
7831 	/* If ndlp is not NULL, we will bump the reference count on it */
7832 	cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
7833 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
7834 				     ELS_CMD_RRQ);
7835 	if (!elsiocb)
7836 		return 1;
7837 
7838 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7839 
7840 	/* For RRQ request, remainder of payload is Exchange IDs */
7841 	*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
7842 	pcmd += sizeof(uint32_t);
7843 	els_rrq = (struct RRQ *) pcmd;
7844 
7845 	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
7846 	bf_set(rrq_rxid, els_rrq, rrq->rxid);
7847 	bf_set(rrq_did, els_rrq, vport->fc_myDID);
7848 	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
7849 	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
7850 
7851 
7852 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7853 		"Issue RRQ:     did:x%x",
7854 		did, rrq->xritag, rrq->rxid);
7855 	elsiocb->context_un.rrq = rrq;
7856 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
7857 
7858 	lpfc_nlp_get(ndlp);
7859 	elsiocb->context1 = ndlp;
7860 
7861 	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7862 	if (ret == IOCB_ERROR)
7863 		goto io_err;
7864 	return 0;
7865 
7866  io_err:
7867 	lpfc_els_free_iocb(phba, elsiocb);
7868 	lpfc_nlp_put(ndlp);
7869 	return 1;
7870 }
7871 
7872 /**
7873  * lpfc_send_rrq - Sends ELS RRQ if needed.
7874  * @phba: pointer to lpfc hba data structure.
7875  * @rrq: pointer to the active rrq.
7876  *
7877  * This routine will call the lpfc_issue_els_rrq if the rrq is
7878  * still active for the xri. If this function returns a failure then
7879  * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
7880  *
7881  * Returns 0 Success.
7882  *         1 Failure.
7883  **/
7884 int
7885 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
7886 {
7887 	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
7888 						       rrq->nlp_DID);
7889 	if (!ndlp)
7890 		return 1;
7891 
7892 	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
7893 		return lpfc_issue_els_rrq(rrq->vport, ndlp,
7894 					 rrq->nlp_DID, rrq);
7895 	else
7896 		return 1;
7897 }
7898 
7899 /**
7900  * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
7901  * @vport: pointer to a host virtual N_Port data structure.
7902  * @cmdsize: size of the ELS command.
7903  * @oldiocb: pointer to the original lpfc command iocb data structure.
7904  * @ndlp: pointer to a node-list data structure.
7905  *
7906  * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command.
7907  * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
7908  *
7909  * Note that the ndlp reference count will be incremented by 1 for holding the
7910  * ndlp and the reference to ndlp will be stored into the context1 field of
7911  * the IOCB for the completion callback function to the RPL Accept Response
7912  * ELS command.
7913  *
7914  * Return code
7915  *   0 - Successfully issued ACC RPL ELS command
7916  *   1 - Failed to issue ACC RPL ELS command
7917  **/
7918 static int
7919 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
7920 		     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7921 {
7922 	int rc = 0;
7923 	struct lpfc_hba *phba = vport->phba;
7924 	IOCB_t *icmd, *oldcmd;
7925 	RPL_RSP rpl_rsp;
7926 	struct lpfc_iocbq *elsiocb;
7927 	uint8_t *pcmd;
7928 
7929 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
7930 				     ndlp->nlp_DID, ELS_CMD_ACC);
7931 
7932 	if (!elsiocb)
7933 		return 1;
7934 
7935 	icmd = &elsiocb->iocb;
7936 	oldcmd = &oldiocb->iocb;
7937 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
7938 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
7939 
7940 	pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7941 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7942 	pcmd += sizeof(uint16_t);
7943 	*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
7944 	pcmd += sizeof(uint16_t);
7945 
7946 	/* Setup the RPL ACC payload */
7947 	rpl_rsp.listLen = be32_to_cpu(1);
7948 	rpl_rsp.index = 0;
7949 	rpl_rsp.port_num_blk.portNum = 0;
7950 	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
7951 	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7952 	    sizeof(struct lpfc_name));
7953 	memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7954 	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
7955 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7956 			 "0120 Xmit ELS RPL ACC response tag x%x "
7957 			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
7958 			 "rpi x%x\n",
7959 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
7960 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7961 			 ndlp->nlp_rpi);
7962 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7963 	phba->fc_stat.elsXmitACC++;
7964 	elsiocb->context1 = lpfc_nlp_get(ndlp);
7965 	if (!elsiocb->context1) {
7966 		lpfc_els_free_iocb(phba, elsiocb);
7967 		return 1;
7968 	}
7969 
7970 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7971 	if (rc == IOCB_ERROR) {
7972 		lpfc_els_free_iocb(phba, elsiocb);
7973 		lpfc_nlp_put(ndlp);
7974 		return 1;
7975 	}
7976 
7977 	return 0;
7978 }
7979 
7980 /**
7981  * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
7982  * @vport: pointer to a host virtual N_Port data structure.
7983  * @cmdiocb: pointer to lpfc command iocb data structure.
7984  * @ndlp: pointer to a node-list data structure.
7985  *
7986  * This routine processes Read Port List (RPL) IOCB received as an ELS
7987  * unsolicited event. It first checks the remote port state. If the remote
7988  * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
7989  * invokes the lpfc_els_rsp_reject() routine to send reject response.
7990  * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
7991  * to accept the RPL.
7992  *
7993  * Return code
7994  *   0 - Successfully processed rpl iocb (currently always return 0)
7995  **/
7996 static int
7997 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7998 		 struct lpfc_nodelist *ndlp)
7999 {
8000 	struct lpfc_dmabuf *pcmd;
8001 	uint32_t *lp;
8002 	uint32_t maxsize;
8003 	uint16_t cmdsize;
8004 	RPL *rpl;
8005 	struct ls_rjt stat;
8006 
8007 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
8008 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
8009 		/* issue rejection response */
8010 		stat.un.b.lsRjtRsvd0 = 0;
8011 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
8012 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
8013 		stat.un.b.vendorUnique = 0;
8014 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
8015 			NULL);
8016 		/* rejected the unsolicited RPL request and done with it */
8017 		return 0;
8018 	}
8019 
8020 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
8021 	lp = (uint32_t *) pcmd->virt;
8022 	rpl = (RPL *) (lp + 1);
8023 	maxsize = be32_to_cpu(rpl->maxsize);
8024 
8025 	/* We support only one port */
8026 	if ((rpl->index == 0) &&
8027 	    ((maxsize == 0) ||
8028 	     ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
8029 		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
8030 	} else {
8031 		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
8032 	}
8033 	lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
8034 
8035 	return 0;
8036 }
8037 
8038 /**
8039  * lpfc_els_rcv_farp - Process an unsolicited farp request els command
8040  * @vport: pointer to a virtual N_Port data structure.
8041  * @cmdiocb: pointer to lpfc command iocb data structure.
8042  * @ndlp: pointer to a node-list data structure.
8043  *
8044  * This routine processes Fibre Channel Address Resolution Protocol
8045  * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
8046  * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
8047  * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
8048  * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
8049  * remote PortName is compared against the FC PortName stored in the @vport
8050  * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
8051  * compared against the FC NodeName stored in the @vport data structure.
8052  * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
8053  * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
8054  * invoked to send out FARP Response to the remote node. Before sending the
8055  * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
8056  * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
8057  * routine is invoked to log into the remote port first.
8058  *
8059  * Return code
8060  *   0 - Either the FARP Match Mode not supported or successfully processed
8061  **/
8062 static int
8063 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8064 		  struct lpfc_nodelist *ndlp)
8065 {
8066 	struct lpfc_dmabuf *pcmd;
8067 	uint32_t *lp;
8068 	IOCB_t *icmd;
8069 	FARP *fp;
8070 	uint32_t cnt, did;
8071 
8072 	icmd = &cmdiocb->iocb;
8073 	did = icmd->un.elsreq64.remoteID;
8074 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
8075 	lp = (uint32_t *) pcmd->virt;
8076 
8077 	lp++;
8078 	fp = (FARP *) lp;
8079 	/* FARP-REQ received from DID <did> */
8080 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8081 			 "0601 FARP-REQ received from DID x%x\n", did);
8082 	/* We will only support match on WWPN or WWNN */
8083 	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
8084 		return 0;
8085 	}
8086 
8087 	cnt = 0;
8088 	/* If this FARP command is searching for my portname */
8089 	if (fp->Mflags & FARP_MATCH_PORT) {
8090 		if (memcmp(&fp->RportName, &vport->fc_portname,
8091 			   sizeof(struct lpfc_name)) == 0)
8092 			cnt = 1;
8093 	}
8094 
8095 	/* If this FARP command is searching for my nodename */
8096 	if (fp->Mflags & FARP_MATCH_NODE) {
8097 		if (memcmp(&fp->RnodeName, &vport->fc_nodename,
8098 			   sizeof(struct lpfc_name)) == 0)
8099 			cnt = 1;
8100 	}
8101 
8102 	if (cnt) {
8103 		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
8104 		   (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
8105 			/* Log back into the node before sending the FARP. */
8106 			if (fp->Rflags & FARP_REQUEST_PLOGI) {
8107 				ndlp->nlp_prev_state = ndlp->nlp_state;
8108 				lpfc_nlp_set_state(vport, ndlp,
8109 						   NLP_STE_PLOGI_ISSUE);
8110 				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
8111 			}
8112 
8113 			/* Send a FARP response to that node */
8114 			if (fp->Rflags & FARP_REQUEST_FARPR)
8115 				lpfc_issue_els_farpr(vport, did, 0);
8116 		}
8117 	}
8118 	return 0;
8119 }
8120 
8121 /**
8122  * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
8123  * @vport: pointer to a host virtual N_Port data structure.
8124  * @cmdiocb: pointer to lpfc command iocb data structure.
8125  * @ndlp: pointer to a node-list data structure.
8126  *
8127  * This routine processes Fibre Channel Address Resolution Protocol
8128  * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
8129  * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
8130  * the FARP response request.
8131  *
8132  * Return code
8133  *   0 - Successfully processed FARPR IOCB (currently always return 0)
8134  **/
8135 static int
8136 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8137 		   struct lpfc_nodelist  *ndlp)
8138 {
8139 	struct lpfc_dmabuf *pcmd;
8140 	uint32_t *lp;
8141 	IOCB_t *icmd;
8142 	uint32_t did;
8143 
8144 	icmd = &cmdiocb->iocb;
8145 	did = icmd->un.elsreq64.remoteID;
8146 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
8147 	lp = (uint32_t *) pcmd->virt;
8148 
8149 	lp++;
8150 	/* FARP-RSP received from DID <did> */
8151 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8152 			 "0600 FARP-RSP received from DID x%x\n", did);
8153 	/* ACCEPT the Farp resp request */
8154 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
8155 
8156 	return 0;
8157 }
8158 
8159 /**
8160  * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
8161  * @vport: pointer to a host virtual N_Port data structure.
8162  * @cmdiocb: pointer to lpfc command iocb data structure.
8163  * @fan_ndlp: pointer to a node-list data structure.
8164  *
8165  * This routine processes a Fabric Address Notification (FAN) IOCB
8166  * command received as an ELS unsolicited event. The FAN ELS command will
8167  * only be processed on a physical port (i.e., the @vport represents the
8168  * physical port). The fabric NodeName and PortName from the FAN IOCB are
8169  * compared against those in the phba data structure. If any of those is
8170  * different, the lpfc_initial_flogi() routine is invoked to initialize
8171  * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
8172  * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
8173  * is invoked to register login to the fabric.
8174  *
8175  * Return code
8176  *   0 - Successfully processed fan iocb (currently always return 0).
8177  **/
8178 static int
8179 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
8180 		 struct lpfc_nodelist *fan_ndlp)
8181 {
8182 	struct lpfc_hba *phba = vport->phba;
8183 	uint32_t *lp;
8184 	FAN *fp;
8185 
8186 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
8187 	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
8188 	fp = (FAN *) ++lp;
8189 	/* FAN received; Fan does not have a reply sequence */
8190 	if ((vport == phba->pport) &&
8191 	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
8192 		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
8193 			    sizeof(struct lpfc_name))) ||
8194 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
8195 			    sizeof(struct lpfc_name)))) {
8196 			/* This port has switched fabrics. FLOGI is required */
8197 			lpfc_issue_init_vfi(vport);
8198 		} else {
8199 			/* FAN verified - skip FLOGI */
8200 			vport->fc_myDID = vport->fc_prevDID;
8201 			if (phba->sli_rev < LPFC_SLI_REV4)
8202 				lpfc_issue_fabric_reglogin(vport);
8203 			else {
8204 				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8205 					"3138 Need register VFI: (x%x/%x)\n",
8206 					vport->fc_prevDID, vport->fc_myDID);
8207 				lpfc_issue_reg_vfi(vport);
8208 			}
8209 		}
8210 	}
8211 	return 0;
8212 }
8213 
8214 /**
8215  * lpfc_els_timeout - Handler funciton to the els timer
8216  * @t: timer context used to obtain the vport.
8217  *
8218  * This routine is invoked by the ELS timer after timeout. It posts the ELS
8219  * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
8220  * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
8221  * up the worker thread. It is for the worker thread to invoke the routine
8222  * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
8223  **/
8224 void
8225 lpfc_els_timeout(struct timer_list *t)
8226 {
8227 	struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
8228 	struct lpfc_hba   *phba = vport->phba;
8229 	uint32_t tmo_posted;
8230 	unsigned long iflag;
8231 
8232 	spin_lock_irqsave(&vport->work_port_lock, iflag);
8233 	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
8234 	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
8235 		vport->work_port_events |= WORKER_ELS_TMO;
8236 	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
8237 
8238 	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
8239 		lpfc_worker_wake_up(phba);
8240 	return;
8241 }
8242 
8243 
8244 /**
8245  * lpfc_els_timeout_handler - Process an els timeout event
8246  * @vport: pointer to a virtual N_Port data structure.
8247  *
8248  * This routine is the actual handler function that processes an ELS timeout
8249  * event. It walks the ELS ring to get and abort all the IOCBs (except the
8250  * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
8251  * invoking the lpfc_sli_issue_abort_iotag() routine.
8252  **/
8253 void
8254 lpfc_els_timeout_handler(struct lpfc_vport *vport)
8255 {
8256 	struct lpfc_hba  *phba = vport->phba;
8257 	struct lpfc_sli_ring *pring;
8258 	struct lpfc_iocbq *tmp_iocb, *piocb;
8259 	IOCB_t *cmd = NULL;
8260 	struct lpfc_dmabuf *pcmd;
8261 	uint32_t els_command = 0;
8262 	uint32_t timeout;
8263 	uint32_t remote_ID = 0xffffffff;
8264 	LIST_HEAD(abort_list);
8265 
8266 
8267 	timeout = (uint32_t)(phba->fc_ratov << 1);
8268 
8269 	pring = lpfc_phba_elsring(phba);
8270 	if (unlikely(!pring))
8271 		return;
8272 
8273 	if (phba->pport->load_flag & FC_UNLOADING)
8274 		return;
8275 
8276 	spin_lock_irq(&phba->hbalock);
8277 	if (phba->sli_rev == LPFC_SLI_REV4)
8278 		spin_lock(&pring->ring_lock);
8279 
8280 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
8281 		cmd = &piocb->iocb;
8282 
8283 		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
8284 		    piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
8285 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8286 			continue;
8287 
8288 		if (piocb->vport != vport)
8289 			continue;
8290 
8291 		pcmd = (struct lpfc_dmabuf *) piocb->context2;
8292 		if (pcmd)
8293 			els_command = *(uint32_t *) (pcmd->virt);
8294 
8295 		if (els_command == ELS_CMD_FARP ||
8296 		    els_command == ELS_CMD_FARPR ||
8297 		    els_command == ELS_CMD_FDISC)
8298 			continue;
8299 
8300 		if (piocb->drvrTimeout > 0) {
8301 			if (piocb->drvrTimeout >= timeout)
8302 				piocb->drvrTimeout -= timeout;
8303 			else
8304 				piocb->drvrTimeout = 0;
8305 			continue;
8306 		}
8307 
8308 		remote_ID = 0xffffffff;
8309 		if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
8310 			remote_ID = cmd->un.elsreq64.remoteID;
8311 		else {
8312 			struct lpfc_nodelist *ndlp;
8313 			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
8314 			if (ndlp)
8315 				remote_ID = ndlp->nlp_DID;
8316 		}
8317 		list_add_tail(&piocb->dlist, &abort_list);
8318 	}
8319 	if (phba->sli_rev == LPFC_SLI_REV4)
8320 		spin_unlock(&pring->ring_lock);
8321 	spin_unlock_irq(&phba->hbalock);
8322 
8323 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
8324 		cmd = &piocb->iocb;
8325 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8326 			 "0127 ELS timeout Data: x%x x%x x%x "
8327 			 "x%x\n", els_command,
8328 			 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
8329 		spin_lock_irq(&phba->hbalock);
8330 		list_del_init(&piocb->dlist);
8331 		lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
8332 		spin_unlock_irq(&phba->hbalock);
8333 	}
8334 
8335 	/* Make sure HBA is alive */
8336 	lpfc_issue_hb_tmo(phba);
8337 
8338 	if (!list_empty(&pring->txcmplq))
8339 		if (!(phba->pport->load_flag & FC_UNLOADING))
8340 			mod_timer(&vport->els_tmofunc,
8341 				  jiffies + msecs_to_jiffies(1000 * timeout));
8342 }
8343 
8344 /**
8345  * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
8346  * @vport: pointer to a host virtual N_Port data structure.
8347  *
8348  * This routine is used to clean up all the outstanding ELS commands on a
8349  * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
8350  * routine. After that, it walks the ELS transmit queue to remove all the
8351  * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
8352  * the IOCBs with a non-NULL completion callback function, the callback
8353  * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
8354  * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
8355  * callback function, the IOCB will simply be released. Finally, it walks
8356  * the ELS transmit completion queue to issue an abort IOCB to any transmit
8357  * completion queue IOCB that is associated with the @vport and is not
8358  * an IOCB from libdfc (i.e., the management plane IOCBs that are not
8359  * part of the discovery state machine) out to HBA by invoking the
8360  * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
8361  * abort IOCB to any transmit completion queueed IOCB, it does not guarantee
8362  * the IOCBs are aborted when this function returns.
8363  **/
8364 void
8365 lpfc_els_flush_cmd(struct lpfc_vport *vport)
8366 {
8367 	LIST_HEAD(abort_list);
8368 	struct lpfc_hba  *phba = vport->phba;
8369 	struct lpfc_sli_ring *pring;
8370 	struct lpfc_iocbq *tmp_iocb, *piocb;
8371 	IOCB_t *cmd = NULL;
8372 	unsigned long iflags = 0;
8373 
8374 	lpfc_fabric_abort_vport(vport);
8375 
8376 	/*
8377 	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
8378 	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
8379 	 * ultimately grabs the ring_lock, the driver must splice the list into
8380 	 * a working list and release the locks before calling the abort.
8381 	 */
8382 	spin_lock_irqsave(&phba->hbalock, iflags);
8383 	pring = lpfc_phba_elsring(phba);
8384 
8385 	/* Bail out if we've no ELS wq, like in PCI error recovery case. */
8386 	if (unlikely(!pring)) {
8387 		spin_unlock_irqrestore(&phba->hbalock, iflags);
8388 		return;
8389 	}
8390 
8391 	if (phba->sli_rev == LPFC_SLI_REV4)
8392 		spin_lock(&pring->ring_lock);
8393 
8394 	/* First we need to issue aborts to outstanding cmds on txcmpl */
8395 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
8396 		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
8397 			continue;
8398 
8399 		if (piocb->vport != vport)
8400 			continue;
8401 
8402 		if (piocb->iocb_flag & LPFC_DRIVER_ABORTED)
8403 			continue;
8404 
8405 		/* On the ELS ring we can have ELS_REQUESTs or
8406 		 * GEN_REQUESTs waiting for a response.
8407 		 */
8408 		cmd = &piocb->iocb;
8409 		if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
8410 			list_add_tail(&piocb->dlist, &abort_list);
8411 
8412 			/* If the link is down when flushing ELS commands
8413 			 * the firmware will not complete them till after
8414 			 * the link comes back up. This may confuse
8415 			 * discovery for the new link up, so we need to
8416 			 * change the compl routine to just clean up the iocb
8417 			 * and avoid any retry logic.
8418 			 */
8419 			if (phba->link_state == LPFC_LINK_DOWN)
8420 				piocb->iocb_cmpl = lpfc_cmpl_els_link_down;
8421 		}
8422 		if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR)
8423 			list_add_tail(&piocb->dlist, &abort_list);
8424 	}
8425 
8426 	if (phba->sli_rev == LPFC_SLI_REV4)
8427 		spin_unlock(&pring->ring_lock);
8428 	spin_unlock_irqrestore(&phba->hbalock, iflags);
8429 
8430 	/* Abort each txcmpl iocb on aborted list and remove the dlist links. */
8431 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
8432 		spin_lock_irqsave(&phba->hbalock, iflags);
8433 		list_del_init(&piocb->dlist);
8434 		lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
8435 		spin_unlock_irqrestore(&phba->hbalock, iflags);
8436 	}
8437 	/* Make sure HBA is alive */
8438 	lpfc_issue_hb_tmo(phba);
8439 
8440 	if (!list_empty(&abort_list))
8441 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8442 				 "3387 abort list for txq not empty\n");
8443 	INIT_LIST_HEAD(&abort_list);
8444 
8445 	spin_lock_irqsave(&phba->hbalock, iflags);
8446 	if (phba->sli_rev == LPFC_SLI_REV4)
8447 		spin_lock(&pring->ring_lock);
8448 
8449 	/* No need to abort the txq list,
8450 	 * just queue them up for lpfc_sli_cancel_iocbs
8451 	 */
8452 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
8453 		cmd = &piocb->iocb;
8454 
8455 		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
8456 			continue;
8457 		}
8458 
8459 		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
8460 		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
8461 		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
8462 		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
8463 		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
8464 			continue;
8465 
8466 		if (piocb->vport != vport)
8467 			continue;
8468 
8469 		list_del_init(&piocb->list);
8470 		list_add_tail(&piocb->list, &abort_list);
8471 	}
8472 
8473 	/* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */
8474 	if (vport == phba->pport) {
8475 		list_for_each_entry_safe(piocb, tmp_iocb,
8476 					 &phba->fabric_iocb_list, list) {
8477 			cmd = &piocb->iocb;
8478 			list_del_init(&piocb->list);
8479 			list_add_tail(&piocb->list, &abort_list);
8480 		}
8481 	}
8482 
8483 	if (phba->sli_rev == LPFC_SLI_REV4)
8484 		spin_unlock(&pring->ring_lock);
8485 	spin_unlock_irqrestore(&phba->hbalock, iflags);
8486 
8487 	/* Cancel all the IOCBs from the completions list */
8488 	lpfc_sli_cancel_iocbs(phba, &abort_list,
8489 			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
8490 
8491 	return;
8492 }
8493 
8494 /**
8495  * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
8496  * @phba: pointer to lpfc hba data structure.
8497  *
8498  * This routine is used to clean up all the outstanding ELS commands on a
8499  * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
8500  * routine. After that, it walks the ELS transmit queue to remove all the
8501  * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
8502  * the IOCBs with the completion callback function associated, the callback
8503  * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
8504  * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
8505  * callback function associated, the IOCB will simply be released. Finally,
8506  * it walks the ELS transmit completion queue to issue an abort IOCB to any
8507  * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
8508  * management plane IOCBs that are not part of the discovery state machine)
8509  * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
8510  **/
8511 void
8512 lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
8513 {
8514 	struct lpfc_vport *vport;
8515 
8516 	spin_lock_irq(&phba->port_list_lock);
8517 	list_for_each_entry(vport, &phba->port_list, listentry)
8518 		lpfc_els_flush_cmd(vport);
8519 	spin_unlock_irq(&phba->port_list_lock);
8520 
8521 	return;
8522 }
8523 
8524 /**
8525  * lpfc_send_els_failure_event - Posts an ELS command failure event
8526  * @phba: Pointer to hba context object.
8527  * @cmdiocbp: Pointer to command iocb which reported error.
8528  * @rspiocbp: Pointer to response iocb which reported error.
8529  *
8530  * This function sends an event when there is an ELS command
8531  * failure.
8532  **/
8533 void
8534 lpfc_send_els_failure_event(struct lpfc_hba *phba,
8535 			struct lpfc_iocbq *cmdiocbp,
8536 			struct lpfc_iocbq *rspiocbp)
8537 {
8538 	struct lpfc_vport *vport = cmdiocbp->vport;
8539 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8540 	struct lpfc_lsrjt_event lsrjt_event;
8541 	struct lpfc_fabric_event_header fabric_event;
8542 	struct ls_rjt stat;
8543 	struct lpfc_nodelist *ndlp;
8544 	uint32_t *pcmd;
8545 
8546 	ndlp = cmdiocbp->context1;
8547 	if (!ndlp)
8548 		return;
8549 
8550 	if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
8551 		lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
8552 		lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
8553 		memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
8554 			sizeof(struct lpfc_name));
8555 		memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
8556 			sizeof(struct lpfc_name));
8557 		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8558 			cmdiocbp->context2)->virt);
8559 		lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
8560 		stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
8561 		lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
8562 		lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
8563 		fc_host_post_vendor_event(shost,
8564 			fc_get_event_number(),
8565 			sizeof(lsrjt_event),
8566 			(char *)&lsrjt_event,
8567 			LPFC_NL_VENDOR_ID);
8568 		return;
8569 	}
8570 	if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
8571 		(rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
8572 		fabric_event.event_type = FC_REG_FABRIC_EVENT;
8573 		if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
8574 			fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
8575 		else
8576 			fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
8577 		memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
8578 			sizeof(struct lpfc_name));
8579 		memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
8580 			sizeof(struct lpfc_name));
8581 		fc_host_post_vendor_event(shost,
8582 			fc_get_event_number(),
8583 			sizeof(fabric_event),
8584 			(char *)&fabric_event,
8585 			LPFC_NL_VENDOR_ID);
8586 		return;
8587 	}
8588 
8589 }
8590 
8591 /**
8592  * lpfc_send_els_event - Posts unsolicited els event
8593  * @vport: Pointer to vport object.
8594  * @ndlp: Pointer FC node object.
8595  * @payload: ELS command code type.
8596  *
8597  * This function posts an event when there is an incoming
8598  * unsolicited ELS command.
8599  **/
8600 static void
8601 lpfc_send_els_event(struct lpfc_vport *vport,
8602 		    struct lpfc_nodelist *ndlp,
8603 		    uint32_t *payload)
8604 {
8605 	struct lpfc_els_event_header *els_data = NULL;
8606 	struct lpfc_logo_event *logo_data = NULL;
8607 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8608 
8609 	if (*payload == ELS_CMD_LOGO) {
8610 		logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
8611 		if (!logo_data) {
8612 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8613 				"0148 Failed to allocate memory "
8614 				"for LOGO event\n");
8615 			return;
8616 		}
8617 		els_data = &logo_data->header;
8618 	} else {
8619 		els_data = kmalloc(sizeof(struct lpfc_els_event_header),
8620 			GFP_KERNEL);
8621 		if (!els_data) {
8622 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8623 				"0149 Failed to allocate memory "
8624 				"for ELS event\n");
8625 			return;
8626 		}
8627 	}
8628 	els_data->event_type = FC_REG_ELS_EVENT;
8629 	switch (*payload) {
8630 	case ELS_CMD_PLOGI:
8631 		els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
8632 		break;
8633 	case ELS_CMD_PRLO:
8634 		els_data->subcategory = LPFC_EVENT_PRLO_RCV;
8635 		break;
8636 	case ELS_CMD_ADISC:
8637 		els_data->subcategory = LPFC_EVENT_ADISC_RCV;
8638 		break;
8639 	case ELS_CMD_LOGO:
8640 		els_data->subcategory = LPFC_EVENT_LOGO_RCV;
8641 		/* Copy the WWPN in the LOGO payload */
8642 		memcpy(logo_data->logo_wwpn, &payload[2],
8643 			sizeof(struct lpfc_name));
8644 		break;
8645 	default:
8646 		kfree(els_data);
8647 		return;
8648 	}
8649 	memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
8650 	memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
8651 	if (*payload == ELS_CMD_LOGO) {
8652 		fc_host_post_vendor_event(shost,
8653 			fc_get_event_number(),
8654 			sizeof(struct lpfc_logo_event),
8655 			(char *)logo_data,
8656 			LPFC_NL_VENDOR_ID);
8657 		kfree(logo_data);
8658 	} else {
8659 		fc_host_post_vendor_event(shost,
8660 			fc_get_event_number(),
8661 			sizeof(struct lpfc_els_event_header),
8662 			(char *)els_data,
8663 			LPFC_NL_VENDOR_ID);
8664 		kfree(els_data);
8665 	}
8666 
8667 	return;
8668 }
8669 
8670 
8671 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag,
8672 			FC_LS_TLV_DTAG_INIT);
8673 
8674 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types,
8675 			FC_FPIN_LI_EVT_TYPES_INIT);
8676 
8677 /**
8678  * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event.
8679  * @vport: Pointer to vport object.
8680  * @tlv:  Pointer to the Link Integrity Notification Descriptor.
8681  *
8682  * This function processes a link integrity FPIN event by
8683  * logging a message
8684  **/
8685 static void
8686 lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv)
8687 {
8688 	struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv;
8689 	const char *li_evt_str;
8690 	u32 li_evt;
8691 
8692 	li_evt = be16_to_cpu(li->event_type);
8693 	li_evt_str = lpfc_get_fpin_li_event_nm(li_evt);
8694 
8695 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8696 			 "4680 FPIN Link Integrity %s (x%x) "
8697 			 "Detecting PN x%016llx Attached PN x%016llx "
8698 			 "Duration %d mSecs Count %d Port Cnt %d\n",
8699 			 li_evt_str, li_evt,
8700 			 be64_to_cpu(li->detecting_wwpn),
8701 			 be64_to_cpu(li->attached_wwpn),
8702 			 be32_to_cpu(li->event_threshold),
8703 			 be32_to_cpu(li->event_count),
8704 			 be32_to_cpu(li->pname_count));
8705 }
8706 
8707 static void
8708 lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin,
8709 		  u32 fpin_length)
8710 {
8711 	struct fc_tlv_desc *tlv;
8712 	const char *dtag_nm;
8713 	uint32_t desc_cnt = 0, bytes_remain;
8714 	u32 dtag;
8715 
8716 	/* FPINs handled only if we are in the right discovery state */
8717 	if (vport->port_state < LPFC_DISC_AUTH)
8718 		return;
8719 
8720 	/* make sure there is the full fpin header */
8721 	if (fpin_length < sizeof(struct fc_els_fpin))
8722 		return;
8723 
8724 	tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
8725 	bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc);
8726 	bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
8727 
8728 	/* process each descriptor */
8729 	while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
8730 	       bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
8731 
8732 		dtag = be32_to_cpu(tlv->desc_tag);
8733 		switch (dtag) {
8734 		case ELS_DTAG_LNK_INTEGRITY:
8735 			lpfc_els_rcv_fpin_li(vport, tlv);
8736 			break;
8737 		default:
8738 			dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
8739 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
8740 					 "4678  skipped FPIN descriptor[%d]: "
8741 					 "tag x%x (%s)\n",
8742 					 desc_cnt, dtag, dtag_nm);
8743 			break;
8744 		}
8745 
8746 		desc_cnt++;
8747 		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
8748 		tlv = fc_tlv_next_desc(tlv);
8749 	}
8750 
8751 	fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length,
8752 			 (char *)fpin);
8753 }
8754 
8755 /**
8756  * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
8757  * @phba: pointer to lpfc hba data structure.
8758  * @pring: pointer to a SLI ring.
8759  * @vport: pointer to a host virtual N_Port data structure.
8760  * @elsiocb: pointer to lpfc els command iocb data structure.
8761  *
8762  * This routine is used for processing the IOCB associated with a unsolicited
8763  * event. It first determines whether there is an existing ndlp that matches
8764  * the DID from the unsolicited IOCB. If not, it will create a new one with
8765  * the DID from the unsolicited IOCB. The ELS command from the unsolicited
8766  * IOCB is then used to invoke the proper routine and to set up proper state
8767  * of the discovery state machine.
8768  **/
8769 static void
8770 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8771 		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
8772 {
8773 	struct lpfc_nodelist *ndlp;
8774 	struct ls_rjt stat;
8775 	uint32_t *payload, payload_len;
8776 	uint32_t cmd, did, newnode;
8777 	uint8_t rjt_exp, rjt_err = 0, init_link = 0;
8778 	IOCB_t *icmd = &elsiocb->iocb;
8779 	LPFC_MBOXQ_t *mbox;
8780 
8781 	if (!vport || !(elsiocb->context2))
8782 		goto dropit;
8783 
8784 	newnode = 0;
8785 	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
8786 	payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len;
8787 	cmd = *payload;
8788 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
8789 		lpfc_post_buffer(phba, pring, 1);
8790 
8791 	did = icmd->un.rcvels.remoteID;
8792 	if (icmd->ulpStatus) {
8793 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8794 			"RCV Unsol ELS:  status:x%x/x%x did:x%x",
8795 			icmd->ulpStatus, icmd->un.ulpWord[4], did);
8796 		goto dropit;
8797 	}
8798 
8799 	/* Check to see if link went down during discovery */
8800 	if (lpfc_els_chk_latt(vport))
8801 		goto dropit;
8802 
8803 	/* Ignore traffic received during vport shutdown. */
8804 	if (vport->load_flag & FC_UNLOADING)
8805 		goto dropit;
8806 
8807 	/* If NPort discovery is delayed drop incoming ELS */
8808 	if ((vport->fc_flag & FC_DISC_DELAYED) &&
8809 			(cmd != ELS_CMD_PLOGI))
8810 		goto dropit;
8811 
8812 	ndlp = lpfc_findnode_did(vport, did);
8813 	if (!ndlp) {
8814 		/* Cannot find existing Fabric ndlp, so allocate a new one */
8815 		ndlp = lpfc_nlp_init(vport, did);
8816 		if (!ndlp)
8817 			goto dropit;
8818 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
8819 		newnode = 1;
8820 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
8821 			ndlp->nlp_type |= NLP_FABRIC;
8822 	} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
8823 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
8824 		newnode = 1;
8825 	}
8826 
8827 	phba->fc_stat.elsRcvFrame++;
8828 
8829 	/*
8830 	 * Do not process any unsolicited ELS commands
8831 	 * if the ndlp is in DEV_LOSS
8832 	 */
8833 	spin_lock_irq(&ndlp->lock);
8834 	if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
8835 		spin_unlock_irq(&ndlp->lock);
8836 		if (newnode)
8837 			lpfc_nlp_put(ndlp);
8838 		goto dropit;
8839 	}
8840 	spin_unlock_irq(&ndlp->lock);
8841 
8842 	elsiocb->context1 = lpfc_nlp_get(ndlp);
8843 	if (!elsiocb->context1)
8844 		goto dropit;
8845 	elsiocb->vport = vport;
8846 
8847 	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
8848 		cmd &= ELS_CMD_MASK;
8849 	}
8850 	/* ELS command <elsCmd> received from NPORT <did> */
8851 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8852 			 "0112 ELS command x%x received from NPORT x%x "
8853 			 "refcnt %d Data: x%x x%x x%x x%x\n",
8854 			 cmd, did, kref_read(&ndlp->kref), vport->port_state,
8855 			 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID);
8856 
8857 	/* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
8858 	if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
8859 	    (cmd != ELS_CMD_FLOGI) &&
8860 	    !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
8861 		rjt_err = LSRJT_LOGICAL_BSY;
8862 		rjt_exp = LSEXP_NOTHING_MORE;
8863 		goto lsrjt;
8864 	}
8865 
8866 	switch (cmd) {
8867 	case ELS_CMD_PLOGI:
8868 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8869 			"RCV PLOGI:       did:x%x/ste:x%x flg:x%x",
8870 			did, vport->port_state, ndlp->nlp_flag);
8871 
8872 		phba->fc_stat.elsRcvPLOGI++;
8873 		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
8874 		if (phba->sli_rev == LPFC_SLI_REV4 &&
8875 		    (phba->pport->fc_flag & FC_PT2PT)) {
8876 			vport->fc_prevDID = vport->fc_myDID;
8877 			/* Our DID needs to be updated before registering
8878 			 * the vfi. This is done in lpfc_rcv_plogi but
8879 			 * that is called after the reg_vfi.
8880 			 */
8881 			vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
8882 			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8883 					 "3312 Remote port assigned DID x%x "
8884 					 "%x\n", vport->fc_myDID,
8885 					 vport->fc_prevDID);
8886 		}
8887 
8888 		lpfc_send_els_event(vport, ndlp, payload);
8889 
8890 		/* If Nport discovery is delayed, reject PLOGIs */
8891 		if (vport->fc_flag & FC_DISC_DELAYED) {
8892 			rjt_err = LSRJT_UNABLE_TPC;
8893 			rjt_exp = LSEXP_NOTHING_MORE;
8894 			break;
8895 		}
8896 
8897 		if (vport->port_state < LPFC_DISC_AUTH) {
8898 			if (!(phba->pport->fc_flag & FC_PT2PT) ||
8899 				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
8900 				rjt_err = LSRJT_UNABLE_TPC;
8901 				rjt_exp = LSEXP_NOTHING_MORE;
8902 				break;
8903 			}
8904 		}
8905 
8906 		spin_lock_irq(&ndlp->lock);
8907 		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
8908 		spin_unlock_irq(&ndlp->lock);
8909 
8910 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
8911 					NLP_EVT_RCV_PLOGI);
8912 
8913 		break;
8914 	case ELS_CMD_FLOGI:
8915 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8916 			"RCV FLOGI:       did:x%x/ste:x%x flg:x%x",
8917 			did, vport->port_state, ndlp->nlp_flag);
8918 
8919 		phba->fc_stat.elsRcvFLOGI++;
8920 
8921 		/* If the driver believes fabric discovery is done and is ready,
8922 		 * bounce the link.  There is some descrepancy.
8923 		 */
8924 		if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
8925 		    vport->fc_flag & FC_PT2PT &&
8926 		    vport->rcv_flogi_cnt >= 1) {
8927 			rjt_err = LSRJT_LOGICAL_BSY;
8928 			rjt_exp = LSEXP_NOTHING_MORE;
8929 			init_link++;
8930 			goto lsrjt;
8931 		}
8932 
8933 		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
8934 		if (newnode)
8935 			lpfc_disc_state_machine(vport, ndlp, NULL,
8936 					NLP_EVT_DEVICE_RM);
8937 		break;
8938 	case ELS_CMD_LOGO:
8939 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8940 			"RCV LOGO:        did:x%x/ste:x%x flg:x%x",
8941 			did, vport->port_state, ndlp->nlp_flag);
8942 
8943 		phba->fc_stat.elsRcvLOGO++;
8944 		lpfc_send_els_event(vport, ndlp, payload);
8945 		if (vport->port_state < LPFC_DISC_AUTH) {
8946 			rjt_err = LSRJT_UNABLE_TPC;
8947 			rjt_exp = LSEXP_NOTHING_MORE;
8948 			break;
8949 		}
8950 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
8951 		break;
8952 	case ELS_CMD_PRLO:
8953 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8954 			"RCV PRLO:        did:x%x/ste:x%x flg:x%x",
8955 			did, vport->port_state, ndlp->nlp_flag);
8956 
8957 		phba->fc_stat.elsRcvPRLO++;
8958 		lpfc_send_els_event(vport, ndlp, payload);
8959 		if (vport->port_state < LPFC_DISC_AUTH) {
8960 			rjt_err = LSRJT_UNABLE_TPC;
8961 			rjt_exp = LSEXP_NOTHING_MORE;
8962 			break;
8963 		}
8964 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
8965 		break;
8966 	case ELS_CMD_LCB:
8967 		phba->fc_stat.elsRcvLCB++;
8968 		lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
8969 		break;
8970 	case ELS_CMD_RDP:
8971 		phba->fc_stat.elsRcvRDP++;
8972 		lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
8973 		break;
8974 	case ELS_CMD_RSCN:
8975 		phba->fc_stat.elsRcvRSCN++;
8976 		lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
8977 		if (newnode)
8978 			lpfc_disc_state_machine(vport, ndlp, NULL,
8979 					NLP_EVT_DEVICE_RM);
8980 		break;
8981 	case ELS_CMD_ADISC:
8982 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8983 			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
8984 			did, vport->port_state, ndlp->nlp_flag);
8985 
8986 		lpfc_send_els_event(vport, ndlp, payload);
8987 		phba->fc_stat.elsRcvADISC++;
8988 		if (vport->port_state < LPFC_DISC_AUTH) {
8989 			rjt_err = LSRJT_UNABLE_TPC;
8990 			rjt_exp = LSEXP_NOTHING_MORE;
8991 			break;
8992 		}
8993 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
8994 					NLP_EVT_RCV_ADISC);
8995 		break;
8996 	case ELS_CMD_PDISC:
8997 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8998 			"RCV PDISC:       did:x%x/ste:x%x flg:x%x",
8999 			did, vport->port_state, ndlp->nlp_flag);
9000 
9001 		phba->fc_stat.elsRcvPDISC++;
9002 		if (vport->port_state < LPFC_DISC_AUTH) {
9003 			rjt_err = LSRJT_UNABLE_TPC;
9004 			rjt_exp = LSEXP_NOTHING_MORE;
9005 			break;
9006 		}
9007 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
9008 					NLP_EVT_RCV_PDISC);
9009 		break;
9010 	case ELS_CMD_FARPR:
9011 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9012 			"RCV FARPR:       did:x%x/ste:x%x flg:x%x",
9013 			did, vport->port_state, ndlp->nlp_flag);
9014 
9015 		phba->fc_stat.elsRcvFARPR++;
9016 		lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
9017 		break;
9018 	case ELS_CMD_FARP:
9019 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9020 			"RCV FARP:        did:x%x/ste:x%x flg:x%x",
9021 			did, vport->port_state, ndlp->nlp_flag);
9022 
9023 		phba->fc_stat.elsRcvFARP++;
9024 		lpfc_els_rcv_farp(vport, elsiocb, ndlp);
9025 		break;
9026 	case ELS_CMD_FAN:
9027 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9028 			"RCV FAN:         did:x%x/ste:x%x flg:x%x",
9029 			did, vport->port_state, ndlp->nlp_flag);
9030 
9031 		phba->fc_stat.elsRcvFAN++;
9032 		lpfc_els_rcv_fan(vport, elsiocb, ndlp);
9033 		break;
9034 	case ELS_CMD_PRLI:
9035 	case ELS_CMD_NVMEPRLI:
9036 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9037 			"RCV PRLI:        did:x%x/ste:x%x flg:x%x",
9038 			did, vport->port_state, ndlp->nlp_flag);
9039 
9040 		phba->fc_stat.elsRcvPRLI++;
9041 		if ((vport->port_state < LPFC_DISC_AUTH) &&
9042 		    (vport->fc_flag & FC_FABRIC)) {
9043 			rjt_err = LSRJT_UNABLE_TPC;
9044 			rjt_exp = LSEXP_NOTHING_MORE;
9045 			break;
9046 		}
9047 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
9048 		break;
9049 	case ELS_CMD_LIRR:
9050 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9051 			"RCV LIRR:        did:x%x/ste:x%x flg:x%x",
9052 			did, vport->port_state, ndlp->nlp_flag);
9053 
9054 		phba->fc_stat.elsRcvLIRR++;
9055 		lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
9056 		if (newnode)
9057 			lpfc_disc_state_machine(vport, ndlp, NULL,
9058 					NLP_EVT_DEVICE_RM);
9059 		break;
9060 	case ELS_CMD_RLS:
9061 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9062 			"RCV RLS:         did:x%x/ste:x%x flg:x%x",
9063 			did, vport->port_state, ndlp->nlp_flag);
9064 
9065 		phba->fc_stat.elsRcvRLS++;
9066 		lpfc_els_rcv_rls(vport, elsiocb, ndlp);
9067 		if (newnode)
9068 			lpfc_disc_state_machine(vport, ndlp, NULL,
9069 					NLP_EVT_DEVICE_RM);
9070 		break;
9071 	case ELS_CMD_RPL:
9072 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9073 			"RCV RPL:         did:x%x/ste:x%x flg:x%x",
9074 			did, vport->port_state, ndlp->nlp_flag);
9075 
9076 		phba->fc_stat.elsRcvRPL++;
9077 		lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
9078 		if (newnode)
9079 			lpfc_disc_state_machine(vport, ndlp, NULL,
9080 					NLP_EVT_DEVICE_RM);
9081 		break;
9082 	case ELS_CMD_RNID:
9083 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9084 			"RCV RNID:        did:x%x/ste:x%x flg:x%x",
9085 			did, vport->port_state, ndlp->nlp_flag);
9086 
9087 		phba->fc_stat.elsRcvRNID++;
9088 		lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
9089 		if (newnode)
9090 			lpfc_disc_state_machine(vport, ndlp, NULL,
9091 					NLP_EVT_DEVICE_RM);
9092 		break;
9093 	case ELS_CMD_RTV:
9094 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9095 			"RCV RTV:        did:x%x/ste:x%x flg:x%x",
9096 			did, vport->port_state, ndlp->nlp_flag);
9097 		phba->fc_stat.elsRcvRTV++;
9098 		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
9099 		if (newnode)
9100 			lpfc_disc_state_machine(vport, ndlp, NULL,
9101 					NLP_EVT_DEVICE_RM);
9102 		break;
9103 	case ELS_CMD_RRQ:
9104 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9105 			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
9106 			did, vport->port_state, ndlp->nlp_flag);
9107 
9108 		phba->fc_stat.elsRcvRRQ++;
9109 		lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
9110 		if (newnode)
9111 			lpfc_disc_state_machine(vport, ndlp, NULL,
9112 					NLP_EVT_DEVICE_RM);
9113 		break;
9114 	case ELS_CMD_ECHO:
9115 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9116 			"RCV ECHO:        did:x%x/ste:x%x flg:x%x",
9117 			did, vport->port_state, ndlp->nlp_flag);
9118 
9119 		phba->fc_stat.elsRcvECHO++;
9120 		lpfc_els_rcv_echo(vport, elsiocb, ndlp);
9121 		if (newnode)
9122 			lpfc_disc_state_machine(vport, ndlp, NULL,
9123 					NLP_EVT_DEVICE_RM);
9124 		break;
9125 	case ELS_CMD_REC:
9126 		/* receive this due to exchange closed */
9127 		rjt_err = LSRJT_UNABLE_TPC;
9128 		rjt_exp = LSEXP_INVALID_OX_RX;
9129 		break;
9130 	case ELS_CMD_FPIN:
9131 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9132 				      "RCV FPIN:       did:x%x/ste:x%x flg:x%x",
9133 				      did, vport->port_state, ndlp->nlp_flag);
9134 
9135 		lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload,
9136 				  payload_len);
9137 
9138 		/* There are no replies, so no rjt codes */
9139 		break;
9140 	case ELS_CMD_RDF:
9141 		phba->fc_stat.elsRcvRDF++;
9142 		/* Accept RDF only from fabric controller */
9143 		if (did != Fabric_Cntl_DID) {
9144 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
9145 					 "1115 Received RDF from invalid DID "
9146 					 "x%x\n", did);
9147 			rjt_err = LSRJT_PROTOCOL_ERR;
9148 			rjt_exp = LSEXP_NOTHING_MORE;
9149 			goto lsrjt;
9150 		}
9151 
9152 		lpfc_els_rcv_rdf(vport, elsiocb, ndlp);
9153 		break;
9154 	default:
9155 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
9156 			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
9157 			cmd, did, vport->port_state);
9158 
9159 		/* Unsupported ELS command, reject */
9160 		rjt_err = LSRJT_CMD_UNSUPPORTED;
9161 		rjt_exp = LSEXP_NOTHING_MORE;
9162 
9163 		/* Unknown ELS command <elsCmd> received from NPORT <did> */
9164 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9165 				 "0115 Unknown ELS command x%x "
9166 				 "received from NPORT x%x\n", cmd, did);
9167 		if (newnode)
9168 			lpfc_disc_state_machine(vport, ndlp, NULL,
9169 					NLP_EVT_DEVICE_RM);
9170 		break;
9171 	}
9172 
9173 lsrjt:
9174 	/* check if need to LS_RJT received ELS cmd */
9175 	if (rjt_err) {
9176 		memset(&stat, 0, sizeof(stat));
9177 		stat.un.b.lsRjtRsnCode = rjt_err;
9178 		stat.un.b.lsRjtRsnCodeExp = rjt_exp;
9179 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
9180 				    NULL);
9181 		/* Remove the reference from above for new nodes. */
9182 		if (newnode)
9183 			lpfc_disc_state_machine(vport, ndlp, NULL,
9184 					NLP_EVT_DEVICE_RM);
9185 	}
9186 
9187 	/* Release the reference on this elsiocb, not the ndlp. */
9188 	lpfc_nlp_put(elsiocb->context1);
9189 	elsiocb->context1 = NULL;
9190 
9191 	/* Special case.  Driver received an unsolicited command that
9192 	 * unsupportable given the driver's current state.  Reset the
9193 	 * link and start over.
9194 	 */
9195 	if (init_link) {
9196 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9197 		if (!mbox)
9198 			return;
9199 		lpfc_linkdown(phba);
9200 		lpfc_init_link(phba, mbox,
9201 			       phba->cfg_topology,
9202 			       phba->cfg_link_speed);
9203 		mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
9204 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9205 		mbox->vport = vport;
9206 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
9207 		    MBX_NOT_FINISHED)
9208 			mempool_free(mbox, phba->mbox_mem_pool);
9209 	}
9210 
9211 	return;
9212 
9213 dropit:
9214 	if (vport && !(vport->load_flag & FC_UNLOADING))
9215 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9216 			"0111 Dropping received ELS cmd "
9217 			"Data: x%x x%x x%x\n",
9218 			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
9219 	phba->fc_stat.elsRcvDrop++;
9220 }
9221 
9222 /**
9223  * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
9224  * @phba: pointer to lpfc hba data structure.
9225  * @pring: pointer to a SLI ring.
9226  * @elsiocb: pointer to lpfc els iocb data structure.
9227  *
9228  * This routine is used to process an unsolicited event received from a SLI
9229  * (Service Level Interface) ring. The actual processing of the data buffer
9230  * associated with the unsolicited event is done by invoking the routine
9231  * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
9232  * SLI ring on which the unsolicited event was received.
9233  **/
9234 void
9235 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9236 		     struct lpfc_iocbq *elsiocb)
9237 {
9238 	struct lpfc_vport *vport = phba->pport;
9239 	IOCB_t *icmd = &elsiocb->iocb;
9240 	dma_addr_t paddr;
9241 	struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
9242 	struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
9243 
9244 	elsiocb->context1 = NULL;
9245 	elsiocb->context2 = NULL;
9246 	elsiocb->context3 = NULL;
9247 
9248 	if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
9249 		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
9250 	} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
9251 		   (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
9252 		   IOERR_RCV_BUFFER_WAITING) {
9253 		phba->fc_stat.NoRcvBuf++;
9254 		/* Not enough posted buffers; Try posting more buffers */
9255 		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
9256 			lpfc_post_buffer(phba, pring, 0);
9257 		return;
9258 	}
9259 
9260 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
9261 	    (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
9262 	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
9263 		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
9264 			vport = phba->pport;
9265 		else
9266 			vport = lpfc_find_vport_by_vpid(phba,
9267 						icmd->unsli3.rcvsli3.vpi);
9268 	}
9269 
9270 	/* If there are no BDEs associated
9271 	 * with this IOCB, there is nothing to do.
9272 	 */
9273 	if (icmd->ulpBdeCount == 0)
9274 		return;
9275 
9276 	/* type of ELS cmd is first 32bit word
9277 	 * in packet
9278 	 */
9279 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
9280 		elsiocb->context2 = bdeBuf1;
9281 	} else {
9282 		paddr = getPaddr(icmd->un.cont64[0].addrHigh,
9283 				 icmd->un.cont64[0].addrLow);
9284 		elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
9285 							     paddr);
9286 	}
9287 
9288 	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
9289 	/*
9290 	 * The different unsolicited event handlers would tell us
9291 	 * if they are done with "mp" by setting context2 to NULL.
9292 	 */
9293 	if (elsiocb->context2) {
9294 		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
9295 		elsiocb->context2 = NULL;
9296 	}
9297 
9298 	/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
9299 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
9300 	    icmd->ulpBdeCount == 2) {
9301 		elsiocb->context2 = bdeBuf2;
9302 		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
9303 		/* free mp if we are done with it */
9304 		if (elsiocb->context2) {
9305 			lpfc_in_buf_free(phba, elsiocb->context2);
9306 			elsiocb->context2 = NULL;
9307 		}
9308 	}
9309 }
9310 
9311 static void
9312 lpfc_start_fdmi(struct lpfc_vport *vport)
9313 {
9314 	struct lpfc_nodelist *ndlp;
9315 
9316 	/* If this is the first time, allocate an ndlp and initialize
9317 	 * it. Otherwise, make sure the node is enabled and then do the
9318 	 * login.
9319 	 */
9320 	ndlp = lpfc_findnode_did(vport, FDMI_DID);
9321 	if (!ndlp) {
9322 		ndlp = lpfc_nlp_init(vport, FDMI_DID);
9323 		if (ndlp) {
9324 			ndlp->nlp_type |= NLP_FABRIC;
9325 		} else {
9326 			return;
9327 		}
9328 	}
9329 
9330 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
9331 	lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
9332 }
9333 
9334 /**
9335  * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
9336  * @phba: pointer to lpfc hba data structure.
9337  * @vport: pointer to a virtual N_Port data structure.
9338  *
9339  * This routine issues a Port Login (PLOGI) to the Name Server with
9340  * State Change Request (SCR) for a @vport. This routine will create an
9341  * ndlp for the Name Server associated to the @vport if such node does
9342  * not already exist. The PLOGI to Name Server is issued by invoking the
9343  * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
9344  * (FDMI) is configured to the @vport, a FDMI node will be created and
9345  * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
9346  **/
9347 void
9348 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
9349 {
9350 	struct lpfc_nodelist *ndlp;
9351 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9352 
9353 	/*
9354 	 * If lpfc_delay_discovery parameter is set and the clean address
9355 	 * bit is cleared and fc fabric parameters chenged, delay FC NPort
9356 	 * discovery.
9357 	 */
9358 	spin_lock_irq(shost->host_lock);
9359 	if (vport->fc_flag & FC_DISC_DELAYED) {
9360 		spin_unlock_irq(shost->host_lock);
9361 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9362 				 "3334 Delay fc port discovery for %d secs\n",
9363 				 phba->fc_ratov);
9364 		mod_timer(&vport->delayed_disc_tmo,
9365 			jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
9366 		return;
9367 	}
9368 	spin_unlock_irq(shost->host_lock);
9369 
9370 	ndlp = lpfc_findnode_did(vport, NameServer_DID);
9371 	if (!ndlp) {
9372 		ndlp = lpfc_nlp_init(vport, NameServer_DID);
9373 		if (!ndlp) {
9374 			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9375 				lpfc_disc_start(vport);
9376 				return;
9377 			}
9378 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9379 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9380 					 "0251 NameServer login: no memory\n");
9381 			return;
9382 		}
9383 	}
9384 
9385 	ndlp->nlp_type |= NLP_FABRIC;
9386 
9387 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
9388 
9389 	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
9390 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9391 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9392 				 "0252 Cannot issue NameServer login\n");
9393 		return;
9394 	}
9395 
9396 	if ((phba->cfg_enable_SmartSAN ||
9397 	     (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
9398 	     (vport->load_flag & FC_ALLOW_FDMI))
9399 		lpfc_start_fdmi(vport);
9400 }
9401 
9402 /**
9403  * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
9404  * @phba: pointer to lpfc hba data structure.
9405  * @pmb: pointer to the driver internal queue element for mailbox command.
9406  *
9407  * This routine is the completion callback function to register new vport
9408  * mailbox command. If the new vport mailbox command completes successfully,
9409  * the fabric registration login shall be performed on physical port (the
9410  * new vport created is actually a physical port, with VPI 0) or the port
9411  * login to Name Server for State Change Request (SCR) will be performed
9412  * on virtual port (real virtual port, with VPI greater than 0).
9413  **/
9414 static void
9415 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
9416 {
9417 	struct lpfc_vport *vport = pmb->vport;
9418 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
9419 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
9420 	MAILBOX_t *mb = &pmb->u.mb;
9421 	int rc;
9422 
9423 	spin_lock_irq(shost->host_lock);
9424 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
9425 	spin_unlock_irq(shost->host_lock);
9426 
9427 	if (mb->mbxStatus) {
9428 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9429 				"0915 Register VPI failed : Status: x%x"
9430 				" upd bit: x%x \n", mb->mbxStatus,
9431 				 mb->un.varRegVpi.upd);
9432 		if (phba->sli_rev == LPFC_SLI_REV4 &&
9433 			mb->un.varRegVpi.upd)
9434 			goto mbox_err_exit ;
9435 
9436 		switch (mb->mbxStatus) {
9437 		case 0x11:	/* unsupported feature */
9438 		case 0x9603:	/* max_vpi exceeded */
9439 		case 0x9602:	/* Link event since CLEAR_LA */
9440 			/* giving up on vport registration */
9441 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9442 			spin_lock_irq(shost->host_lock);
9443 			vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
9444 			spin_unlock_irq(shost->host_lock);
9445 			lpfc_can_disctmo(vport);
9446 			break;
9447 		/* If reg_vpi fail with invalid VPI status, re-init VPI */
9448 		case 0x20:
9449 			spin_lock_irq(shost->host_lock);
9450 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
9451 			spin_unlock_irq(shost->host_lock);
9452 			lpfc_init_vpi(phba, pmb, vport->vpi);
9453 			pmb->vport = vport;
9454 			pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
9455 			rc = lpfc_sli_issue_mbox(phba, pmb,
9456 				MBX_NOWAIT);
9457 			if (rc == MBX_NOT_FINISHED) {
9458 				lpfc_printf_vlog(vport, KERN_ERR,
9459 						 LOG_TRACE_EVENT,
9460 					"2732 Failed to issue INIT_VPI"
9461 					" mailbox command\n");
9462 			} else {
9463 				lpfc_nlp_put(ndlp);
9464 				return;
9465 			}
9466 			fallthrough;
9467 		default:
9468 			/* Try to recover from this error */
9469 			if (phba->sli_rev == LPFC_SLI_REV4)
9470 				lpfc_sli4_unreg_all_rpis(vport);
9471 			lpfc_mbx_unreg_vpi(vport);
9472 			spin_lock_irq(shost->host_lock);
9473 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
9474 			spin_unlock_irq(shost->host_lock);
9475 			if (mb->mbxStatus == MBX_NOT_FINISHED)
9476 				break;
9477 			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
9478 			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
9479 				if (phba->sli_rev == LPFC_SLI_REV4)
9480 					lpfc_issue_init_vfi(vport);
9481 				else
9482 					lpfc_initial_flogi(vport);
9483 			} else {
9484 				lpfc_initial_fdisc(vport);
9485 			}
9486 			break;
9487 		}
9488 	} else {
9489 		spin_lock_irq(shost->host_lock);
9490 		vport->vpi_state |= LPFC_VPI_REGISTERED;
9491 		spin_unlock_irq(shost->host_lock);
9492 		if (vport == phba->pport) {
9493 			if (phba->sli_rev < LPFC_SLI_REV4)
9494 				lpfc_issue_fabric_reglogin(vport);
9495 			else {
9496 				/*
9497 				 * If the physical port is instantiated using
9498 				 * FDISC, do not start vport discovery.
9499 				 */
9500 				if (vport->port_state != LPFC_FDISC)
9501 					lpfc_start_fdiscs(phba);
9502 				lpfc_do_scr_ns_plogi(phba, vport);
9503 			}
9504 		} else {
9505 			lpfc_do_scr_ns_plogi(phba, vport);
9506 		}
9507 	}
9508 mbox_err_exit:
9509 	/* Now, we decrement the ndlp reference count held for this
9510 	 * callback function
9511 	 */
9512 	lpfc_nlp_put(ndlp);
9513 
9514 	mempool_free(pmb, phba->mbox_mem_pool);
9515 	return;
9516 }
9517 
9518 /**
9519  * lpfc_register_new_vport - Register a new vport with a HBA
9520  * @phba: pointer to lpfc hba data structure.
9521  * @vport: pointer to a host virtual N_Port data structure.
9522  * @ndlp: pointer to a node-list data structure.
9523  *
9524  * This routine registers the @vport as a new virtual port with a HBA.
9525  * It is done through a registering vpi mailbox command.
9526  **/
9527 void
9528 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
9529 			struct lpfc_nodelist *ndlp)
9530 {
9531 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9532 	LPFC_MBOXQ_t *mbox;
9533 
9534 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9535 	if (mbox) {
9536 		lpfc_reg_vpi(vport, mbox);
9537 		mbox->vport = vport;
9538 		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
9539 		if (!mbox->ctx_ndlp) {
9540 			mempool_free(mbox, phba->mbox_mem_pool);
9541 			goto mbox_err_exit;
9542 		}
9543 
9544 		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
9545 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
9546 		    == MBX_NOT_FINISHED) {
9547 			/* mailbox command not success, decrement ndlp
9548 			 * reference count for this command
9549 			 */
9550 			lpfc_nlp_put(ndlp);
9551 			mempool_free(mbox, phba->mbox_mem_pool);
9552 
9553 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9554 				"0253 Register VPI: Can't send mbox\n");
9555 			goto mbox_err_exit;
9556 		}
9557 	} else {
9558 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9559 				 "0254 Register VPI: no memory\n");
9560 		goto mbox_err_exit;
9561 	}
9562 	return;
9563 
9564 mbox_err_exit:
9565 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9566 	spin_lock_irq(shost->host_lock);
9567 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
9568 	spin_unlock_irq(shost->host_lock);
9569 	return;
9570 }
9571 
9572 /**
9573  * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
9574  * @phba: pointer to lpfc hba data structure.
9575  *
9576  * This routine cancels the retry delay timers to all the vports.
9577  **/
9578 void
9579 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
9580 {
9581 	struct lpfc_vport **vports;
9582 	struct lpfc_nodelist *ndlp;
9583 	uint32_t link_state;
9584 	int i;
9585 
9586 	/* Treat this failure as linkdown for all vports */
9587 	link_state = phba->link_state;
9588 	lpfc_linkdown(phba);
9589 	phba->link_state = link_state;
9590 
9591 	vports = lpfc_create_vport_work_array(phba);
9592 
9593 	if (vports) {
9594 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9595 			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
9596 			if (ndlp)
9597 				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
9598 			lpfc_els_flush_cmd(vports[i]);
9599 		}
9600 		lpfc_destroy_vport_work_array(phba, vports);
9601 	}
9602 }
9603 
9604 /**
9605  * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
9606  * @phba: pointer to lpfc hba data structure.
9607  *
9608  * This routine abort all pending discovery commands and
9609  * start a timer to retry FLOGI for the physical port
9610  * discovery.
9611  **/
9612 void
9613 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
9614 {
9615 	struct lpfc_nodelist *ndlp;
9616 
9617 	/* Cancel the all vports retry delay retry timers */
9618 	lpfc_cancel_all_vport_retry_delay_timer(phba);
9619 
9620 	/* If fabric require FLOGI, then re-instantiate physical login */
9621 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
9622 	if (!ndlp)
9623 		return;
9624 
9625 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
9626 	spin_lock_irq(&ndlp->lock);
9627 	ndlp->nlp_flag |= NLP_DELAY_TMO;
9628 	spin_unlock_irq(&ndlp->lock);
9629 	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
9630 	phba->pport->port_state = LPFC_FLOGI;
9631 	return;
9632 }
9633 
9634 /**
9635  * lpfc_fabric_login_reqd - Check if FLOGI required.
9636  * @phba: pointer to lpfc hba data structure.
9637  * @cmdiocb: pointer to FDISC command iocb.
9638  * @rspiocb: pointer to FDISC response iocb.
9639  *
9640  * This routine checks if a FLOGI is reguired for FDISC
9641  * to succeed.
9642  **/
9643 static int
9644 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
9645 		struct lpfc_iocbq *cmdiocb,
9646 		struct lpfc_iocbq *rspiocb)
9647 {
9648 
9649 	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
9650 		(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
9651 		return 0;
9652 	else
9653 		return 1;
9654 }
9655 
9656 /**
9657  * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
9658  * @phba: pointer to lpfc hba data structure.
9659  * @cmdiocb: pointer to lpfc command iocb data structure.
9660  * @rspiocb: pointer to lpfc response iocb data structure.
9661  *
9662  * This routine is the completion callback function to a Fabric Discover
9663  * (FDISC) ELS command. Since all the FDISC ELS commands are issued
9664  * single threaded, each FDISC completion callback function will reset
9665  * the discovery timer for all vports such that the timers will not get
9666  * unnecessary timeout. The function checks the FDISC IOCB status. If error
9667  * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
9668  * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
9669  * assigned to the vport has been changed with the completion of the FDISC
9670  * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
9671  * are unregistered from the HBA, and then the lpfc_register_new_vport()
9672  * routine is invoked to register new vport with the HBA. Otherwise, the
9673  * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
9674  * Server for State Change Request (SCR).
9675  **/
9676 static void
9677 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9678 		    struct lpfc_iocbq *rspiocb)
9679 {
9680 	struct lpfc_vport *vport = cmdiocb->vport;
9681 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
9682 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
9683 	struct lpfc_nodelist *np;
9684 	struct lpfc_nodelist *next_np;
9685 	IOCB_t *irsp = &rspiocb->iocb;
9686 	struct lpfc_iocbq *piocb;
9687 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
9688 	struct serv_parm *sp;
9689 	uint8_t fabric_param_changed;
9690 
9691 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9692 			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
9693 			 irsp->ulpStatus, irsp->un.ulpWord[4],
9694 			 vport->fc_prevDID);
9695 	/* Since all FDISCs are being single threaded, we
9696 	 * must reset the discovery timer for ALL vports
9697 	 * waiting to send FDISC when one completes.
9698 	 */
9699 	list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
9700 		lpfc_set_disctmo(piocb->vport);
9701 	}
9702 
9703 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9704 		"FDISC cmpl:      status:x%x/x%x prevdid:x%x",
9705 		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
9706 
9707 	if (irsp->ulpStatus) {
9708 
9709 		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
9710 			lpfc_retry_pport_discovery(phba);
9711 			goto out;
9712 		}
9713 
9714 		/* Check for retry */
9715 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
9716 			goto out;
9717 		/* FDISC failed */
9718 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9719 				 "0126 FDISC failed. (x%x/x%x)\n",
9720 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
9721 		goto fdisc_failed;
9722 	}
9723 	spin_lock_irq(shost->host_lock);
9724 	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
9725 	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
9726 	vport->fc_flag |= FC_FABRIC;
9727 	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
9728 		vport->fc_flag |=  FC_PUBLIC_LOOP;
9729 	spin_unlock_irq(shost->host_lock);
9730 
9731 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
9732 	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
9733 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
9734 	if (!prsp)
9735 		goto out;
9736 	sp = prsp->virt + sizeof(uint32_t);
9737 	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
9738 	memcpy(&vport->fabric_portname, &sp->portName,
9739 		sizeof(struct lpfc_name));
9740 	memcpy(&vport->fabric_nodename, &sp->nodeName,
9741 		sizeof(struct lpfc_name));
9742 	if (fabric_param_changed &&
9743 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
9744 		/* If our NportID changed, we need to ensure all
9745 		 * remaining NPORTs get unreg_login'ed so we can
9746 		 * issue unreg_vpi.
9747 		 */
9748 		list_for_each_entry_safe(np, next_np,
9749 			&vport->fc_nodes, nlp_listp) {
9750 			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
9751 			    !(np->nlp_flag & NLP_NPR_ADISC))
9752 				continue;
9753 			spin_lock_irq(&ndlp->lock);
9754 			np->nlp_flag &= ~NLP_NPR_ADISC;
9755 			spin_unlock_irq(&ndlp->lock);
9756 			lpfc_unreg_rpi(vport, np);
9757 		}
9758 		lpfc_cleanup_pending_mbox(vport);
9759 
9760 		if (phba->sli_rev == LPFC_SLI_REV4)
9761 			lpfc_sli4_unreg_all_rpis(vport);
9762 
9763 		lpfc_mbx_unreg_vpi(vport);
9764 		spin_lock_irq(shost->host_lock);
9765 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
9766 		if (phba->sli_rev == LPFC_SLI_REV4)
9767 			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
9768 		else
9769 			vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
9770 		spin_unlock_irq(shost->host_lock);
9771 	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
9772 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
9773 		/*
9774 		 * Driver needs to re-reg VPI in order for f/w
9775 		 * to update the MAC address.
9776 		 */
9777 		lpfc_register_new_vport(phba, vport, ndlp);
9778 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
9779 		goto out;
9780 	}
9781 
9782 	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
9783 		lpfc_issue_init_vpi(vport);
9784 	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
9785 		lpfc_register_new_vport(phba, vport, ndlp);
9786 	else
9787 		lpfc_do_scr_ns_plogi(phba, vport);
9788 
9789 	/* The FDISC completed successfully. Move the fabric ndlp to
9790 	 * UNMAPPED state and register with the transport.
9791 	 */
9792 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
9793 	goto out;
9794 
9795 fdisc_failed:
9796 	if (vport->fc_vport &&
9797 	    (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
9798 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9799 	/* Cancel discovery timer */
9800 	lpfc_can_disctmo(vport);
9801 out:
9802 	lpfc_els_free_iocb(phba, cmdiocb);
9803 	lpfc_nlp_put(ndlp);
9804 }
9805 
9806 /**
9807  * lpfc_issue_els_fdisc - Issue a fdisc iocb command
9808  * @vport: pointer to a virtual N_Port data structure.
9809  * @ndlp: pointer to a node-list data structure.
9810  * @retry: number of retries to the command IOCB.
9811  *
9812  * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
9813  * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
9814  * routine to issue the IOCB, which makes sure only one outstanding fabric
9815  * IOCB will be sent off HBA at any given time.
9816  *
9817  * Note that the ndlp reference count will be incremented by 1 for holding the
9818  * ndlp and the reference to ndlp will be stored into the context1 field of
9819  * the IOCB for the completion callback function to the FDISC ELS command.
9820  *
9821  * Return code
9822  *   0 - Successfully issued fdisc iocb command
9823  *   1 - Failed to issue fdisc iocb command
9824  **/
9825 static int
9826 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
9827 		     uint8_t retry)
9828 {
9829 	struct lpfc_hba *phba = vport->phba;
9830 	IOCB_t *icmd;
9831 	struct lpfc_iocbq *elsiocb;
9832 	struct serv_parm *sp;
9833 	uint8_t *pcmd;
9834 	uint16_t cmdsize;
9835 	int did = ndlp->nlp_DID;
9836 	int rc;
9837 
9838 	vport->port_state = LPFC_FDISC;
9839 	vport->fc_myDID = 0;
9840 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
9841 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
9842 				     ELS_CMD_FDISC);
9843 	if (!elsiocb) {
9844 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9845 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9846 				 "0255 Issue FDISC: no IOCB\n");
9847 		return 1;
9848 	}
9849 
9850 	icmd = &elsiocb->iocb;
9851 	icmd->un.elsreq64.myID = 0;
9852 	icmd->un.elsreq64.fl = 1;
9853 
9854 	/*
9855 	 * SLI3 ports require a different context type value than SLI4.
9856 	 * Catch SLI3 ports here and override the prep.
9857 	 */
9858 	if (phba->sli_rev == LPFC_SLI_REV3) {
9859 		icmd->ulpCt_h = 1;
9860 		icmd->ulpCt_l = 0;
9861 	}
9862 
9863 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
9864 	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
9865 	pcmd += sizeof(uint32_t); /* CSP Word 1 */
9866 	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
9867 	sp = (struct serv_parm *) pcmd;
9868 	/* Setup CSPs accordingly for Fabric */
9869 	sp->cmn.e_d_tov = 0;
9870 	sp->cmn.w2.r_a_tov = 0;
9871 	sp->cmn.virtual_fabric_support = 0;
9872 	sp->cls1.classValid = 0;
9873 	sp->cls2.seqDelivery = 1;
9874 	sp->cls3.seqDelivery = 1;
9875 
9876 	pcmd += sizeof(uint32_t); /* CSP Word 2 */
9877 	pcmd += sizeof(uint32_t); /* CSP Word 3 */
9878 	pcmd += sizeof(uint32_t); /* CSP Word 4 */
9879 	pcmd += sizeof(uint32_t); /* Port Name */
9880 	memcpy(pcmd, &vport->fc_portname, 8);
9881 	pcmd += sizeof(uint32_t); /* Node Name */
9882 	pcmd += sizeof(uint32_t); /* Node Name */
9883 	memcpy(pcmd, &vport->fc_nodename, 8);
9884 	sp->cmn.valid_vendor_ver_level = 0;
9885 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
9886 	lpfc_set_disctmo(vport);
9887 
9888 	phba->fc_stat.elsXmitFDISC++;
9889 	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
9890 
9891 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9892 		"Issue FDISC:     did:x%x",
9893 		did, 0, 0);
9894 
9895 	elsiocb->context1 = lpfc_nlp_get(ndlp);
9896 	if (!elsiocb->context1) {
9897 		lpfc_els_free_iocb(phba, elsiocb);
9898 		goto err_out;
9899 	}
9900 
9901 	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
9902 	if (rc == IOCB_ERROR) {
9903 		lpfc_els_free_iocb(phba, elsiocb);
9904 		lpfc_nlp_put(ndlp);
9905 		goto err_out;
9906 	}
9907 
9908 	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
9909 	return 0;
9910 
9911  err_out:
9912 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9913 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9914 			 "0256 Issue FDISC: Cannot send IOCB\n");
9915 	return 1;
9916 }
9917 
9918 /**
9919  * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
9920  * @phba: pointer to lpfc hba data structure.
9921  * @cmdiocb: pointer to lpfc command iocb data structure.
9922  * @rspiocb: pointer to lpfc response iocb data structure.
9923  *
9924  * This routine is the completion callback function to the issuing of a LOGO
9925  * ELS command off a vport. It frees the command IOCB and then decrement the
9926  * reference count held on ndlp for this completion function, indicating that
9927  * the reference to the ndlp is no long needed. Note that the
9928  * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
9929  * callback function and an additional explicit ndlp reference decrementation
9930  * will trigger the actual release of the ndlp.
9931  **/
9932 static void
9933 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9934 			struct lpfc_iocbq *rspiocb)
9935 {
9936 	struct lpfc_vport *vport = cmdiocb->vport;
9937 	IOCB_t *irsp;
9938 	struct lpfc_nodelist *ndlp;
9939 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9940 
9941 	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
9942 	irsp = &rspiocb->iocb;
9943 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9944 		"LOGO npiv cmpl:  status:x%x/x%x did:x%x",
9945 		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
9946 
9947 	/* NPIV LOGO completes to NPort <nlp_DID> */
9948 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9949 			 "2928 NPIV LOGO completes to NPort x%x "
9950 			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
9951 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
9952 			 irsp->ulpTimeout, vport->num_disc_nodes,
9953 			 kref_read(&ndlp->kref), ndlp->nlp_flag,
9954 			 ndlp->fc4_xpt_flags);
9955 
9956 	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
9957 		spin_lock_irq(shost->host_lock);
9958 		vport->fc_flag &= ~FC_NDISC_ACTIVE;
9959 		vport->fc_flag &= ~FC_FABRIC;
9960 		spin_unlock_irq(shost->host_lock);
9961 		lpfc_can_disctmo(vport);
9962 	}
9963 
9964 	/* Safe to release resources now. */
9965 	lpfc_els_free_iocb(phba, cmdiocb);
9966 	lpfc_nlp_put(ndlp);
9967 	vport->unreg_vpi_cmpl = VPORT_ERROR;
9968 }
9969 
9970 /**
9971  * lpfc_issue_els_npiv_logo - Issue a logo off a vport
9972  * @vport: pointer to a virtual N_Port data structure.
9973  * @ndlp: pointer to a node-list data structure.
9974  *
9975  * This routine issues a LOGO ELS command to an @ndlp off a @vport.
9976  *
9977  * Note that the ndlp reference count will be incremented by 1 for holding the
9978  * ndlp and the reference to ndlp will be stored into the context1 field of
9979  * the IOCB for the completion callback function to the LOGO ELS command.
9980  *
9981  * Return codes
9982  *   0 - Successfully issued logo off the @vport
9983  *   1 - Failed to issue logo off the @vport
9984  **/
9985 int
9986 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
9987 {
9988 	int rc = 0;
9989 	struct lpfc_hba  *phba = vport->phba;
9990 	struct lpfc_iocbq *elsiocb;
9991 	uint8_t *pcmd;
9992 	uint16_t cmdsize;
9993 
9994 	cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
9995 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
9996 				     ELS_CMD_LOGO);
9997 	if (!elsiocb)
9998 		return 1;
9999 
10000 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
10001 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
10002 	pcmd += sizeof(uint32_t);
10003 
10004 	/* Fill in LOGO payload */
10005 	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
10006 	pcmd += sizeof(uint32_t);
10007 	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
10008 
10009 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
10010 		"Issue LOGO npiv  did:x%x flg:x%x",
10011 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
10012 
10013 	elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
10014 	spin_lock_irq(&ndlp->lock);
10015 	ndlp->nlp_flag |= NLP_LOGO_SND;
10016 	spin_unlock_irq(&ndlp->lock);
10017 	elsiocb->context1 = lpfc_nlp_get(ndlp);
10018 	if (!elsiocb->context1) {
10019 		lpfc_els_free_iocb(phba, elsiocb);
10020 		goto err;
10021 	}
10022 
10023 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
10024 	if (rc == IOCB_ERROR) {
10025 		lpfc_els_free_iocb(phba, elsiocb);
10026 		lpfc_nlp_put(ndlp);
10027 		goto err;
10028 	}
10029 	return 0;
10030 
10031 err:
10032 	spin_lock_irq(&ndlp->lock);
10033 	ndlp->nlp_flag &= ~NLP_LOGO_SND;
10034 	spin_unlock_irq(&ndlp->lock);
10035 	return 1;
10036 }
10037 
10038 /**
10039  * lpfc_fabric_block_timeout - Handler function to the fabric block timer
10040  * @t: timer context used to obtain the lpfc hba.
10041  *
10042  * This routine is invoked by the fabric iocb block timer after
10043  * timeout. It posts the fabric iocb block timeout event by setting the
10044  * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
10045  * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
10046  * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
10047  * posted event WORKER_FABRIC_BLOCK_TMO.
10048  **/
10049 void
10050 lpfc_fabric_block_timeout(struct timer_list *t)
10051 {
10052 	struct lpfc_hba  *phba = from_timer(phba, t, fabric_block_timer);
10053 	unsigned long iflags;
10054 	uint32_t tmo_posted;
10055 
10056 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
10057 	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
10058 	if (!tmo_posted)
10059 		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
10060 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
10061 
10062 	if (!tmo_posted)
10063 		lpfc_worker_wake_up(phba);
10064 	return;
10065 }
10066 
10067 /**
10068  * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
10069  * @phba: pointer to lpfc hba data structure.
10070  *
10071  * This routine issues one fabric iocb from the driver internal list to
10072  * the HBA. It first checks whether it's ready to issue one fabric iocb to
10073  * the HBA (whether there is no outstanding fabric iocb). If so, it shall
10074  * remove one pending fabric iocb from the driver internal list and invokes
10075  * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
10076  **/
10077 static void
10078 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
10079 {
10080 	struct lpfc_iocbq *iocb;
10081 	unsigned long iflags;
10082 	int ret;
10083 	IOCB_t *cmd;
10084 
10085 repeat:
10086 	iocb = NULL;
10087 	spin_lock_irqsave(&phba->hbalock, iflags);
10088 	/* Post any pending iocb to the SLI layer */
10089 	if (atomic_read(&phba->fabric_iocb_count) == 0) {
10090 		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
10091 				 list);
10092 		if (iocb)
10093 			/* Increment fabric iocb count to hold the position */
10094 			atomic_inc(&phba->fabric_iocb_count);
10095 	}
10096 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10097 	if (iocb) {
10098 		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
10099 		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
10100 		iocb->iocb_flag |= LPFC_IO_FABRIC;
10101 
10102 		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
10103 			"Fabric sched1:   ste:x%x",
10104 			iocb->vport->port_state, 0, 0);
10105 
10106 		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
10107 
10108 		if (ret == IOCB_ERROR) {
10109 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
10110 			iocb->fabric_iocb_cmpl = NULL;
10111 			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
10112 			cmd = &iocb->iocb;
10113 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
10114 			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
10115 			iocb->iocb_cmpl(phba, iocb, iocb);
10116 
10117 			atomic_dec(&phba->fabric_iocb_count);
10118 			goto repeat;
10119 		}
10120 	}
10121 }
10122 
10123 /**
10124  * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
10125  * @phba: pointer to lpfc hba data structure.
10126  *
10127  * This routine unblocks the  issuing fabric iocb command. The function
10128  * will clear the fabric iocb block bit and then invoke the routine
10129  * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
10130  * from the driver internal fabric iocb list.
10131  **/
10132 void
10133 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
10134 {
10135 	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
10136 
10137 	lpfc_resume_fabric_iocbs(phba);
10138 	return;
10139 }
10140 
10141 /**
10142  * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
10143  * @phba: pointer to lpfc hba data structure.
10144  *
10145  * This routine blocks the issuing fabric iocb for a specified amount of
10146  * time (currently 100 ms). This is done by set the fabric iocb block bit
10147  * and set up a timeout timer for 100ms. When the block bit is set, no more
10148  * fabric iocb will be issued out of the HBA.
10149  **/
10150 static void
10151 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
10152 {
10153 	int blocked;
10154 
10155 	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
10156 	/* Start a timer to unblock fabric iocbs after 100ms */
10157 	if (!blocked)
10158 		mod_timer(&phba->fabric_block_timer,
10159 			  jiffies + msecs_to_jiffies(100));
10160 
10161 	return;
10162 }
10163 
10164 /**
10165  * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
10166  * @phba: pointer to lpfc hba data structure.
10167  * @cmdiocb: pointer to lpfc command iocb data structure.
10168  * @rspiocb: pointer to lpfc response iocb data structure.
10169  *
10170  * This routine is the callback function that is put to the fabric iocb's
10171  * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
10172  * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
10173  * function first restores and invokes the original iocb's callback function
10174  * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
10175  * fabric bound iocb from the driver internal fabric iocb list onto the wire.
10176  **/
10177 static void
10178 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10179 	struct lpfc_iocbq *rspiocb)
10180 {
10181 	struct ls_rjt stat;
10182 
10183 	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
10184 
10185 	switch (rspiocb->iocb.ulpStatus) {
10186 		case IOSTAT_NPORT_RJT:
10187 		case IOSTAT_FABRIC_RJT:
10188 			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
10189 				lpfc_block_fabric_iocbs(phba);
10190 			}
10191 			break;
10192 
10193 		case IOSTAT_NPORT_BSY:
10194 		case IOSTAT_FABRIC_BSY:
10195 			lpfc_block_fabric_iocbs(phba);
10196 			break;
10197 
10198 		case IOSTAT_LS_RJT:
10199 			stat.un.lsRjtError =
10200 				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
10201 			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
10202 				(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
10203 				lpfc_block_fabric_iocbs(phba);
10204 			break;
10205 	}
10206 
10207 	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
10208 
10209 	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
10210 	cmdiocb->fabric_iocb_cmpl = NULL;
10211 	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
10212 	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
10213 
10214 	atomic_dec(&phba->fabric_iocb_count);
10215 	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
10216 		/* Post any pending iocbs to HBA */
10217 		lpfc_resume_fabric_iocbs(phba);
10218 	}
10219 }
10220 
10221 /**
10222  * lpfc_issue_fabric_iocb - Issue a fabric iocb command
10223  * @phba: pointer to lpfc hba data structure.
10224  * @iocb: pointer to lpfc command iocb data structure.
10225  *
10226  * This routine is used as the top-level API for issuing a fabric iocb command
10227  * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver
10228  * function makes sure that only one fabric bound iocb will be outstanding at
10229  * any given time. As such, this function will first check to see whether there
10230  * is already an outstanding fabric iocb on the wire. If so, it will put the
10231  * newly issued iocb onto the driver internal fabric iocb list, waiting to be
10232  * issued later. Otherwise, it will issue the iocb on the wire and update the
10233  * fabric iocb count it indicate that there is one fabric iocb on the wire.
10234  *
10235  * Note, this implementation has a potential sending out fabric IOCBs out of
10236  * order. The problem is caused by the construction of the "ready" boolen does
10237  * not include the condition that the internal fabric IOCB list is empty. As
10238  * such, it is possible a fabric IOCB issued by this routine might be "jump"
10239  * ahead of the fabric IOCBs in the internal list.
10240  *
10241  * Return code
10242  *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
10243  *   IOCB_ERROR - failed to issue fabric iocb
10244  **/
10245 static int
10246 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
10247 {
10248 	unsigned long iflags;
10249 	int ready;
10250 	int ret;
10251 
10252 	BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
10253 
10254 	spin_lock_irqsave(&phba->hbalock, iflags);
10255 	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
10256 		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
10257 
10258 	if (ready)
10259 		/* Increment fabric iocb count to hold the position */
10260 		atomic_inc(&phba->fabric_iocb_count);
10261 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10262 	if (ready) {
10263 		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
10264 		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
10265 		iocb->iocb_flag |= LPFC_IO_FABRIC;
10266 
10267 		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
10268 			"Fabric sched2:   ste:x%x",
10269 			iocb->vport->port_state, 0, 0);
10270 
10271 		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
10272 
10273 		if (ret == IOCB_ERROR) {
10274 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
10275 			iocb->fabric_iocb_cmpl = NULL;
10276 			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
10277 			atomic_dec(&phba->fabric_iocb_count);
10278 		}
10279 	} else {
10280 		spin_lock_irqsave(&phba->hbalock, iflags);
10281 		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
10282 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10283 		ret = IOCB_SUCCESS;
10284 	}
10285 	return ret;
10286 }
10287 
10288 /**
10289  * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
10290  * @vport: pointer to a virtual N_Port data structure.
10291  *
10292  * This routine aborts all the IOCBs associated with a @vport from the
10293  * driver internal fabric IOCB list. The list contains fabric IOCBs to be
10294  * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
10295  * list, removes each IOCB associated with the @vport off the list, set the
10296  * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
10297  * associated with the IOCB.
10298  **/
10299 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
10300 {
10301 	LIST_HEAD(completions);
10302 	struct lpfc_hba  *phba = vport->phba;
10303 	struct lpfc_iocbq *tmp_iocb, *piocb;
10304 
10305 	spin_lock_irq(&phba->hbalock);
10306 	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
10307 				 list) {
10308 
10309 		if (piocb->vport != vport)
10310 			continue;
10311 
10312 		list_move_tail(&piocb->list, &completions);
10313 	}
10314 	spin_unlock_irq(&phba->hbalock);
10315 
10316 	/* Cancel all the IOCBs from the completions list */
10317 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10318 			      IOERR_SLI_ABORTED);
10319 }
10320 
10321 /**
10322  * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
10323  * @ndlp: pointer to a node-list data structure.
10324  *
10325  * This routine aborts all the IOCBs associated with an @ndlp from the
10326  * driver internal fabric IOCB list. The list contains fabric IOCBs to be
10327  * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
10328  * list, removes each IOCB associated with the @ndlp off the list, set the
10329  * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
10330  * associated with the IOCB.
10331  **/
10332 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
10333 {
10334 	LIST_HEAD(completions);
10335 	struct lpfc_hba  *phba = ndlp->phba;
10336 	struct lpfc_iocbq *tmp_iocb, *piocb;
10337 	struct lpfc_sli_ring *pring;
10338 
10339 	pring = lpfc_phba_elsring(phba);
10340 
10341 	if (unlikely(!pring))
10342 		return;
10343 
10344 	spin_lock_irq(&phba->hbalock);
10345 	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
10346 				 list) {
10347 		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
10348 
10349 			list_move_tail(&piocb->list, &completions);
10350 		}
10351 	}
10352 	spin_unlock_irq(&phba->hbalock);
10353 
10354 	/* Cancel all the IOCBs from the completions list */
10355 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10356 			      IOERR_SLI_ABORTED);
10357 }
10358 
10359 /**
10360  * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
10361  * @phba: pointer to lpfc hba data structure.
10362  *
10363  * This routine aborts all the IOCBs currently on the driver internal
10364  * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
10365  * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
10366  * list, removes IOCBs off the list, set the status field to
10367  * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
10368  * the IOCB.
10369  **/
10370 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
10371 {
10372 	LIST_HEAD(completions);
10373 
10374 	spin_lock_irq(&phba->hbalock);
10375 	list_splice_init(&phba->fabric_iocb_list, &completions);
10376 	spin_unlock_irq(&phba->hbalock);
10377 
10378 	/* Cancel all the IOCBs from the completions list */
10379 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10380 			      IOERR_SLI_ABORTED);
10381 }
10382 
10383 /**
10384  * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
10385  * @vport: pointer to lpfc vport data structure.
10386  *
10387  * This routine is invoked by the vport cleanup for deletions and the cleanup
10388  * for an ndlp on removal.
10389  **/
10390 void
10391 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
10392 {
10393 	struct lpfc_hba *phba = vport->phba;
10394 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
10395 	unsigned long iflag = 0;
10396 
10397 	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
10398 	list_for_each_entry_safe(sglq_entry, sglq_next,
10399 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
10400 		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
10401 			lpfc_nlp_put(sglq_entry->ndlp);
10402 			sglq_entry->ndlp = NULL;
10403 		}
10404 	}
10405 	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
10406 	return;
10407 }
10408 
10409 /**
10410  * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
10411  * @phba: pointer to lpfc hba data structure.
10412  * @axri: pointer to the els xri abort wcqe structure.
10413  *
10414  * This routine is invoked by the worker thread to process a SLI4 slow-path
10415  * ELS aborted xri.
10416  **/
10417 void
10418 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
10419 			  struct sli4_wcqe_xri_aborted *axri)
10420 {
10421 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
10422 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
10423 	uint16_t lxri = 0;
10424 
10425 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
10426 	unsigned long iflag = 0;
10427 	struct lpfc_nodelist *ndlp;
10428 	struct lpfc_sli_ring *pring;
10429 
10430 	pring = lpfc_phba_elsring(phba);
10431 
10432 	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
10433 	list_for_each_entry_safe(sglq_entry, sglq_next,
10434 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
10435 		if (sglq_entry->sli4_xritag == xri) {
10436 			list_del(&sglq_entry->list);
10437 			ndlp = sglq_entry->ndlp;
10438 			sglq_entry->ndlp = NULL;
10439 			list_add_tail(&sglq_entry->list,
10440 				&phba->sli4_hba.lpfc_els_sgl_list);
10441 			sglq_entry->state = SGL_FREED;
10442 			spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
10443 					       iflag);
10444 
10445 			if (ndlp) {
10446 				lpfc_set_rrq_active(phba, ndlp,
10447 					sglq_entry->sli4_lxritag,
10448 					rxid, 1);
10449 				lpfc_nlp_put(ndlp);
10450 			}
10451 
10452 			/* Check if TXQ queue needs to be serviced */
10453 			if (pring && !list_empty(&pring->txq))
10454 				lpfc_worker_wake_up(phba);
10455 			return;
10456 		}
10457 	}
10458 	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
10459 	lxri = lpfc_sli4_xri_inrange(phba, xri);
10460 	if (lxri == NO_XRI)
10461 		return;
10462 
10463 	spin_lock_irqsave(&phba->hbalock, iflag);
10464 	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
10465 	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
10466 		spin_unlock_irqrestore(&phba->hbalock, iflag);
10467 		return;
10468 	}
10469 	sglq_entry->state = SGL_XRI_ABORTED;
10470 	spin_unlock_irqrestore(&phba->hbalock, iflag);
10471 	return;
10472 }
10473 
10474 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
10475  * @vport: pointer to virtual port object.
10476  * @ndlp: nodelist pointer for the impacted node.
10477  *
10478  * The driver calls this routine in response to an SLI4 XRI ABORT CQE
10479  * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
10480  * the driver is required to send a LOGO to the remote node before it
10481  * attempts to recover its login to the remote node.
10482  */
10483 void
10484 lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
10485 			   struct lpfc_nodelist *ndlp)
10486 {
10487 	struct Scsi_Host *shost;
10488 	struct lpfc_hba *phba;
10489 	unsigned long flags = 0;
10490 
10491 	shost = lpfc_shost_from_vport(vport);
10492 	phba = vport->phba;
10493 	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
10494 		lpfc_printf_log(phba, KERN_INFO,
10495 				LOG_SLI, "3093 No rport recovery needed. "
10496 				"rport in state 0x%x\n", ndlp->nlp_state);
10497 		return;
10498 	}
10499 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10500 			"3094 Start rport recovery on shost id 0x%x "
10501 			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
10502 			"flags 0x%x\n",
10503 			shost->host_no, ndlp->nlp_DID,
10504 			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
10505 			ndlp->nlp_flag);
10506 	/*
10507 	 * The rport is not responding.  Remove the FCP-2 flag to prevent
10508 	 * an ADISC in the follow-up recovery code.
10509 	 */
10510 	spin_lock_irqsave(&ndlp->lock, flags);
10511 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
10512 	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
10513 	spin_unlock_irqrestore(&ndlp->lock, flags);
10514 	lpfc_unreg_rpi(vport, ndlp);
10515 }
10516 
10517 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport)
10518 {
10519 	bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE);
10520 }
10521 
10522 static void
10523 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max)
10524 {
10525 	u32 i;
10526 
10527 	if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE))
10528 		return;
10529 
10530 	for (i = min; i <= max; i++)
10531 		set_bit(i, vport->vmid_priority_range);
10532 }
10533 
10534 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid)
10535 {
10536 	set_bit(ctcl_vmid, vport->vmid_priority_range);
10537 }
10538 
10539 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport)
10540 {
10541 	u32 i;
10542 
10543 	i = find_first_bit(vport->vmid_priority_range,
10544 			   LPFC_VMID_MAX_PRIORITY_RANGE);
10545 
10546 	if (i == LPFC_VMID_MAX_PRIORITY_RANGE)
10547 		return 0;
10548 
10549 	clear_bit(i, vport->vmid_priority_range);
10550 	return i;
10551 }
10552 
10553 #define MAX_PRIORITY_DESC	255
10554 
10555 static void
10556 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10557 		   struct lpfc_iocbq *rspiocb)
10558 {
10559 	struct lpfc_vport *vport = cmdiocb->vport;
10560 	struct priority_range_desc *desc;
10561 	struct lpfc_dmabuf *prsp = NULL;
10562 	struct lpfc_vmid_priority_range *vmid_range = NULL;
10563 	u32 *data;
10564 	struct lpfc_dmabuf *dmabuf = cmdiocb->context2;
10565 	IOCB_t *irsp = &rspiocb->iocb;
10566 	u8 *pcmd, max_desc;
10567 	u32 len, i;
10568 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
10569 
10570 	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
10571 	if (!prsp)
10572 		goto out;
10573 
10574 	pcmd = prsp->virt;
10575 	data = (u32 *)pcmd;
10576 	if (data[0] == ELS_CMD_LS_RJT) {
10577 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
10578 				 "3277 QFPA LS_RJT x%x  x%x\n",
10579 				 data[0], data[1]);
10580 		goto out;
10581 	}
10582 	if (irsp->ulpStatus) {
10583 		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
10584 				 "6529 QFPA failed with status x%x  x%x\n",
10585 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
10586 		goto out;
10587 	}
10588 
10589 	if (!vport->qfpa_res) {
10590 		max_desc = FCELSSIZE / sizeof(*vport->qfpa_res);
10591 		vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res),
10592 					  GFP_KERNEL);
10593 		if (!vport->qfpa_res)
10594 			goto out;
10595 	}
10596 
10597 	len = *((u32 *)(pcmd + 4));
10598 	len = be32_to_cpu(len);
10599 	memcpy(vport->qfpa_res, pcmd, len + 8);
10600 	len = len / LPFC_PRIORITY_RANGE_DESC_SIZE;
10601 
10602 	desc = (struct priority_range_desc *)(pcmd + 8);
10603 	vmid_range = vport->vmid_priority.vmid_range;
10604 	if (!vmid_range) {
10605 		vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range),
10606 				     GFP_KERNEL);
10607 		if (!vmid_range) {
10608 			kfree(vport->qfpa_res);
10609 			goto out;
10610 		}
10611 		vport->vmid_priority.vmid_range = vmid_range;
10612 	}
10613 	vport->vmid_priority.num_descriptors = len;
10614 
10615 	for (i = 0; i < len; i++, vmid_range++, desc++) {
10616 		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
10617 				 "6539 vmid values low=%d, high=%d, qos=%d, "
10618 				 "local ve id=%d\n", desc->lo_range,
10619 				 desc->hi_range, desc->qos_priority,
10620 				 desc->local_ve_id);
10621 
10622 		vmid_range->low = desc->lo_range << 1;
10623 		if (desc->local_ve_id == QFPA_ODD_ONLY)
10624 			vmid_range->low++;
10625 		if (desc->qos_priority)
10626 			vport->vmid_flag |= LPFC_VMID_QOS_ENABLED;
10627 		vmid_range->qos = desc->qos_priority;
10628 
10629 		vmid_range->high = desc->hi_range << 1;
10630 		if ((desc->local_ve_id == QFPA_ODD_ONLY) ||
10631 		    (desc->local_ve_id == QFPA_EVEN_ODD))
10632 			vmid_range->high++;
10633 	}
10634 	lpfc_init_cs_ctl_bitmap(vport);
10635 	for (i = 0; i < vport->vmid_priority.num_descriptors; i++) {
10636 		lpfc_vmid_set_cs_ctl_range(vport,
10637 				vport->vmid_priority.vmid_range[i].low,
10638 				vport->vmid_priority.vmid_range[i].high);
10639 	}
10640 
10641 	vport->vmid_flag |= LPFC_VMID_QFPA_CMPL;
10642  out:
10643 	lpfc_els_free_iocb(phba, cmdiocb);
10644 	lpfc_nlp_put(ndlp);
10645 }
10646 
10647 int lpfc_issue_els_qfpa(struct lpfc_vport *vport)
10648 {
10649 	struct lpfc_hba *phba = vport->phba;
10650 	struct lpfc_nodelist *ndlp;
10651 	struct lpfc_iocbq *elsiocb;
10652 	u8 *pcmd;
10653 	int ret;
10654 
10655 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
10656 	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
10657 		return -ENXIO;
10658 
10659 	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp,
10660 				     ndlp->nlp_DID, ELS_CMD_QFPA);
10661 	if (!elsiocb)
10662 		return -ENOMEM;
10663 
10664 	pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
10665 
10666 	*((u32 *)(pcmd)) = ELS_CMD_QFPA;
10667 	pcmd += 4;
10668 
10669 	elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa;
10670 
10671 	elsiocb->context1 = lpfc_nlp_get(ndlp);
10672 	if (!elsiocb->context1) {
10673 		lpfc_els_free_iocb(vport->phba, elsiocb);
10674 		return -ENXIO;
10675 	}
10676 
10677 	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2);
10678 	if (ret != IOCB_SUCCESS) {
10679 		lpfc_els_free_iocb(phba, elsiocb);
10680 		lpfc_nlp_put(ndlp);
10681 		return -EIO;
10682 	}
10683 	vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED;
10684 	return 0;
10685 }
10686 
10687 int
10688 lpfc_vmid_uvem(struct lpfc_vport *vport,
10689 	       struct lpfc_vmid *vmid, bool instantiated)
10690 {
10691 	struct lpfc_vem_id_desc *vem_id_desc;
10692 	struct lpfc_nodelist *ndlp;
10693 	struct lpfc_iocbq *elsiocb;
10694 	struct instantiated_ve_desc *inst_desc;
10695 	struct lpfc_vmid_context *vmid_context;
10696 	u8 *pcmd;
10697 	u32 *len;
10698 	int ret = 0;
10699 
10700 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
10701 	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
10702 		return -ENXIO;
10703 
10704 	vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL);
10705 	if (!vmid_context)
10706 		return -ENOMEM;
10707 	elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2,
10708 				     ndlp, Fabric_DID, ELS_CMD_UVEM);
10709 	if (!elsiocb)
10710 		goto out;
10711 
10712 	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS,
10713 			 "3427 Host vmid %s %d\n",
10714 			 vmid->host_vmid, instantiated);
10715 	vmid_context->vmp = vmid;
10716 	vmid_context->nlp = ndlp;
10717 	vmid_context->instantiated = instantiated;
10718 	elsiocb->vmid_tag.vmid_context = vmid_context;
10719 	pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
10720 
10721 	if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
10722 		memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
10723 		       LPFC_COMPRESS_VMID_SIZE);
10724 
10725 	*((u32 *)(pcmd)) = ELS_CMD_UVEM;
10726 	len = (u32 *)(pcmd + 4);
10727 	*len = cpu_to_be32(LPFC_UVEM_SIZE - 8);
10728 
10729 	vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8);
10730 	vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
10731 	vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
10732 	memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
10733 	       LPFC_COMPRESS_VMID_SIZE);
10734 
10735 	inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
10736 	inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
10737 	inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
10738 	memcpy(inst_desc->global_vem_id, vmid->host_vmid,
10739 	       LPFC_COMPRESS_VMID_SIZE);
10740 
10741 	bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
10742 	bf_set(lpfc_instantiated_local_id, inst_desc,
10743 	       vmid->un.cs_ctl_vmid);
10744 	if (instantiated) {
10745 		inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
10746 	} else {
10747 		inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG);
10748 		lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid);
10749 	}
10750 	inst_desc->word6 = cpu_to_be32(inst_desc->word6);
10751 
10752 	elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem;
10753 
10754 	elsiocb->context1 = lpfc_nlp_get(ndlp);
10755 	if (!elsiocb->context1) {
10756 		lpfc_els_free_iocb(vport->phba, elsiocb);
10757 		goto out;
10758 	}
10759 
10760 	ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0);
10761 	if (ret != IOCB_SUCCESS) {
10762 		lpfc_els_free_iocb(vport->phba, elsiocb);
10763 		lpfc_nlp_put(ndlp);
10764 		goto out;
10765 	}
10766 
10767 	return 0;
10768  out:
10769 	kfree(vmid_context);
10770 	return -EIO;
10771 }
10772 
10773 static void
10774 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb,
10775 		   struct lpfc_iocbq *rspiocb)
10776 {
10777 	struct lpfc_vport *vport = icmdiocb->vport;
10778 	struct lpfc_dmabuf *prsp = NULL;
10779 	struct lpfc_vmid_context *vmid_context =
10780 	    icmdiocb->vmid_tag.vmid_context;
10781 	struct lpfc_nodelist *ndlp = icmdiocb->context1;
10782 	u8 *pcmd;
10783 	u32 *data;
10784 	IOCB_t *irsp = &rspiocb->iocb;
10785 	struct lpfc_dmabuf *dmabuf = icmdiocb->context2;
10786 	struct lpfc_vmid *vmid;
10787 
10788 	vmid = vmid_context->vmp;
10789 	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
10790 		ndlp = NULL;
10791 
10792 	prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list);
10793 	if (!prsp)
10794 		goto out;
10795 	pcmd = prsp->virt;
10796 	data = (u32 *)pcmd;
10797 	if (data[0] == ELS_CMD_LS_RJT) {
10798 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
10799 				 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]);
10800 		goto out;
10801 	}
10802 	if (irsp->ulpStatus) {
10803 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
10804 				 "4533 UVEM error status %x: %x\n",
10805 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
10806 		goto out;
10807 	}
10808 	spin_lock(&phba->hbalock);
10809 	/* Set IN USE flag */
10810 	vport->vmid_flag |= LPFC_VMID_IN_USE;
10811 	phba->pport->vmid_flag |= LPFC_VMID_IN_USE;
10812 	spin_unlock(&phba->hbalock);
10813 
10814 	if (vmid_context->instantiated) {
10815 		write_lock(&vport->vmid_lock);
10816 		vmid->flag |= LPFC_VMID_REGISTERED;
10817 		vmid->flag &= ~LPFC_VMID_REQ_REGISTER;
10818 		write_unlock(&vport->vmid_lock);
10819 	}
10820 
10821  out:
10822 	kfree(vmid_context);
10823 	lpfc_els_free_iocb(phba, icmdiocb);
10824 	lpfc_nlp_put(ndlp);
10825 }
10826