xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_els.c (revision b8d312aa)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 /* See Fibre Channel protocol T11 FC-LS for details */
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <uapi/scsi/fc/fc_fs.h>
34 #include <uapi/scsi/fc/fc_els.h>
35 
36 #include "lpfc_hw4.h"
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h"
40 #include "lpfc_nl.h"
41 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
43 #include "lpfc.h"
44 #include "lpfc_logmsg.h"
45 #include "lpfc_crtn.h"
46 #include "lpfc_vport.h"
47 #include "lpfc_debugfs.h"
48 
49 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
50 			  struct lpfc_iocbq *);
51 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
52 			struct lpfc_iocbq *);
53 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
54 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
55 				struct lpfc_nodelist *ndlp, uint8_t retry);
56 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
57 				  struct lpfc_iocbq *iocb);
58 
59 static int lpfc_max_els_tries = 3;
60 
61 /**
62  * lpfc_els_chk_latt - Check host link attention event for a vport
63  * @vport: pointer to a host virtual N_Port data structure.
64  *
65  * This routine checks whether there is an outstanding host link
66  * attention event during the discovery process with the @vport. It is done
67  * by reading the HBA's Host Attention (HA) register. If there is any host
68  * link attention events during this @vport's discovery process, the @vport
69  * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
70  * be issued if the link state is not already in host link cleared state,
71  * and a return code shall indicate whether the host link attention event
72  * had happened.
73  *
74  * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
75  * state in LPFC_VPORT_READY, the request for checking host link attention
76  * event will be ignored and a return code shall indicate no host link
77  * attention event had happened.
78  *
79  * Return codes
80  *   0 - no host link attention event happened
81  *   1 - host link attention event happened
82  **/
83 int
84 lpfc_els_chk_latt(struct lpfc_vport *vport)
85 {
86 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
87 	struct lpfc_hba  *phba = vport->phba;
88 	uint32_t ha_copy;
89 
90 	if (vport->port_state >= LPFC_VPORT_READY ||
91 	    phba->link_state == LPFC_LINK_DOWN ||
92 	    phba->sli_rev > LPFC_SLI_REV3)
93 		return 0;
94 
95 	/* Read the HBA Host Attention Register */
96 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
97 		return 1;
98 
99 	if (!(ha_copy & HA_LATT))
100 		return 0;
101 
102 	/* Pending Link Event during Discovery */
103 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
104 			 "0237 Pending Link Event during "
105 			 "Discovery: State x%x\n",
106 			 phba->pport->port_state);
107 
108 	/* CLEAR_LA should re-enable link attention events and
109 	 * we should then immediately take a LATT event. The
110 	 * LATT processing should call lpfc_linkdown() which
111 	 * will cleanup any left over in-progress discovery
112 	 * events.
113 	 */
114 	spin_lock_irq(shost->host_lock);
115 	vport->fc_flag |= FC_ABORT_DISCOVERY;
116 	spin_unlock_irq(shost->host_lock);
117 
118 	if (phba->link_state != LPFC_CLEAR_LA)
119 		lpfc_issue_clear_la(phba, vport);
120 
121 	return 1;
122 }
123 
124 /**
125  * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
126  * @vport: pointer to a host virtual N_Port data structure.
127  * @expectRsp: flag indicating whether response is expected.
128  * @cmdSize: size of the ELS command.
129  * @retry: number of retries to the command IOCB when it fails.
130  * @ndlp: pointer to a node-list data structure.
131  * @did: destination identifier.
132  * @elscmd: the ELS command code.
133  *
134  * This routine is used for allocating a lpfc-IOCB data structure from
135  * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
136  * passed into the routine for discovery state machine to issue an Extended
137  * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
138  * and preparation routine that is used by all the discovery state machine
139  * routines and the ELS command-specific fields will be later set up by
140  * the individual discovery machine routines after calling this routine
141  * allocating and preparing a generic IOCB data structure. It fills in the
142  * Buffer Descriptor Entries (BDEs), allocates buffers for both command
143  * payload and response payload (if expected). The reference count on the
144  * ndlp is incremented by 1 and the reference to the ndlp is put into
145  * context1 of the IOCB data structure for this IOCB to hold the ndlp
146  * reference for the command's callback function to access later.
147  *
148  * Return code
149  *   Pointer to the newly allocated/prepared els iocb data structure
150  *   NULL - when els iocb data structure allocation/preparation failed
151  **/
152 struct lpfc_iocbq *
153 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
154 		   uint16_t cmdSize, uint8_t retry,
155 		   struct lpfc_nodelist *ndlp, uint32_t did,
156 		   uint32_t elscmd)
157 {
158 	struct lpfc_hba  *phba = vport->phba;
159 	struct lpfc_iocbq *elsiocb;
160 	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
161 	struct ulp_bde64 *bpl;
162 	IOCB_t *icmd;
163 
164 
165 	if (!lpfc_is_link_up(phba))
166 		return NULL;
167 
168 	/* Allocate buffer for  command iocb */
169 	elsiocb = lpfc_sli_get_iocbq(phba);
170 
171 	if (elsiocb == NULL)
172 		return NULL;
173 
174 	/*
175 	 * If this command is for fabric controller and HBA running
176 	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
177 	 */
178 	if ((did == Fabric_DID) &&
179 		(phba->hba_flag & HBA_FIP_SUPPORT) &&
180 		((elscmd == ELS_CMD_FLOGI) ||
181 		 (elscmd == ELS_CMD_FDISC) ||
182 		 (elscmd == ELS_CMD_LOGO)))
183 		switch (elscmd) {
184 		case ELS_CMD_FLOGI:
185 		elsiocb->iocb_flag |=
186 			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
187 					& LPFC_FIP_ELS_ID_MASK);
188 		break;
189 		case ELS_CMD_FDISC:
190 		elsiocb->iocb_flag |=
191 			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
192 					& LPFC_FIP_ELS_ID_MASK);
193 		break;
194 		case ELS_CMD_LOGO:
195 		elsiocb->iocb_flag |=
196 			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
197 					& LPFC_FIP_ELS_ID_MASK);
198 		break;
199 		}
200 	else
201 		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
202 
203 	icmd = &elsiocb->iocb;
204 
205 	/* fill in BDEs for command */
206 	/* Allocate buffer for command payload */
207 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
208 	if (pcmd)
209 		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
210 	if (!pcmd || !pcmd->virt)
211 		goto els_iocb_free_pcmb_exit;
212 
213 	INIT_LIST_HEAD(&pcmd->list);
214 
215 	/* Allocate buffer for response payload */
216 	if (expectRsp) {
217 		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
218 		if (prsp)
219 			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
220 						     &prsp->phys);
221 		if (!prsp || !prsp->virt)
222 			goto els_iocb_free_prsp_exit;
223 		INIT_LIST_HEAD(&prsp->list);
224 	} else
225 		prsp = NULL;
226 
227 	/* Allocate buffer for Buffer ptr list */
228 	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
229 	if (pbuflist)
230 		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
231 						 &pbuflist->phys);
232 	if (!pbuflist || !pbuflist->virt)
233 		goto els_iocb_free_pbuf_exit;
234 
235 	INIT_LIST_HEAD(&pbuflist->list);
236 
237 	if (expectRsp) {
238 		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
239 		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
240 		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
241 		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
242 
243 		icmd->un.elsreq64.remoteID = did;		/* DID */
244 		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
245 		if (elscmd == ELS_CMD_FLOGI)
246 			icmd->ulpTimeout = FF_DEF_RATOV * 2;
247 		else if (elscmd == ELS_CMD_LOGO)
248 			icmd->ulpTimeout = phba->fc_ratov;
249 		else
250 			icmd->ulpTimeout = phba->fc_ratov * 2;
251 	} else {
252 		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
253 		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
254 		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
255 		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
256 		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
257 		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
258 	}
259 	icmd->ulpBdeCount = 1;
260 	icmd->ulpLe = 1;
261 	icmd->ulpClass = CLASS3;
262 
263 	/*
264 	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
265 	 * For SLI4, since the driver controls VPIs we also want to include
266 	 * all ELS pt2pt protocol traffic as well.
267 	 */
268 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
269 		((phba->sli_rev == LPFC_SLI_REV4) &&
270 		    (vport->fc_flag & FC_PT2PT))) {
271 
272 		if (expectRsp) {
273 			icmd->un.elsreq64.myID = vport->fc_myDID;
274 
275 			/* For ELS_REQUEST64_CR, use the VPI by default */
276 			icmd->ulpContext = phba->vpi_ids[vport->vpi];
277 		}
278 
279 		icmd->ulpCt_h = 0;
280 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
281 		if (elscmd == ELS_CMD_ECHO)
282 			icmd->ulpCt_l = 0; /* context = invalid RPI */
283 		else
284 			icmd->ulpCt_l = 1; /* context = VPI */
285 	}
286 
287 	bpl = (struct ulp_bde64 *) pbuflist->virt;
288 	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
289 	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
290 	bpl->tus.f.bdeSize = cmdSize;
291 	bpl->tus.f.bdeFlags = 0;
292 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
293 
294 	if (expectRsp) {
295 		bpl++;
296 		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
297 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
298 		bpl->tus.f.bdeSize = FCELSSIZE;
299 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
300 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
301 	}
302 
303 	/* prevent preparing iocb with NULL ndlp reference */
304 	elsiocb->context1 = lpfc_nlp_get(ndlp);
305 	if (!elsiocb->context1)
306 		goto els_iocb_free_pbuf_exit;
307 	elsiocb->context2 = pcmd;
308 	elsiocb->context3 = pbuflist;
309 	elsiocb->retry = retry;
310 	elsiocb->vport = vport;
311 	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
312 
313 	if (prsp) {
314 		list_add(&prsp->list, &pcmd->list);
315 	}
316 	if (expectRsp) {
317 		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
318 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
319 				 "0116 Xmit ELS command x%x to remote "
320 				 "NPORT x%x I/O tag: x%x, port state:x%x "
321 				 "rpi x%x fc_flag:x%x\n",
322 				 elscmd, did, elsiocb->iotag,
323 				 vport->port_state, ndlp->nlp_rpi,
324 				 vport->fc_flag);
325 	} else {
326 		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
327 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
328 				 "0117 Xmit ELS response x%x to remote "
329 				 "NPORT x%x I/O tag: x%x, size: x%x "
330 				 "port_state x%x  rpi x%x fc_flag x%x\n",
331 				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
332 				 cmdSize, vport->port_state,
333 				 ndlp->nlp_rpi, vport->fc_flag);
334 	}
335 	return elsiocb;
336 
337 els_iocb_free_pbuf_exit:
338 	if (expectRsp)
339 		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
340 	kfree(pbuflist);
341 
342 els_iocb_free_prsp_exit:
343 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
344 	kfree(prsp);
345 
346 els_iocb_free_pcmb_exit:
347 	kfree(pcmd);
348 	lpfc_sli_release_iocbq(phba, elsiocb);
349 	return NULL;
350 }
351 
352 /**
353  * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
354  * @vport: pointer to a host virtual N_Port data structure.
355  *
356  * This routine issues a fabric registration login for a @vport. An
357  * active ndlp node with Fabric_DID must already exist for this @vport.
358  * The routine invokes two mailbox commands to carry out fabric registration
359  * login through the HBA firmware: the first mailbox command requests the
360  * HBA to perform link configuration for the @vport; and the second mailbox
361  * command requests the HBA to perform the actual fabric registration login
362  * with the @vport.
363  *
364  * Return code
365  *   0 - successfully issued fabric registration login for @vport
366  *   -ENXIO -- failed to issue fabric registration login for @vport
367  **/
368 int
369 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
370 {
371 	struct lpfc_hba  *phba = vport->phba;
372 	LPFC_MBOXQ_t *mbox;
373 	struct lpfc_dmabuf *mp;
374 	struct lpfc_nodelist *ndlp;
375 	struct serv_parm *sp;
376 	int rc;
377 	int err = 0;
378 
379 	sp = &phba->fc_fabparam;
380 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
381 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
382 		err = 1;
383 		goto fail;
384 	}
385 
386 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
387 	if (!mbox) {
388 		err = 2;
389 		goto fail;
390 	}
391 
392 	vport->port_state = LPFC_FABRIC_CFG_LINK;
393 	lpfc_config_link(phba, mbox);
394 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
395 	mbox->vport = vport;
396 
397 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
398 	if (rc == MBX_NOT_FINISHED) {
399 		err = 3;
400 		goto fail_free_mbox;
401 	}
402 
403 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
404 	if (!mbox) {
405 		err = 4;
406 		goto fail;
407 	}
408 	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
409 			  ndlp->nlp_rpi);
410 	if (rc) {
411 		err = 5;
412 		goto fail_free_mbox;
413 	}
414 
415 	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
416 	mbox->vport = vport;
417 	/* increment the reference count on ndlp to hold reference
418 	 * for the callback routine.
419 	 */
420 	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
421 
422 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
423 	if (rc == MBX_NOT_FINISHED) {
424 		err = 6;
425 		goto fail_issue_reg_login;
426 	}
427 
428 	return 0;
429 
430 fail_issue_reg_login:
431 	/* decrement the reference count on ndlp just incremented
432 	 * for the failed mbox command.
433 	 */
434 	lpfc_nlp_put(ndlp);
435 	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
436 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
437 	kfree(mp);
438 fail_free_mbox:
439 	mempool_free(mbox, phba->mbox_mem_pool);
440 
441 fail:
442 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
443 	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
444 		"0249 Cannot issue Register Fabric login: Err %d\n", err);
445 	return -ENXIO;
446 }
447 
448 /**
449  * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
450  * @vport: pointer to a host virtual N_Port data structure.
451  *
452  * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
453  * the @vport. This mailbox command is necessary for SLI4 port only.
454  *
455  * Return code
456  *   0 - successfully issued REG_VFI for @vport
457  *   A failure code otherwise.
458  **/
459 int
460 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
461 {
462 	struct lpfc_hba  *phba = vport->phba;
463 	LPFC_MBOXQ_t *mboxq = NULL;
464 	struct lpfc_nodelist *ndlp;
465 	struct lpfc_dmabuf *dmabuf = NULL;
466 	int rc = 0;
467 
468 	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
469 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
470 	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
471 	    !(vport->fc_flag & FC_PT2PT)) {
472 		ndlp = lpfc_findnode_did(vport, Fabric_DID);
473 		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
474 			rc = -ENODEV;
475 			goto fail;
476 		}
477 	}
478 
479 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
480 	if (!mboxq) {
481 		rc = -ENOMEM;
482 		goto fail;
483 	}
484 
485 	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
486 	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
487 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
488 		if (!dmabuf) {
489 			rc = -ENOMEM;
490 			goto fail;
491 		}
492 		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
493 		if (!dmabuf->virt) {
494 			rc = -ENOMEM;
495 			goto fail;
496 		}
497 		memcpy(dmabuf->virt, &phba->fc_fabparam,
498 		       sizeof(struct serv_parm));
499 	}
500 
501 	vport->port_state = LPFC_FABRIC_CFG_LINK;
502 	if (dmabuf)
503 		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
504 	else
505 		lpfc_reg_vfi(mboxq, vport, 0);
506 
507 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
508 	mboxq->vport = vport;
509 	mboxq->ctx_buf = dmabuf;
510 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
511 	if (rc == MBX_NOT_FINISHED) {
512 		rc = -ENXIO;
513 		goto fail;
514 	}
515 	return 0;
516 
517 fail:
518 	if (mboxq)
519 		mempool_free(mboxq, phba->mbox_mem_pool);
520 	if (dmabuf) {
521 		if (dmabuf->virt)
522 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
523 		kfree(dmabuf);
524 	}
525 
526 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
527 	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
528 		"0289 Issue Register VFI failed: Err %d\n", rc);
529 	return rc;
530 }
531 
532 /**
533  * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
534  * @vport: pointer to a host virtual N_Port data structure.
535  *
536  * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
537  * the @vport. This mailbox command is necessary for SLI4 port only.
538  *
539  * Return code
540  *   0 - successfully issued REG_VFI for @vport
541  *   A failure code otherwise.
542  **/
543 int
544 lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
545 {
546 	struct lpfc_hba *phba = vport->phba;
547 	struct Scsi_Host *shost;
548 	LPFC_MBOXQ_t *mboxq;
549 	int rc;
550 
551 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
552 	if (!mboxq) {
553 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
554 				"2556 UNREG_VFI mbox allocation failed"
555 				"HBA state x%x\n", phba->pport->port_state);
556 		return -ENOMEM;
557 	}
558 
559 	lpfc_unreg_vfi(mboxq, vport);
560 	mboxq->vport = vport;
561 	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
562 
563 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
564 	if (rc == MBX_NOT_FINISHED) {
565 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
566 				"2557 UNREG_VFI issue mbox failed rc x%x "
567 				"HBA state x%x\n",
568 				rc, phba->pport->port_state);
569 		mempool_free(mboxq, phba->mbox_mem_pool);
570 		return -EIO;
571 	}
572 
573 	shost = lpfc_shost_from_vport(vport);
574 	spin_lock_irq(shost->host_lock);
575 	vport->fc_flag &= ~FC_VFI_REGISTERED;
576 	spin_unlock_irq(shost->host_lock);
577 	return 0;
578 }
579 
580 /**
581  * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
582  * @vport: pointer to a host virtual N_Port data structure.
583  * @sp: pointer to service parameter data structure.
584  *
585  * This routine is called from FLOGI/FDISC completion handler functions.
586  * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
587  * node nodename is changed in the completion service parameter else return
588  * 0. This function also set flag in the vport data structure to delay
589  * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
590  * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
591  * node nodename is changed in the completion service parameter.
592  *
593  * Return code
594  *   0 - FCID and Fabric Nodename and Fabric portname is not changed.
595  *   1 - FCID or Fabric Nodename or Fabric portname is changed.
596  *
597  **/
598 static uint8_t
599 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
600 		struct serv_parm *sp)
601 {
602 	struct lpfc_hba *phba = vport->phba;
603 	uint8_t fabric_param_changed = 0;
604 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
605 
606 	if ((vport->fc_prevDID != vport->fc_myDID) ||
607 		memcmp(&vport->fabric_portname, &sp->portName,
608 			sizeof(struct lpfc_name)) ||
609 		memcmp(&vport->fabric_nodename, &sp->nodeName,
610 			sizeof(struct lpfc_name)) ||
611 		(vport->vport_flag & FAWWPN_PARAM_CHG)) {
612 		fabric_param_changed = 1;
613 		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
614 	}
615 	/*
616 	 * Word 1 Bit 31 in common service parameter is overloaded.
617 	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
618 	 * Word 1 Bit 31 in FLOGI response is clean address bit
619 	 *
620 	 * If fabric parameter is changed and clean address bit is
621 	 * cleared delay nport discovery if
622 	 * - vport->fc_prevDID != 0 (not initial discovery) OR
623 	 * - lpfc_delay_discovery module parameter is set.
624 	 */
625 	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
626 	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
627 		spin_lock_irq(shost->host_lock);
628 		vport->fc_flag |= FC_DISC_DELAYED;
629 		spin_unlock_irq(shost->host_lock);
630 	}
631 
632 	return fabric_param_changed;
633 }
634 
635 
636 /**
637  * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
638  * @vport: pointer to a host virtual N_Port data structure.
639  * @ndlp: pointer to a node-list data structure.
640  * @sp: pointer to service parameter data structure.
641  * @irsp: pointer to the IOCB within the lpfc response IOCB.
642  *
643  * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
644  * function to handle the completion of a Fabric Login (FLOGI) into a fabric
645  * port in a fabric topology. It properly sets up the parameters to the @ndlp
646  * from the IOCB response. It also check the newly assigned N_Port ID to the
647  * @vport against the previously assigned N_Port ID. If it is different from
648  * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
649  * is invoked on all the remaining nodes with the @vport to unregister the
650  * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
651  * is invoked to register login to the fabric.
652  *
653  * Return code
654  *   0 - Success (currently, always return 0)
655  **/
656 static int
657 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
658 			   struct serv_parm *sp, IOCB_t *irsp)
659 {
660 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
661 	struct lpfc_hba  *phba = vport->phba;
662 	struct lpfc_nodelist *np;
663 	struct lpfc_nodelist *next_np;
664 	uint8_t fabric_param_changed;
665 
666 	spin_lock_irq(shost->host_lock);
667 	vport->fc_flag |= FC_FABRIC;
668 	spin_unlock_irq(shost->host_lock);
669 
670 	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
671 	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
672 		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
673 
674 	phba->fc_edtovResol = sp->cmn.edtovResolution;
675 	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
676 
677 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
678 		spin_lock_irq(shost->host_lock);
679 		vport->fc_flag |= FC_PUBLIC_LOOP;
680 		spin_unlock_irq(shost->host_lock);
681 	}
682 
683 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
684 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
685 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
686 	ndlp->nlp_class_sup = 0;
687 	if (sp->cls1.classValid)
688 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
689 	if (sp->cls2.classValid)
690 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
691 	if (sp->cls3.classValid)
692 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
693 	if (sp->cls4.classValid)
694 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
695 	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
696 				sp->cmn.bbRcvSizeLsb;
697 
698 	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
699 	if (fabric_param_changed) {
700 		/* Reset FDMI attribute masks based on config parameter */
701 		if (phba->cfg_enable_SmartSAN ||
702 		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
703 			/* Setup appropriate attribute masks */
704 			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
705 			if (phba->cfg_enable_SmartSAN)
706 				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
707 			else
708 				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
709 		} else {
710 			vport->fdmi_hba_mask = 0;
711 			vport->fdmi_port_mask = 0;
712 		}
713 
714 	}
715 	memcpy(&vport->fabric_portname, &sp->portName,
716 			sizeof(struct lpfc_name));
717 	memcpy(&vport->fabric_nodename, &sp->nodeName,
718 			sizeof(struct lpfc_name));
719 	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
720 
721 	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
722 		if (sp->cmn.response_multiple_NPort) {
723 			lpfc_printf_vlog(vport, KERN_WARNING,
724 					 LOG_ELS | LOG_VPORT,
725 					 "1816 FLOGI NPIV supported, "
726 					 "response data 0x%x\n",
727 					 sp->cmn.response_multiple_NPort);
728 			spin_lock_irq(&phba->hbalock);
729 			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
730 			spin_unlock_irq(&phba->hbalock);
731 		} else {
732 			/* Because we asked f/w for NPIV it still expects us
733 			to call reg_vnpid atleast for the physcial host */
734 			lpfc_printf_vlog(vport, KERN_WARNING,
735 					 LOG_ELS | LOG_VPORT,
736 					 "1817 Fabric does not support NPIV "
737 					 "- configuring single port mode.\n");
738 			spin_lock_irq(&phba->hbalock);
739 			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
740 			spin_unlock_irq(&phba->hbalock);
741 		}
742 	}
743 
744 	/*
745 	 * For FC we need to do some special processing because of the SLI
746 	 * Port's default settings of the Common Service Parameters.
747 	 */
748 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
749 	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
750 		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
751 		if (fabric_param_changed)
752 			lpfc_unregister_fcf_prep(phba);
753 
754 		/* This should just update the VFI CSPs*/
755 		if (vport->fc_flag & FC_VFI_REGISTERED)
756 			lpfc_issue_reg_vfi(vport);
757 	}
758 
759 	if (fabric_param_changed &&
760 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
761 
762 		/* If our NportID changed, we need to ensure all
763 		 * remaining NPORTs get unreg_login'ed.
764 		 */
765 		list_for_each_entry_safe(np, next_np,
766 					&vport->fc_nodes, nlp_listp) {
767 			if (!NLP_CHK_NODE_ACT(np))
768 				continue;
769 			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
770 				   !(np->nlp_flag & NLP_NPR_ADISC))
771 				continue;
772 			spin_lock_irq(shost->host_lock);
773 			np->nlp_flag &= ~NLP_NPR_ADISC;
774 			spin_unlock_irq(shost->host_lock);
775 			lpfc_unreg_rpi(vport, np);
776 		}
777 		lpfc_cleanup_pending_mbox(vport);
778 
779 		if (phba->sli_rev == LPFC_SLI_REV4) {
780 			lpfc_sli4_unreg_all_rpis(vport);
781 			lpfc_mbx_unreg_vpi(vport);
782 			spin_lock_irq(shost->host_lock);
783 			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
784 			spin_unlock_irq(shost->host_lock);
785 		}
786 
787 		/*
788 		 * For SLI3 and SLI4, the VPI needs to be reregistered in
789 		 * response to this fabric parameter change event.
790 		 */
791 		spin_lock_irq(shost->host_lock);
792 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
793 		spin_unlock_irq(shost->host_lock);
794 	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
795 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
796 			/*
797 			 * Driver needs to re-reg VPI in order for f/w
798 			 * to update the MAC address.
799 			 */
800 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
801 			lpfc_register_new_vport(phba, vport, ndlp);
802 			return 0;
803 	}
804 
805 	if (phba->sli_rev < LPFC_SLI_REV4) {
806 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
807 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
808 		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
809 			lpfc_register_new_vport(phba, vport, ndlp);
810 		else
811 			lpfc_issue_fabric_reglogin(vport);
812 	} else {
813 		ndlp->nlp_type |= NLP_FABRIC;
814 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
815 		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
816 			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
817 			lpfc_start_fdiscs(phba);
818 			lpfc_do_scr_ns_plogi(phba, vport);
819 		} else if (vport->fc_flag & FC_VFI_REGISTERED)
820 			lpfc_issue_init_vpi(vport);
821 		else {
822 			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
823 					"3135 Need register VFI: (x%x/%x)\n",
824 					vport->fc_prevDID, vport->fc_myDID);
825 			lpfc_issue_reg_vfi(vport);
826 		}
827 	}
828 	return 0;
829 }
830 
831 /**
832  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
833  * @vport: pointer to a host virtual N_Port data structure.
834  * @ndlp: pointer to a node-list data structure.
835  * @sp: pointer to service parameter data structure.
836  *
837  * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
838  * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
839  * in a point-to-point topology. First, the @vport's N_Port Name is compared
840  * with the received N_Port Name: if the @vport's N_Port Name is greater than
841  * the received N_Port Name lexicographically, this node shall assign local
842  * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
843  * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
844  * this node shall just wait for the remote node to issue PLOGI and assign
845  * N_Port IDs.
846  *
847  * Return code
848  *   0 - Success
849  *   -ENXIO - Fail
850  **/
851 static int
852 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
853 			  struct serv_parm *sp)
854 {
855 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
856 	struct lpfc_hba  *phba = vport->phba;
857 	LPFC_MBOXQ_t *mbox;
858 	int rc;
859 
860 	spin_lock_irq(shost->host_lock);
861 	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
862 	vport->fc_flag |= FC_PT2PT;
863 	spin_unlock_irq(shost->host_lock);
864 
865 	/* If we are pt2pt with another NPort, force NPIV off! */
866 	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
867 
868 	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
869 	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
870 		lpfc_unregister_fcf_prep(phba);
871 
872 		spin_lock_irq(shost->host_lock);
873 		vport->fc_flag &= ~FC_VFI_REGISTERED;
874 		spin_unlock_irq(shost->host_lock);
875 		phba->fc_topology_changed = 0;
876 	}
877 
878 	rc = memcmp(&vport->fc_portname, &sp->portName,
879 		    sizeof(vport->fc_portname));
880 
881 	if (rc >= 0) {
882 		/* This side will initiate the PLOGI */
883 		spin_lock_irq(shost->host_lock);
884 		vport->fc_flag |= FC_PT2PT_PLOGI;
885 		spin_unlock_irq(shost->host_lock);
886 
887 		/*
888 		 * N_Port ID cannot be 0, set our Id to LocalID
889 		 * the other side will be RemoteID.
890 		 */
891 
892 		/* not equal */
893 		if (rc)
894 			vport->fc_myDID = PT2PT_LocalID;
895 
896 		/* Decrement ndlp reference count indicating that ndlp can be
897 		 * safely released when other references to it are done.
898 		 */
899 		lpfc_nlp_put(ndlp);
900 
901 		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
902 		if (!ndlp) {
903 			/*
904 			 * Cannot find existing Fabric ndlp, so allocate a
905 			 * new one
906 			 */
907 			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
908 			if (!ndlp)
909 				goto fail;
910 		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
911 			ndlp = lpfc_enable_node(vport, ndlp,
912 						NLP_STE_UNUSED_NODE);
913 			if(!ndlp)
914 				goto fail;
915 		}
916 
917 		memcpy(&ndlp->nlp_portname, &sp->portName,
918 		       sizeof(struct lpfc_name));
919 		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
920 		       sizeof(struct lpfc_name));
921 		/* Set state will put ndlp onto node list if not already done */
922 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
923 		spin_lock_irq(shost->host_lock);
924 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
925 		spin_unlock_irq(shost->host_lock);
926 
927 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
928 		if (!mbox)
929 			goto fail;
930 
931 		lpfc_config_link(phba, mbox);
932 
933 		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
934 		mbox->vport = vport;
935 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
936 		if (rc == MBX_NOT_FINISHED) {
937 			mempool_free(mbox, phba->mbox_mem_pool);
938 			goto fail;
939 		}
940 	} else {
941 		/* This side will wait for the PLOGI, decrement ndlp reference
942 		 * count indicating that ndlp can be released when other
943 		 * references to it are done.
944 		 */
945 		lpfc_nlp_put(ndlp);
946 
947 		/* Start discovery - this should just do CLEAR_LA */
948 		lpfc_disc_start(vport);
949 	}
950 
951 	return 0;
952 fail:
953 	return -ENXIO;
954 }
955 
956 /**
957  * lpfc_cmpl_els_flogi - Completion callback function for flogi
958  * @phba: pointer to lpfc hba data structure.
959  * @cmdiocb: pointer to lpfc command iocb data structure.
960  * @rspiocb: pointer to lpfc response iocb data structure.
961  *
962  * This routine is the top-level completion callback function for issuing
963  * a Fabric Login (FLOGI) command. If the response IOCB reported error,
964  * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
965  * retry has been made (either immediately or delayed with lpfc_els_retry()
966  * returning 1), the command IOCB will be released and function returned.
967  * If the retry attempt has been given up (possibly reach the maximum
968  * number of retries), one additional decrement of ndlp reference shall be
969  * invoked before going out after releasing the command IOCB. This will
970  * actually release the remote node (Note, lpfc_els_free_iocb() will also
971  * invoke one decrement of ndlp reference count). If no error reported in
972  * the IOCB status, the command Port ID field is used to determine whether
973  * this is a point-to-point topology or a fabric topology: if the Port ID
974  * field is assigned, it is a fabric topology; otherwise, it is a
975  * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
976  * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
977  * specific topology completion conditions.
978  **/
979 static void
980 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
981 		    struct lpfc_iocbq *rspiocb)
982 {
983 	struct lpfc_vport *vport = cmdiocb->vport;
984 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
985 	IOCB_t *irsp = &rspiocb->iocb;
986 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
987 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
988 	struct serv_parm *sp;
989 	uint16_t fcf_index;
990 	int rc;
991 
992 	/* Check to see if link went down during discovery */
993 	if (lpfc_els_chk_latt(vport)) {
994 		/* One additional decrement on node reference count to
995 		 * trigger the release of the node
996 		 */
997 		lpfc_nlp_put(ndlp);
998 		goto out;
999 	}
1000 
1001 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1002 		"FLOGI cmpl:      status:x%x/x%x state:x%x",
1003 		irsp->ulpStatus, irsp->un.ulpWord[4],
1004 		vport->port_state);
1005 
1006 	if (irsp->ulpStatus) {
1007 		/*
1008 		 * In case of FIP mode, perform roundrobin FCF failover
1009 		 * due to new FCF discovery
1010 		 */
1011 		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
1012 		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
1013 			if (phba->link_state < LPFC_LINK_UP)
1014 				goto stop_rr_fcf_flogi;
1015 			if ((phba->fcoe_cvl_eventtag_attn ==
1016 			     phba->fcoe_cvl_eventtag) &&
1017 			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1018 			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1019 			    IOERR_SLI_ABORTED))
1020 				goto stop_rr_fcf_flogi;
1021 			else
1022 				phba->fcoe_cvl_eventtag_attn =
1023 					phba->fcoe_cvl_eventtag;
1024 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1025 					"2611 FLOGI failed on FCF (x%x), "
1026 					"status:x%x/x%x, tmo:x%x, perform "
1027 					"roundrobin FCF failover\n",
1028 					phba->fcf.current_rec.fcf_indx,
1029 					irsp->ulpStatus, irsp->un.ulpWord[4],
1030 					irsp->ulpTimeout);
1031 			lpfc_sli4_set_fcf_flogi_fail(phba,
1032 					phba->fcf.current_rec.fcf_indx);
1033 			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1034 			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1035 			if (rc)
1036 				goto out;
1037 		}
1038 
1039 stop_rr_fcf_flogi:
1040 		/* FLOGI failure */
1041 		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1042 		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1043 					IOERR_LOOP_OPEN_FAILURE)))
1044 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1045 					"2858 FLOGI failure Status:x%x/x%x "
1046 					"TMO:x%x Data x%x x%x\n",
1047 					irsp->ulpStatus, irsp->un.ulpWord[4],
1048 					irsp->ulpTimeout, phba->hba_flag,
1049 					phba->fcf.fcf_flag);
1050 
1051 		/* Check for retry */
1052 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1053 			goto out;
1054 
1055 		/* If this is not a loop open failure, bail out */
1056 		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1057 		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1058 					IOERR_LOOP_OPEN_FAILURE)))
1059 			goto flogifail;
1060 
1061 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
1062 				 "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
1063 				 irsp->ulpStatus, irsp->un.ulpWord[4],
1064 				 cmdiocb->sli4_xritag, irsp->ulpTimeout);
1065 
1066 		/* FLOGI failed, so there is no fabric */
1067 		spin_lock_irq(shost->host_lock);
1068 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1069 		spin_unlock_irq(shost->host_lock);
1070 
1071 		/* If private loop, then allow max outstanding els to be
1072 		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1073 		 * alpa map would take too long otherwise.
1074 		 */
1075 		if (phba->alpa_map[0] == 0)
1076 			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1077 		if ((phba->sli_rev == LPFC_SLI_REV4) &&
1078 		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1079 		     (vport->fc_prevDID != vport->fc_myDID) ||
1080 			phba->fc_topology_changed)) {
1081 			if (vport->fc_flag & FC_VFI_REGISTERED) {
1082 				if (phba->fc_topology_changed) {
1083 					lpfc_unregister_fcf_prep(phba);
1084 					spin_lock_irq(shost->host_lock);
1085 					vport->fc_flag &= ~FC_VFI_REGISTERED;
1086 					spin_unlock_irq(shost->host_lock);
1087 					phba->fc_topology_changed = 0;
1088 				} else {
1089 					lpfc_sli4_unreg_all_rpis(vport);
1090 				}
1091 			}
1092 
1093 			/* Do not register VFI if the driver aborted FLOGI */
1094 			if (!lpfc_error_lost_link(irsp))
1095 				lpfc_issue_reg_vfi(vport);
1096 			lpfc_nlp_put(ndlp);
1097 			goto out;
1098 		}
1099 		goto flogifail;
1100 	}
1101 	spin_lock_irq(shost->host_lock);
1102 	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1103 	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1104 	spin_unlock_irq(shost->host_lock);
1105 
1106 	/*
1107 	 * The FLogI succeeded.  Sync the data for the CPU before
1108 	 * accessing it.
1109 	 */
1110 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1111 	if (!prsp)
1112 		goto out;
1113 	sp = prsp->virt + sizeof(uint32_t);
1114 
1115 	/* FLOGI completes successfully */
1116 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1117 			 "0101 FLOGI completes successfully, I/O tag:x%x, "
1118 			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
1119 			 cmdiocb->iotag, cmdiocb->sli4_xritag,
1120 			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1121 			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1122 			 vport->port_state, vport->fc_flag);
1123 
1124 	if (vport->port_state == LPFC_FLOGI) {
1125 		/*
1126 		 * If Common Service Parameters indicate Nport
1127 		 * we are point to point, if Fport we are Fabric.
1128 		 */
1129 		if (sp->cmn.fPort)
1130 			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
1131 		else if (!(phba->hba_flag & HBA_FCOE_MODE))
1132 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1133 		else {
1134 			lpfc_printf_vlog(vport, KERN_ERR,
1135 				LOG_FIP | LOG_ELS,
1136 				"2831 FLOGI response with cleared Fabric "
1137 				"bit fcf_index 0x%x "
1138 				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1139 				"Fabric Name "
1140 				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
1141 				phba->fcf.current_rec.fcf_indx,
1142 				phba->fcf.current_rec.switch_name[0],
1143 				phba->fcf.current_rec.switch_name[1],
1144 				phba->fcf.current_rec.switch_name[2],
1145 				phba->fcf.current_rec.switch_name[3],
1146 				phba->fcf.current_rec.switch_name[4],
1147 				phba->fcf.current_rec.switch_name[5],
1148 				phba->fcf.current_rec.switch_name[6],
1149 				phba->fcf.current_rec.switch_name[7],
1150 				phba->fcf.current_rec.fabric_name[0],
1151 				phba->fcf.current_rec.fabric_name[1],
1152 				phba->fcf.current_rec.fabric_name[2],
1153 				phba->fcf.current_rec.fabric_name[3],
1154 				phba->fcf.current_rec.fabric_name[4],
1155 				phba->fcf.current_rec.fabric_name[5],
1156 				phba->fcf.current_rec.fabric_name[6],
1157 				phba->fcf.current_rec.fabric_name[7]);
1158 			lpfc_nlp_put(ndlp);
1159 			spin_lock_irq(&phba->hbalock);
1160 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1161 			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1162 			spin_unlock_irq(&phba->hbalock);
1163 			phba->fcf.fcf_redisc_attempted = 0; /* reset */
1164 			goto out;
1165 		}
1166 		if (!rc) {
1167 			/* Mark the FCF discovery process done */
1168 			if (phba->hba_flag & HBA_FIP_SUPPORT)
1169 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1170 						LOG_ELS,
1171 						"2769 FLOGI to FCF (x%x) "
1172 						"completed successfully\n",
1173 						phba->fcf.current_rec.fcf_indx);
1174 			spin_lock_irq(&phba->hbalock);
1175 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1176 			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1177 			spin_unlock_irq(&phba->hbalock);
1178 			phba->fcf.fcf_redisc_attempted = 0; /* reset */
1179 			goto out;
1180 		}
1181 	}
1182 
1183 flogifail:
1184 	spin_lock_irq(&phba->hbalock);
1185 	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1186 	spin_unlock_irq(&phba->hbalock);
1187 
1188 	lpfc_nlp_put(ndlp);
1189 
1190 	if (!lpfc_error_lost_link(irsp)) {
1191 		/* FLOGI failed, so just use loop map to make discovery list */
1192 		lpfc_disc_list_loopmap(vport);
1193 
1194 		/* Start discovery */
1195 		lpfc_disc_start(vport);
1196 	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1197 			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1198 			 IOERR_SLI_ABORTED) &&
1199 			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1200 			 IOERR_SLI_DOWN))) &&
1201 			(phba->link_state != LPFC_CLEAR_LA)) {
1202 		/* If FLOGI failed enable link interrupt. */
1203 		lpfc_issue_clear_la(phba, vport);
1204 	}
1205 out:
1206 	lpfc_els_free_iocb(phba, cmdiocb);
1207 }
1208 
1209 /**
1210  * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1211  * @vport: pointer to a host virtual N_Port data structure.
1212  * @ndlp: pointer to a node-list data structure.
1213  * @retry: number of retries to the command IOCB.
1214  *
1215  * This routine issues a Fabric Login (FLOGI) Request ELS command
1216  * for a @vport. The initiator service parameters are put into the payload
1217  * of the FLOGI Request IOCB and the top-level callback function pointer
1218  * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1219  * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1220  * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1221  *
1222  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1223  * will be incremented by 1 for holding the ndlp and the reference to ndlp
1224  * will be stored into the context1 field of the IOCB for the completion
1225  * callback function to the FLOGI ELS command.
1226  *
1227  * Return code
1228  *   0 - successfully issued flogi iocb for @vport
1229  *   1 - failed to issue flogi iocb for @vport
1230  **/
1231 static int
1232 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1233 		     uint8_t retry)
1234 {
1235 	struct lpfc_hba  *phba = vport->phba;
1236 	struct serv_parm *sp;
1237 	IOCB_t *icmd;
1238 	struct lpfc_iocbq *elsiocb;
1239 	struct lpfc_iocbq defer_flogi_acc;
1240 	uint8_t *pcmd;
1241 	uint16_t cmdsize;
1242 	uint32_t tmo, did;
1243 	int rc;
1244 
1245 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1246 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1247 				     ndlp->nlp_DID, ELS_CMD_FLOGI);
1248 
1249 	if (!elsiocb)
1250 		return 1;
1251 
1252 	icmd = &elsiocb->iocb;
1253 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1254 
1255 	/* For FLOGI request, remainder of payload is service parameters */
1256 	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1257 	pcmd += sizeof(uint32_t);
1258 	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1259 	sp = (struct serv_parm *) pcmd;
1260 
1261 	/* Setup CSPs accordingly for Fabric */
1262 	sp->cmn.e_d_tov = 0;
1263 	sp->cmn.w2.r_a_tov = 0;
1264 	sp->cmn.virtual_fabric_support = 0;
1265 	sp->cls1.classValid = 0;
1266 	if (sp->cmn.fcphLow < FC_PH3)
1267 		sp->cmn.fcphLow = FC_PH3;
1268 	if (sp->cmn.fcphHigh < FC_PH3)
1269 		sp->cmn.fcphHigh = FC_PH3;
1270 
1271 	if  (phba->sli_rev == LPFC_SLI_REV4) {
1272 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1273 		    LPFC_SLI_INTF_IF_TYPE_0) {
1274 			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1275 			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1276 			/* FLOGI needs to be 3 for WQE FCFI */
1277 			/* Set the fcfi to the fcfi we registered with */
1278 			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1279 		}
1280 		/* Can't do SLI4 class2 without support sequence coalescing */
1281 		sp->cls2.classValid = 0;
1282 		sp->cls2.seqDelivery = 0;
1283 	} else {
1284 		/* Historical, setting sequential-delivery bit for SLI3 */
1285 		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1286 		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1287 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1288 			sp->cmn.request_multiple_Nport = 1;
1289 			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1290 			icmd->ulpCt_h = 1;
1291 			icmd->ulpCt_l = 0;
1292 		} else
1293 			sp->cmn.request_multiple_Nport = 0;
1294 	}
1295 
1296 	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1297 		icmd->un.elsreq64.myID = 0;
1298 		icmd->un.elsreq64.fl = 1;
1299 	}
1300 
1301 	tmo = phba->fc_ratov;
1302 	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1303 	lpfc_set_disctmo(vport);
1304 	phba->fc_ratov = tmo;
1305 
1306 	phba->fc_stat.elsXmitFLOGI++;
1307 	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1308 
1309 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1310 		"Issue FLOGI:     opt:x%x",
1311 		phba->sli3_options, 0, 0);
1312 
1313 	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1314 
1315 	phba->hba_flag |= HBA_FLOGI_ISSUED;
1316 
1317 	/* Check for a deferred FLOGI ACC condition */
1318 	if (phba->defer_flogi_acc_flag) {
1319 		did = vport->fc_myDID;
1320 		vport->fc_myDID = Fabric_DID;
1321 
1322 		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));
1323 
1324 		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
1325 		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
1326 						phba->defer_flogi_acc_ox_id;
1327 
1328 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1329 				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
1330 				 " ox_id: x%x, hba_flag x%x\n",
1331 				 phba->defer_flogi_acc_rx_id,
1332 				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
1333 
1334 		/* Send deferred FLOGI ACC */
1335 		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
1336 				 ndlp, NULL);
1337 
1338 		phba->defer_flogi_acc_flag = false;
1339 
1340 		vport->fc_myDID = did;
1341 	}
1342 
1343 	if (rc == IOCB_ERROR) {
1344 		lpfc_els_free_iocb(phba, elsiocb);
1345 		return 1;
1346 	}
1347 	return 0;
1348 }
1349 
1350 /**
1351  * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1352  * @phba: pointer to lpfc hba data structure.
1353  *
1354  * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1355  * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1356  * list and issues an abort IOCB commond on each outstanding IOCB that
1357  * contains a active Fabric_DID ndlp. Note that this function is to issue
1358  * the abort IOCB command on all the outstanding IOCBs, thus when this
1359  * function returns, it does not guarantee all the IOCBs are actually aborted.
1360  *
1361  * Return code
1362  *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1363  **/
1364 int
1365 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1366 {
1367 	struct lpfc_sli_ring *pring;
1368 	struct lpfc_iocbq *iocb, *next_iocb;
1369 	struct lpfc_nodelist *ndlp;
1370 	IOCB_t *icmd;
1371 
1372 	/* Abort outstanding I/O on NPort <nlp_DID> */
1373 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1374 			"0201 Abort outstanding I/O on NPort x%x\n",
1375 			Fabric_DID);
1376 
1377 	pring = lpfc_phba_elsring(phba);
1378 	if (unlikely(!pring))
1379 		return -EIO;
1380 
1381 	/*
1382 	 * Check the txcmplq for an iocb that matches the nport the driver is
1383 	 * searching for.
1384 	 */
1385 	spin_lock_irq(&phba->hbalock);
1386 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1387 		icmd = &iocb->iocb;
1388 		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
1389 			ndlp = (struct lpfc_nodelist *)(iocb->context1);
1390 			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1391 			    (ndlp->nlp_DID == Fabric_DID))
1392 				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1393 		}
1394 	}
1395 	spin_unlock_irq(&phba->hbalock);
1396 
1397 	return 0;
1398 }
1399 
1400 /**
1401  * lpfc_initial_flogi - Issue an initial fabric login for a vport
1402  * @vport: pointer to a host virtual N_Port data structure.
1403  *
1404  * This routine issues an initial Fabric Login (FLOGI) for the @vport
1405  * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1406  * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1407  * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1408  * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1409  * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1410  * @vport.
1411  *
1412  * Return code
1413  *   0 - failed to issue initial flogi for @vport
1414  *   1 - successfully issued initial flogi for @vport
1415  **/
1416 int
1417 lpfc_initial_flogi(struct lpfc_vport *vport)
1418 {
1419 	struct lpfc_nodelist *ndlp;
1420 
1421 	vport->port_state = LPFC_FLOGI;
1422 	lpfc_set_disctmo(vport);
1423 
1424 	/* First look for the Fabric ndlp */
1425 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1426 	if (!ndlp) {
1427 		/* Cannot find existing Fabric ndlp, so allocate a new one */
1428 		ndlp = lpfc_nlp_init(vport, Fabric_DID);
1429 		if (!ndlp)
1430 			return 0;
1431 		/* Set the node type */
1432 		ndlp->nlp_type |= NLP_FABRIC;
1433 		/* Put ndlp onto node list */
1434 		lpfc_enqueue_node(vport, ndlp);
1435 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1436 		/* re-setup ndlp without removing from node list */
1437 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1438 		if (!ndlp)
1439 			return 0;
1440 	}
1441 
1442 	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1443 		/* This decrement of reference count to node shall kick off
1444 		 * the release of the node.
1445 		 */
1446 		lpfc_nlp_put(ndlp);
1447 		return 0;
1448 	}
1449 	return 1;
1450 }
1451 
1452 /**
1453  * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1454  * @vport: pointer to a host virtual N_Port data structure.
1455  *
1456  * This routine issues an initial Fabric Discover (FDISC) for the @vport
1457  * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1458  * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1459  * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1460  * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1461  * is then invoked with the @vport and the ndlp to perform the FDISC for the
1462  * @vport.
1463  *
1464  * Return code
1465  *   0 - failed to issue initial fdisc for @vport
1466  *   1 - successfully issued initial fdisc for @vport
1467  **/
1468 int
1469 lpfc_initial_fdisc(struct lpfc_vport *vport)
1470 {
1471 	struct lpfc_nodelist *ndlp;
1472 
1473 	/* First look for the Fabric ndlp */
1474 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1475 	if (!ndlp) {
1476 		/* Cannot find existing Fabric ndlp, so allocate a new one */
1477 		ndlp = lpfc_nlp_init(vport, Fabric_DID);
1478 		if (!ndlp)
1479 			return 0;
1480 		/* Put ndlp onto node list */
1481 		lpfc_enqueue_node(vport, ndlp);
1482 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1483 		/* re-setup ndlp without removing from node list */
1484 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1485 		if (!ndlp)
1486 			return 0;
1487 	}
1488 
1489 	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1490 		/* decrement node reference count to trigger the release of
1491 		 * the node.
1492 		 */
1493 		lpfc_nlp_put(ndlp);
1494 		return 0;
1495 	}
1496 	return 1;
1497 }
1498 
1499 /**
1500  * lpfc_more_plogi - Check and issue remaining plogis for a vport
1501  * @vport: pointer to a host virtual N_Port data structure.
1502  *
1503  * This routine checks whether there are more remaining Port Logins
1504  * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1505  * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1506  * to issue ELS PLOGIs up to the configured discover threads with the
1507  * @vport (@vport->cfg_discovery_threads). The function also decrement
1508  * the @vport's num_disc_node by 1 if it is not already 0.
1509  **/
1510 void
1511 lpfc_more_plogi(struct lpfc_vport *vport)
1512 {
1513 	if (vport->num_disc_nodes)
1514 		vport->num_disc_nodes--;
1515 
1516 	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
1517 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1518 			 "0232 Continue discovery with %d PLOGIs to go "
1519 			 "Data: x%x x%x x%x\n",
1520 			 vport->num_disc_nodes, vport->fc_plogi_cnt,
1521 			 vport->fc_flag, vport->port_state);
1522 	/* Check to see if there are more PLOGIs to be sent */
1523 	if (vport->fc_flag & FC_NLP_MORE)
1524 		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
1525 		lpfc_els_disc_plogi(vport);
1526 
1527 	return;
1528 }
1529 
1530 /**
1531  * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
1532  * @phba: pointer to lpfc hba data structure.
1533  * @prsp: pointer to response IOCB payload.
1534  * @ndlp: pointer to a node-list data structure.
1535  *
1536  * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1537  * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt.
1538  * The following cases are considered N_Port confirmed:
1539  * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1540  * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1541  * it does not have WWPN assigned either. If the WWPN is confirmed, the
1542  * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1543  * 1) if there is a node on vport list other than the @ndlp with the same
1544  * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1545  * on that node to release the RPI associated with the node; 2) if there is
1546  * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1547  * into, a new node shall be allocated (or activated). In either case, the
1548  * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1549  * be released and the new_ndlp shall be put on to the vport node list and
1550  * its pointer returned as the confirmed node.
1551  *
1552  * Note that before the @ndlp got "released", the keepDID from not-matching
1553  * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1554  * of the @ndlp. This is because the release of @ndlp is actually to put it
1555  * into an inactive state on the vport node list and the vport node list
1556  * management algorithm does not allow two node with a same DID.
1557  *
1558  * Return code
1559  *   pointer to the PLOGI N_Port @ndlp
1560  **/
1561 static struct lpfc_nodelist *
1562 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1563 			 struct lpfc_nodelist *ndlp)
1564 {
1565 	struct lpfc_vport *vport = ndlp->vport;
1566 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1567 	struct lpfc_nodelist *new_ndlp;
1568 	struct lpfc_rport_data *rdata;
1569 	struct fc_rport *rport;
1570 	struct serv_parm *sp;
1571 	uint8_t  name[sizeof(struct lpfc_name)];
1572 	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
1573 	uint32_t keep_new_nlp_flag = 0;
1574 	uint16_t keep_nlp_state;
1575 	u32 keep_nlp_fc4_type = 0;
1576 	struct lpfc_nvme_rport *keep_nrport = NULL;
1577 	int  put_node;
1578 	int  put_rport;
1579 	unsigned long *active_rrqs_xri_bitmap = NULL;
1580 
1581 	/* Fabric nodes can have the same WWPN so we don't bother searching
1582 	 * by WWPN.  Just return the ndlp that was given to us.
1583 	 */
1584 	if (ndlp->nlp_type & NLP_FABRIC)
1585 		return ndlp;
1586 
1587 	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1588 	memset(name, 0, sizeof(struct lpfc_name));
1589 
1590 	/* Now we find out if the NPort we are logging into, matches the WWPN
1591 	 * we have for that ndlp. If not, we have some work to do.
1592 	 */
1593 	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1594 
1595 	/* return immediately if the WWPN matches ndlp */
1596 	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1597 		return ndlp;
1598 
1599 	if (phba->sli_rev == LPFC_SLI_REV4) {
1600 		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1601 						       GFP_KERNEL);
1602 		if (active_rrqs_xri_bitmap)
1603 			memset(active_rrqs_xri_bitmap, 0,
1604 			       phba->cfg_rrq_xri_bitmap_sz);
1605 	}
1606 
1607 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1608 			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
1609 			 "new_ndlp x%x x%x x%x\n",
1610 			 ndlp->nlp_DID, ndlp->nlp_flag,  ndlp->nlp_fc4_type,
1611 			 (new_ndlp ? new_ndlp->nlp_DID : 0),
1612 			 (new_ndlp ? new_ndlp->nlp_flag : 0),
1613 			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
1614 
1615 	if (!new_ndlp) {
1616 		rc = memcmp(&ndlp->nlp_portname, name,
1617 			    sizeof(struct lpfc_name));
1618 		if (!rc) {
1619 			if (active_rrqs_xri_bitmap)
1620 				mempool_free(active_rrqs_xri_bitmap,
1621 					     phba->active_rrq_pool);
1622 			return ndlp;
1623 		}
1624 		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
1625 		if (!new_ndlp) {
1626 			if (active_rrqs_xri_bitmap)
1627 				mempool_free(active_rrqs_xri_bitmap,
1628 					     phba->active_rrq_pool);
1629 			return ndlp;
1630 		}
1631 	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1632 		rc = memcmp(&ndlp->nlp_portname, name,
1633 			    sizeof(struct lpfc_name));
1634 		if (!rc) {
1635 			if (active_rrqs_xri_bitmap)
1636 				mempool_free(active_rrqs_xri_bitmap,
1637 					     phba->active_rrq_pool);
1638 			return ndlp;
1639 		}
1640 		new_ndlp = lpfc_enable_node(vport, new_ndlp,
1641 						NLP_STE_UNUSED_NODE);
1642 		if (!new_ndlp) {
1643 			if (active_rrqs_xri_bitmap)
1644 				mempool_free(active_rrqs_xri_bitmap,
1645 					     phba->active_rrq_pool);
1646 			return ndlp;
1647 		}
1648 		keepDID = new_ndlp->nlp_DID;
1649 		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
1650 			memcpy(active_rrqs_xri_bitmap,
1651 			       new_ndlp->active_rrqs_xri_bitmap,
1652 			       phba->cfg_rrq_xri_bitmap_sz);
1653 	} else {
1654 		keepDID = new_ndlp->nlp_DID;
1655 		if (phba->sli_rev == LPFC_SLI_REV4 &&
1656 		    active_rrqs_xri_bitmap)
1657 			memcpy(active_rrqs_xri_bitmap,
1658 			       new_ndlp->active_rrqs_xri_bitmap,
1659 			       phba->cfg_rrq_xri_bitmap_sz);
1660 	}
1661 
1662 	/* At this point in this routine, we know new_ndlp will be
1663 	 * returned. however, any previous GID_FTs that were done
1664 	 * would have updated nlp_fc4_type in ndlp, so we must ensure
1665 	 * new_ndlp has the right value.
1666 	 */
1667 	if (vport->fc_flag & FC_FABRIC) {
1668 		keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
1669 		new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
1670 	}
1671 
1672 	lpfc_unreg_rpi(vport, new_ndlp);
1673 	new_ndlp->nlp_DID = ndlp->nlp_DID;
1674 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1675 	if (phba->sli_rev == LPFC_SLI_REV4)
1676 		memcpy(new_ndlp->active_rrqs_xri_bitmap,
1677 		       ndlp->active_rrqs_xri_bitmap,
1678 		       phba->cfg_rrq_xri_bitmap_sz);
1679 
1680 	spin_lock_irq(shost->host_lock);
1681 	keep_new_nlp_flag = new_ndlp->nlp_flag;
1682 	keep_nlp_flag = ndlp->nlp_flag;
1683 	new_ndlp->nlp_flag = ndlp->nlp_flag;
1684 
1685 	/* if new_ndlp had NLP_UNREG_INP set, keep it */
1686 	if (keep_new_nlp_flag & NLP_UNREG_INP)
1687 		new_ndlp->nlp_flag |= NLP_UNREG_INP;
1688 	else
1689 		new_ndlp->nlp_flag &= ~NLP_UNREG_INP;
1690 
1691 	/* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
1692 	if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
1693 		new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1694 	else
1695 		new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1696 
1697 	ndlp->nlp_flag = keep_new_nlp_flag;
1698 
1699 	/* if ndlp had NLP_UNREG_INP set, keep it */
1700 	if (keep_nlp_flag & NLP_UNREG_INP)
1701 		ndlp->nlp_flag |= NLP_UNREG_INP;
1702 	else
1703 		ndlp->nlp_flag &= ~NLP_UNREG_INP;
1704 
1705 	/* if ndlp had NLP_RPI_REGISTERED set, keep it */
1706 	if (keep_nlp_flag & NLP_RPI_REGISTERED)
1707 		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
1708 	else
1709 		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
1710 
1711 	spin_unlock_irq(shost->host_lock);
1712 
1713 	/* Set nlp_states accordingly */
1714 	keep_nlp_state = new_ndlp->nlp_state;
1715 	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1716 
1717 	/* interchange the nvme remoteport structs */
1718 	keep_nrport = new_ndlp->nrport;
1719 	new_ndlp->nrport = ndlp->nrport;
1720 
1721 	/* Move this back to NPR state */
1722 	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1723 		/* The new_ndlp is replacing ndlp totally, so we need
1724 		 * to put ndlp on UNUSED list and try to free it.
1725 		 */
1726 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1727 			 "3179 PLOGI confirm NEW: %x %x\n",
1728 			 new_ndlp->nlp_DID, keepDID);
1729 
1730 		/* Fix up the rport accordingly */
1731 		rport =  ndlp->rport;
1732 		if (rport) {
1733 			rdata = rport->dd_data;
1734 			if (rdata->pnode == ndlp) {
1735 				/* break the link before dropping the ref */
1736 				ndlp->rport = NULL;
1737 				lpfc_nlp_put(ndlp);
1738 				rdata->pnode = lpfc_nlp_get(new_ndlp);
1739 				new_ndlp->rport = rport;
1740 			}
1741 			new_ndlp->nlp_type = ndlp->nlp_type;
1742 		}
1743 
1744 		/* Fix up the nvme rport */
1745 		if (ndlp->nrport) {
1746 			ndlp->nrport = NULL;
1747 			lpfc_nlp_put(ndlp);
1748 		}
1749 
1750 		/* We shall actually free the ndlp with both nlp_DID and
1751 		 * nlp_portname fields equals 0 to avoid any ndlp on the
1752 		 * nodelist never to be used.
1753 		 */
1754 		if (ndlp->nlp_DID == 0) {
1755 			spin_lock_irq(&phba->ndlp_lock);
1756 			NLP_SET_FREE_REQ(ndlp);
1757 			spin_unlock_irq(&phba->ndlp_lock);
1758 		}
1759 
1760 		/* Two ndlps cannot have the same did on the nodelist.
1761 		 * Note: for this case, ndlp has a NULL WWPN so setting
1762 		 * the nlp_fc4_type isn't required.
1763 		 */
1764 		ndlp->nlp_DID = keepDID;
1765 		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1766 		if (phba->sli_rev == LPFC_SLI_REV4 &&
1767 		    active_rrqs_xri_bitmap)
1768 			memcpy(ndlp->active_rrqs_xri_bitmap,
1769 			       active_rrqs_xri_bitmap,
1770 			       phba->cfg_rrq_xri_bitmap_sz);
1771 
1772 		if (!NLP_CHK_NODE_ACT(ndlp))
1773 			lpfc_drop_node(vport, ndlp);
1774 	}
1775 	else {
1776 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1777 			 "3180 PLOGI confirm SWAP: %x %x\n",
1778 			 new_ndlp->nlp_DID, keepDID);
1779 
1780 		lpfc_unreg_rpi(vport, ndlp);
1781 
1782 		/* Two ndlps cannot have the same did and the fc4
1783 		 * type must be transferred because the ndlp is in
1784 		 * flight.
1785 		 */
1786 		ndlp->nlp_DID = keepDID;
1787 		ndlp->nlp_fc4_type = keep_nlp_fc4_type;
1788 
1789 		if (phba->sli_rev == LPFC_SLI_REV4 &&
1790 		    active_rrqs_xri_bitmap)
1791 			memcpy(ndlp->active_rrqs_xri_bitmap,
1792 			       active_rrqs_xri_bitmap,
1793 			       phba->cfg_rrq_xri_bitmap_sz);
1794 
1795 		/* Since we are switching over to the new_ndlp,
1796 		 * reset the old ndlp state
1797 		 */
1798 		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1799 		    (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1800 			keep_nlp_state = NLP_STE_NPR_NODE;
1801 		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1802 
1803 		/* Previous ndlp no longer active with nvme host transport.
1804 		 * Remove reference from earlier registration unless the
1805 		 * nvme host took care of it.
1806 		 */
1807 		if (ndlp->nrport)
1808 			lpfc_nlp_put(ndlp);
1809 		ndlp->nrport = keep_nrport;
1810 
1811 		/* Fix up the rport accordingly */
1812 		rport = ndlp->rport;
1813 		if (rport) {
1814 			rdata = rport->dd_data;
1815 			put_node = rdata->pnode != NULL;
1816 			put_rport = ndlp->rport != NULL;
1817 			rdata->pnode = NULL;
1818 			ndlp->rport = NULL;
1819 			if (put_node)
1820 				lpfc_nlp_put(ndlp);
1821 			if (put_rport)
1822 				put_device(&rport->dev);
1823 		}
1824 	}
1825 	if (phba->sli_rev == LPFC_SLI_REV4 &&
1826 	    active_rrqs_xri_bitmap)
1827 		mempool_free(active_rrqs_xri_bitmap,
1828 			     phba->active_rrq_pool);
1829 
1830 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
1831 			 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n",
1832 			 new_ndlp->nlp_DID, new_ndlp->nlp_flag,
1833 			 new_ndlp->nlp_fc4_type);
1834 
1835 	return new_ndlp;
1836 }
1837 
1838 /**
1839  * lpfc_end_rscn - Check and handle more rscn for a vport
1840  * @vport: pointer to a host virtual N_Port data structure.
1841  *
1842  * This routine checks whether more Registration State Change
1843  * Notifications (RSCNs) came in while the discovery state machine was in
1844  * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1845  * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1846  * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1847  * handling the RSCNs.
1848  **/
1849 void
1850 lpfc_end_rscn(struct lpfc_vport *vport)
1851 {
1852 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1853 
1854 	if (vport->fc_flag & FC_RSCN_MODE) {
1855 		/*
1856 		 * Check to see if more RSCNs came in while we were
1857 		 * processing this one.
1858 		 */
1859 		if (vport->fc_rscn_id_cnt ||
1860 		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1861 			lpfc_els_handle_rscn(vport);
1862 		else {
1863 			spin_lock_irq(shost->host_lock);
1864 			vport->fc_flag &= ~FC_RSCN_MODE;
1865 			spin_unlock_irq(shost->host_lock);
1866 		}
1867 	}
1868 }
1869 
1870 /**
1871  * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1872  * @phba: pointer to lpfc hba data structure.
1873  * @cmdiocb: pointer to lpfc command iocb data structure.
1874  * @rspiocb: pointer to lpfc response iocb data structure.
1875  *
1876  * This routine will call the clear rrq function to free the rrq and
1877  * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1878  * exist then the clear_rrq is still called because the rrq needs to
1879  * be freed.
1880  **/
1881 
1882 static void
1883 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1884 		    struct lpfc_iocbq *rspiocb)
1885 {
1886 	struct lpfc_vport *vport = cmdiocb->vport;
1887 	IOCB_t *irsp;
1888 	struct lpfc_nodelist *ndlp;
1889 	struct lpfc_node_rrq *rrq;
1890 
1891 	/* we pass cmdiocb to state machine which needs rspiocb as well */
1892 	rrq = cmdiocb->context_un.rrq;
1893 	cmdiocb->context_un.rsp_iocb = rspiocb;
1894 
1895 	irsp = &rspiocb->iocb;
1896 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1897 		"RRQ cmpl:      status:x%x/x%x did:x%x",
1898 		irsp->ulpStatus, irsp->un.ulpWord[4],
1899 		irsp->un.elsreq64.remoteID);
1900 
1901 	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1902 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1903 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1904 				 "2882 RRQ completes to NPort x%x "
1905 				 "with no ndlp. Data: x%x x%x x%x\n",
1906 				 irsp->un.elsreq64.remoteID,
1907 				 irsp->ulpStatus, irsp->un.ulpWord[4],
1908 				 irsp->ulpIoTag);
1909 		goto out;
1910 	}
1911 
1912 	/* rrq completes to NPort <nlp_DID> */
1913 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1914 			 "2880 RRQ completes to NPort x%x "
1915 			 "Data: x%x x%x x%x x%x x%x\n",
1916 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1917 			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1918 
1919 	if (irsp->ulpStatus) {
1920 		/* Check for retry */
1921 		/* RRQ failed Don't print the vport to vport rjts */
1922 		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1923 			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1924 			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1925 			(phba)->pport->cfg_log_verbose & LOG_ELS)
1926 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1927 				 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1928 				 ndlp->nlp_DID, irsp->ulpStatus,
1929 				 irsp->un.ulpWord[4]);
1930 	}
1931 out:
1932 	if (rrq)
1933 		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1934 	lpfc_els_free_iocb(phba, cmdiocb);
1935 	return;
1936 }
1937 /**
1938  * lpfc_cmpl_els_plogi - Completion callback function for plogi
1939  * @phba: pointer to lpfc hba data structure.
1940  * @cmdiocb: pointer to lpfc command iocb data structure.
1941  * @rspiocb: pointer to lpfc response iocb data structure.
1942  *
1943  * This routine is the completion callback function for issuing the Port
1944  * Login (PLOGI) command. For PLOGI completion, there must be an active
1945  * ndlp on the vport node list that matches the remote node ID from the
1946  * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
1947  * ignored and command IOCB released. The PLOGI response IOCB status is
1948  * checked for error conditons. If there is error status reported, PLOGI
1949  * retry shall be attempted by invoking the lpfc_els_retry() routine.
1950  * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1951  * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1952  * (DSM) is set for this PLOGI completion. Finally, it checks whether
1953  * there are additional N_Port nodes with the vport that need to perform
1954  * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition
1955  * PLOGIs.
1956  **/
1957 static void
1958 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1959 		    struct lpfc_iocbq *rspiocb)
1960 {
1961 	struct lpfc_vport *vport = cmdiocb->vport;
1962 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1963 	IOCB_t *irsp;
1964 	struct lpfc_nodelist *ndlp;
1965 	struct lpfc_dmabuf *prsp;
1966 	int disc;
1967 
1968 	/* we pass cmdiocb to state machine which needs rspiocb as well */
1969 	cmdiocb->context_un.rsp_iocb = rspiocb;
1970 
1971 	irsp = &rspiocb->iocb;
1972 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1973 		"PLOGI cmpl:      status:x%x/x%x did:x%x",
1974 		irsp->ulpStatus, irsp->un.ulpWord[4],
1975 		irsp->un.elsreq64.remoteID);
1976 
1977 	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1978 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1979 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1980 				 "0136 PLOGI completes to NPort x%x "
1981 				 "with no ndlp. Data: x%x x%x x%x\n",
1982 				 irsp->un.elsreq64.remoteID,
1983 				 irsp->ulpStatus, irsp->un.ulpWord[4],
1984 				 irsp->ulpIoTag);
1985 		goto out;
1986 	}
1987 
1988 	/* Since ndlp can be freed in the disc state machine, note if this node
1989 	 * is being used during discovery.
1990 	 */
1991 	spin_lock_irq(shost->host_lock);
1992 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1993 	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1994 	spin_unlock_irq(shost->host_lock);
1995 
1996 	/* PLOGI completes to NPort <nlp_DID> */
1997 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1998 			 "0102 PLOGI completes to NPort x%06x "
1999 			 "Data: x%x x%x x%x x%x x%x\n",
2000 			 ndlp->nlp_DID, ndlp->nlp_fc4_type,
2001 			 irsp->ulpStatus, irsp->un.ulpWord[4],
2002 			 disc, vport->num_disc_nodes);
2003 
2004 	/* Check to see if link went down during discovery */
2005 	if (lpfc_els_chk_latt(vport)) {
2006 		spin_lock_irq(shost->host_lock);
2007 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2008 		spin_unlock_irq(shost->host_lock);
2009 		goto out;
2010 	}
2011 
2012 	if (irsp->ulpStatus) {
2013 		/* Check for retry */
2014 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2015 			/* ELS command is being retried */
2016 			if (disc) {
2017 				spin_lock_irq(shost->host_lock);
2018 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2019 				spin_unlock_irq(shost->host_lock);
2020 			}
2021 			goto out;
2022 		}
2023 		/* PLOGI failed Don't print the vport to vport rjts */
2024 		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
2025 			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
2026 			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
2027 			(phba)->pport->cfg_log_verbose & LOG_ELS)
2028 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2029 				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
2030 				 ndlp->nlp_DID, irsp->ulpStatus,
2031 				 irsp->un.ulpWord[4]);
2032 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2033 		if (!lpfc_error_lost_link(irsp))
2034 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2035 						NLP_EVT_CMPL_PLOGI);
2036 	} else {
2037 		/* Good status, call state machine */
2038 		prsp = list_entry(((struct lpfc_dmabuf *)
2039 				   cmdiocb->context2)->list.next,
2040 				  struct lpfc_dmabuf, list);
2041 		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2042 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2043 					     NLP_EVT_CMPL_PLOGI);
2044 	}
2045 
2046 	if (disc && vport->num_disc_nodes) {
2047 		/* Check to see if there are more PLOGIs to be sent */
2048 		lpfc_more_plogi(vport);
2049 
2050 		if (vport->num_disc_nodes == 0) {
2051 			spin_lock_irq(shost->host_lock);
2052 			vport->fc_flag &= ~FC_NDISC_ACTIVE;
2053 			spin_unlock_irq(shost->host_lock);
2054 
2055 			lpfc_can_disctmo(vport);
2056 			lpfc_end_rscn(vport);
2057 		}
2058 	}
2059 
2060 out:
2061 	lpfc_els_free_iocb(phba, cmdiocb);
2062 	return;
2063 }
2064 
2065 /**
2066  * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
2067  * @vport: pointer to a host virtual N_Port data structure.
2068  * @did: destination port identifier.
2069  * @retry: number of retries to the command IOCB.
2070  *
2071  * This routine issues a Port Login (PLOGI) command to a remote N_Port
2072  * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
2073  * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
2074  * This routine constructs the proper feilds of the PLOGI IOCB and invokes
2075  * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
2076  *
2077  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2078  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2079  * will be stored into the context1 field of the IOCB for the completion
2080  * callback function to the PLOGI ELS command.
2081  *
2082  * Return code
2083  *   0 - Successfully issued a plogi for @vport
2084  *   1 - failed to issue a plogi for @vport
2085  **/
2086 int
2087 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2088 {
2089 	struct lpfc_hba  *phba = vport->phba;
2090 	struct Scsi_Host *shost;
2091 	struct serv_parm *sp;
2092 	struct lpfc_nodelist *ndlp;
2093 	struct lpfc_iocbq *elsiocb;
2094 	uint8_t *pcmd;
2095 	uint16_t cmdsize;
2096 	int ret;
2097 
2098 	ndlp = lpfc_findnode_did(vport, did);
2099 
2100 	if (ndlp) {
2101 		/* Defer the processing of the issue PLOGI until after the
2102 		 * outstanding UNREG_RPI mbox command completes, unless we
2103 		 * are going offline. This logic does not apply for Fabric DIDs
2104 		 */
2105 		if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2106 		    ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) &&
2107 		    !(vport->fc_flag & FC_OFFLINE_MODE)) {
2108 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2109 					 "4110 Issue PLOGI x%x deferred "
2110 					 "on NPort x%x rpi x%x Data: %p\n",
2111 					 ndlp->nlp_defer_did, ndlp->nlp_DID,
2112 					 ndlp->nlp_rpi, ndlp);
2113 
2114 			/* We can only defer 1st PLOGI */
2115 			if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING)
2116 				ndlp->nlp_defer_did = did;
2117 			return 0;
2118 		}
2119 		if (!NLP_CHK_NODE_ACT(ndlp))
2120 			ndlp = NULL;
2121 	}
2122 
2123 	/* If ndlp is not NULL, we will bump the reference count on it */
2124 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
2125 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
2126 				     ELS_CMD_PLOGI);
2127 	if (!elsiocb)
2128 		return 1;
2129 
2130 	shost = lpfc_shost_from_vport(vport);
2131 	spin_lock_irq(shost->host_lock);
2132 	ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT;
2133 	spin_unlock_irq(shost->host_lock);
2134 
2135 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2136 
2137 	/* For PLOGI request, remainder of payload is service parameters */
2138 	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
2139 	pcmd += sizeof(uint32_t);
2140 	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
2141 	sp = (struct serv_parm *) pcmd;
2142 
2143 	/*
2144 	 * If we are a N-port connected to a Fabric, fix-up paramm's so logins
2145 	 * to device on remote loops work.
2146 	 */
2147 	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
2148 		sp->cmn.altBbCredit = 1;
2149 
2150 	if (sp->cmn.fcphLow < FC_PH_4_3)
2151 		sp->cmn.fcphLow = FC_PH_4_3;
2152 
2153 	if (sp->cmn.fcphHigh < FC_PH3)
2154 		sp->cmn.fcphHigh = FC_PH3;
2155 
2156 	sp->cmn.valid_vendor_ver_level = 0;
2157 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
2158 	sp->cmn.bbRcvSizeMsb &= 0xF;
2159 
2160 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2161 		"Issue PLOGI:     did:x%x",
2162 		did, 0, 0);
2163 
2164 	/* If our firmware supports this feature, convey that
2165 	 * information to the target using the vendor specific field.
2166 	 */
2167 	if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
2168 		sp->cmn.valid_vendor_ver_level = 1;
2169 		sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
2170 		sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
2171 	}
2172 
2173 	phba->fc_stat.elsXmitPLOGI++;
2174 	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
2175 	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2176 
2177 	if (ret == IOCB_ERROR) {
2178 		lpfc_els_free_iocb(phba, elsiocb);
2179 		return 1;
2180 	}
2181 	return 0;
2182 }
2183 
2184 /**
2185  * lpfc_cmpl_els_prli - Completion callback function for prli
2186  * @phba: pointer to lpfc hba data structure.
2187  * @cmdiocb: pointer to lpfc command iocb data structure.
2188  * @rspiocb: pointer to lpfc response iocb data structure.
2189  *
2190  * This routine is the completion callback function for a Process Login
2191  * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2192  * status. If there is error status reported, PRLI retry shall be attempted
2193  * by invoking the lpfc_els_retry() routine. Otherwise, the state
2194  * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2195  * ndlp to mark the PRLI completion.
2196  **/
2197 static void
2198 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2199 		   struct lpfc_iocbq *rspiocb)
2200 {
2201 	struct lpfc_vport *vport = cmdiocb->vport;
2202 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2203 	IOCB_t *irsp;
2204 	struct lpfc_nodelist *ndlp;
2205 
2206 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2207 	cmdiocb->context_un.rsp_iocb = rspiocb;
2208 
2209 	irsp = &(rspiocb->iocb);
2210 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2211 	spin_lock_irq(shost->host_lock);
2212 	ndlp->nlp_flag &= ~NLP_PRLI_SND;
2213 
2214 	/* Driver supports multiple FC4 types.  Counters matter. */
2215 	vport->fc_prli_sent--;
2216 	ndlp->fc4_prli_sent--;
2217 	spin_unlock_irq(shost->host_lock);
2218 
2219 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2220 		"PRLI cmpl:       status:x%x/x%x did:x%x",
2221 		irsp->ulpStatus, irsp->un.ulpWord[4],
2222 		ndlp->nlp_DID);
2223 
2224 	/* PRLI completes to NPort <nlp_DID> */
2225 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2226 			 "0103 PRLI completes to NPort x%06x "
2227 			 "Data: x%x x%x x%x x%x\n",
2228 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2229 			 vport->num_disc_nodes, ndlp->fc4_prli_sent);
2230 
2231 	/* Check to see if link went down during discovery */
2232 	if (lpfc_els_chk_latt(vport))
2233 		goto out;
2234 
2235 	if (irsp->ulpStatus) {
2236 		/* Check for retry */
2237 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2238 			/* ELS command is being retried */
2239 			goto out;
2240 		}
2241 
2242 		/* PRLI failed */
2243 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2244 				 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2245 				 "data: x%x\n",
2246 				 ndlp->nlp_DID, irsp->ulpStatus,
2247 				 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
2248 
2249 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2250 		if (lpfc_error_lost_link(irsp))
2251 			goto out;
2252 		else
2253 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2254 						NLP_EVT_CMPL_PRLI);
2255 	} else {
2256 		/* Good status, call state machine.  However, if another
2257 		 * PRLI is outstanding, don't call the state machine
2258 		 * because final disposition to Mapped or Unmapped is
2259 		 * completed there.
2260 		 */
2261 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2262 					NLP_EVT_CMPL_PRLI);
2263 	}
2264 
2265 out:
2266 	lpfc_els_free_iocb(phba, cmdiocb);
2267 	return;
2268 }
2269 
2270 /**
2271  * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2272  * @vport: pointer to a host virtual N_Port data structure.
2273  * @ndlp: pointer to a node-list data structure.
2274  * @retry: number of retries to the command IOCB.
2275  *
2276  * This routine issues a Process Login (PRLI) ELS command for the
2277  * @vport. The PRLI service parameters are set up in the payload of the
2278  * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2279  * is put to the IOCB completion callback func field before invoking the
2280  * routine lpfc_sli_issue_iocb() to send out PRLI command.
2281  *
2282  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2283  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2284  * will be stored into the context1 field of the IOCB for the completion
2285  * callback function to the PRLI ELS command.
2286  *
2287  * Return code
2288  *   0 - successfully issued prli iocb command for @vport
2289  *   1 - failed to issue prli iocb command for @vport
2290  **/
2291 int
2292 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2293 		    uint8_t retry)
2294 {
2295 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2296 	struct lpfc_hba *phba = vport->phba;
2297 	PRLI *npr;
2298 	struct lpfc_nvme_prli *npr_nvme;
2299 	struct lpfc_iocbq *elsiocb;
2300 	uint8_t *pcmd;
2301 	uint16_t cmdsize;
2302 	u32 local_nlp_type, elscmd;
2303 
2304 	/*
2305 	 * If we are in RSCN mode, the FC4 types supported from a
2306 	 * previous GFT_ID command may not be accurate. So, if we
2307 	 * are a NVME Initiator, always look for the possibility of
2308 	 * the remote NPort beng a NVME Target.
2309 	 */
2310 	if (phba->sli_rev == LPFC_SLI_REV4 &&
2311 	    vport->fc_flag & FC_RSCN_MODE &&
2312 	    vport->nvmei_support)
2313 		ndlp->nlp_fc4_type |= NLP_FC4_NVME;
2314 	local_nlp_type = ndlp->nlp_fc4_type;
2315 
2316 	/* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2317 	 * fields here before any of them can complete.
2318 	 */
2319 	ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2320 	ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2321 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2322 	ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2323 	ndlp->nvme_fb_size = 0;
2324 
2325  send_next_prli:
2326 	if (local_nlp_type & NLP_FC4_FCP) {
2327 		/* Payload is 4 + 16 = 20 x14 bytes. */
2328 		cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2329 		elscmd = ELS_CMD_PRLI;
2330 	} else if (local_nlp_type & NLP_FC4_NVME) {
2331 		/* Payload is 4 + 20 = 24 x18 bytes. */
2332 		cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2333 		elscmd = ELS_CMD_NVMEPRLI;
2334 	} else {
2335 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2336 				 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2337 				 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2338 		return 1;
2339 	}
2340 
2341 	/* SLI3 ports don't support NVME.  If this rport is a strict NVME
2342 	 * FC4 type, implicitly LOGO.
2343 	 */
2344 	if (phba->sli_rev == LPFC_SLI_REV3 &&
2345 	    ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2346 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2347 				 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2348 				 ndlp->nlp_type);
2349 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2350 		return 1;
2351 	}
2352 
2353 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2354 				     ndlp->nlp_DID, elscmd);
2355 	if (!elsiocb)
2356 		return 1;
2357 
2358 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2359 
2360 	/* For PRLI request, remainder of payload is service parameters */
2361 	memset(pcmd, 0, cmdsize);
2362 
2363 	if (local_nlp_type & NLP_FC4_FCP) {
2364 		/* Remainder of payload is FCP PRLI parameter page.
2365 		 * Note: this data structure is defined as
2366 		 * BE/LE in the structure definition so no
2367 		 * byte swap call is made.
2368 		 */
2369 		*((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
2370 		pcmd += sizeof(uint32_t);
2371 		npr = (PRLI *)pcmd;
2372 
2373 		/*
2374 		 * If our firmware version is 3.20 or later,
2375 		 * set the following bits for FC-TAPE support.
2376 		 */
2377 		if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2378 			npr->ConfmComplAllowed = 1;
2379 			npr->Retry = 1;
2380 			npr->TaskRetryIdReq = 1;
2381 		}
2382 		npr->estabImagePair = 1;
2383 		npr->readXferRdyDis = 1;
2384 		if (vport->cfg_first_burst_size)
2385 			npr->writeXferRdyDis = 1;
2386 
2387 		/* For FCP support */
2388 		npr->prliType = PRLI_FCP_TYPE;
2389 		npr->initiatorFunc = 1;
2390 		elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
2391 
2392 		/* Remove FCP type - processed. */
2393 		local_nlp_type &= ~NLP_FC4_FCP;
2394 	} else if (local_nlp_type & NLP_FC4_NVME) {
2395 		/* Remainder of payload is NVME PRLI parameter page.
2396 		 * This data structure is the newer definition that
2397 		 * uses bf macros so a byte swap is required.
2398 		 */
2399 		*((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
2400 		pcmd += sizeof(uint32_t);
2401 		npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2402 		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2403 		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
2404 
2405 		/* Only initiators request first burst. */
2406 		if ((phba->cfg_nvme_enable_fb) &&
2407 		    !phba->nvmet_support)
2408 			bf_set(prli_fba, npr_nvme, 1);
2409 
2410 		if (phba->nvmet_support) {
2411 			bf_set(prli_tgt, npr_nvme, 1);
2412 			bf_set(prli_disc, npr_nvme, 1);
2413 		} else {
2414 			bf_set(prli_init, npr_nvme, 1);
2415 			bf_set(prli_conf, npr_nvme, 1);
2416 		}
2417 
2418 		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
2419 		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
2420 		elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
2421 
2422 		/* Remove NVME type - processed. */
2423 		local_nlp_type &= ~NLP_FC4_NVME;
2424 	}
2425 
2426 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2427 		"Issue PRLI:      did:x%x",
2428 		ndlp->nlp_DID, 0, 0);
2429 
2430 	phba->fc_stat.elsXmitPRLI++;
2431 	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2432 	spin_lock_irq(shost->host_lock);
2433 	ndlp->nlp_flag |= NLP_PRLI_SND;
2434 
2435 	/* The vport counters are used for lpfc_scan_finished, but
2436 	 * the ndlp is used to track outstanding PRLIs for different
2437 	 * FC4 types.
2438 	 */
2439 	vport->fc_prli_sent++;
2440 	ndlp->fc4_prli_sent++;
2441 	spin_unlock_irq(shost->host_lock);
2442 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2443 	    IOCB_ERROR) {
2444 		spin_lock_irq(shost->host_lock);
2445 		ndlp->nlp_flag &= ~NLP_PRLI_SND;
2446 		spin_unlock_irq(shost->host_lock);
2447 		lpfc_els_free_iocb(phba, elsiocb);
2448 		return 1;
2449 	}
2450 
2451 
2452 	/* The driver supports 2 FC4 types.  Make sure
2453 	 * a PRLI is issued for all types before exiting.
2454 	 */
2455 	if (phba->sli_rev == LPFC_SLI_REV4 &&
2456 	    local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
2457 		goto send_next_prli;
2458 
2459 	return 0;
2460 }
2461 
2462 /**
2463  * lpfc_rscn_disc - Perform rscn discovery for a vport
2464  * @vport: pointer to a host virtual N_Port data structure.
2465  *
2466  * This routine performs Registration State Change Notification (RSCN)
2467  * discovery for a @vport. If the @vport's node port recovery count is not
2468  * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2469  * the nodes that need recovery. If none of the PLOGI were needed through
2470  * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2471  * invoked to check and handle possible more RSCN came in during the period
2472  * of processing the current ones.
2473  **/
2474 static void
2475 lpfc_rscn_disc(struct lpfc_vport *vport)
2476 {
2477 	lpfc_can_disctmo(vport);
2478 
2479 	/* RSCN discovery */
2480 	/* go thru NPR nodes and issue ELS PLOGIs */
2481 	if (vport->fc_npr_cnt)
2482 		if (lpfc_els_disc_plogi(vport))
2483 			return;
2484 
2485 	lpfc_end_rscn(vport);
2486 }
2487 
2488 /**
2489  * lpfc_adisc_done - Complete the adisc phase of discovery
2490  * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2491  *
2492  * This function is called when the final ADISC is completed during discovery.
2493  * This function handles clearing link attention or issuing reg_vpi depending
2494  * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2495  * discovery.
2496  * This function is called with no locks held.
2497  **/
2498 static void
2499 lpfc_adisc_done(struct lpfc_vport *vport)
2500 {
2501 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
2502 	struct lpfc_hba   *phba = vport->phba;
2503 
2504 	/*
2505 	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2506 	 * and continue discovery.
2507 	 */
2508 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2509 	    !(vport->fc_flag & FC_RSCN_MODE) &&
2510 	    (phba->sli_rev < LPFC_SLI_REV4)) {
2511 		/* The ADISCs are complete.  Doesn't matter if they
2512 		 * succeeded or failed because the ADISC completion
2513 		 * routine guarantees to call the state machine and
2514 		 * the RPI is either unregistered (failed ADISC response)
2515 		 * or the RPI is still valid and the node is marked
2516 		 * mapped for a target.  The exchanges should be in the
2517 		 * correct state. This code is specific to SLI3.
2518 		 */
2519 		lpfc_issue_clear_la(phba, vport);
2520 		lpfc_issue_reg_vpi(phba, vport);
2521 		return;
2522 	}
2523 	/*
2524 	* For SLI2, we need to set port_state to READY
2525 	* and continue discovery.
2526 	*/
2527 	if (vport->port_state < LPFC_VPORT_READY) {
2528 		/* If we get here, there is nothing to ADISC */
2529 		lpfc_issue_clear_la(phba, vport);
2530 		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2531 			vport->num_disc_nodes = 0;
2532 			/* go thru NPR list, issue ELS PLOGIs */
2533 			if (vport->fc_npr_cnt)
2534 				lpfc_els_disc_plogi(vport);
2535 			if (!vport->num_disc_nodes) {
2536 				spin_lock_irq(shost->host_lock);
2537 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
2538 				spin_unlock_irq(shost->host_lock);
2539 				lpfc_can_disctmo(vport);
2540 				lpfc_end_rscn(vport);
2541 			}
2542 		}
2543 		vport->port_state = LPFC_VPORT_READY;
2544 	} else
2545 		lpfc_rscn_disc(vport);
2546 }
2547 
2548 /**
2549  * lpfc_more_adisc - Issue more adisc as needed
2550  * @vport: pointer to a host virtual N_Port data structure.
2551  *
2552  * This routine determines whether there are more ndlps on a @vport
2553  * node list need to have Address Discover (ADISC) issued. If so, it will
2554  * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2555  * remaining nodes which need to have ADISC sent.
2556  **/
2557 void
2558 lpfc_more_adisc(struct lpfc_vport *vport)
2559 {
2560 	if (vport->num_disc_nodes)
2561 		vport->num_disc_nodes--;
2562 	/* Continue discovery with <num_disc_nodes> ADISCs to go */
2563 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2564 			 "0210 Continue discovery with %d ADISCs to go "
2565 			 "Data: x%x x%x x%x\n",
2566 			 vport->num_disc_nodes, vport->fc_adisc_cnt,
2567 			 vport->fc_flag, vport->port_state);
2568 	/* Check to see if there are more ADISCs to be sent */
2569 	if (vport->fc_flag & FC_NLP_MORE) {
2570 		lpfc_set_disctmo(vport);
2571 		/* go thru NPR nodes and issue any remaining ELS ADISCs */
2572 		lpfc_els_disc_adisc(vport);
2573 	}
2574 	if (!vport->num_disc_nodes)
2575 		lpfc_adisc_done(vport);
2576 	return;
2577 }
2578 
2579 /**
2580  * lpfc_cmpl_els_adisc - Completion callback function for adisc
2581  * @phba: pointer to lpfc hba data structure.
2582  * @cmdiocb: pointer to lpfc command iocb data structure.
2583  * @rspiocb: pointer to lpfc response iocb data structure.
2584  *
2585  * This routine is the completion function for issuing the Address Discover
2586  * (ADISC) command. It first checks to see whether link went down during
2587  * the discovery process. If so, the node will be marked as node port
2588  * recovery for issuing discover IOCB by the link attention handler and
2589  * exit. Otherwise, the response status is checked. If error was reported
2590  * in the response status, the ADISC command shall be retried by invoking
2591  * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2592  * the response status, the state machine is invoked to set transition
2593  * with respect to NLP_EVT_CMPL_ADISC event.
2594  **/
2595 static void
2596 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2597 		    struct lpfc_iocbq *rspiocb)
2598 {
2599 	struct lpfc_vport *vport = cmdiocb->vport;
2600 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2601 	IOCB_t *irsp;
2602 	struct lpfc_nodelist *ndlp;
2603 	int  disc;
2604 
2605 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2606 	cmdiocb->context_un.rsp_iocb = rspiocb;
2607 
2608 	irsp = &(rspiocb->iocb);
2609 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2610 
2611 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2612 		"ADISC cmpl:      status:x%x/x%x did:x%x",
2613 		irsp->ulpStatus, irsp->un.ulpWord[4],
2614 		ndlp->nlp_DID);
2615 
2616 	/* Since ndlp can be freed in the disc state machine, note if this node
2617 	 * is being used during discovery.
2618 	 */
2619 	spin_lock_irq(shost->host_lock);
2620 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2621 	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2622 	spin_unlock_irq(shost->host_lock);
2623 	/* ADISC completes to NPort <nlp_DID> */
2624 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2625 			 "0104 ADISC completes to NPort x%x "
2626 			 "Data: x%x x%x x%x x%x x%x\n",
2627 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2628 			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
2629 	/* Check to see if link went down during discovery */
2630 	if (lpfc_els_chk_latt(vport)) {
2631 		spin_lock_irq(shost->host_lock);
2632 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2633 		spin_unlock_irq(shost->host_lock);
2634 		goto out;
2635 	}
2636 
2637 	if (irsp->ulpStatus) {
2638 		/* Check for retry */
2639 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2640 			/* ELS command is being retried */
2641 			if (disc) {
2642 				spin_lock_irq(shost->host_lock);
2643 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2644 				spin_unlock_irq(shost->host_lock);
2645 				lpfc_set_disctmo(vport);
2646 			}
2647 			goto out;
2648 		}
2649 		/* ADISC failed */
2650 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2651 				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2652 				 ndlp->nlp_DID, irsp->ulpStatus,
2653 				 irsp->un.ulpWord[4]);
2654 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2655 		if (!lpfc_error_lost_link(irsp))
2656 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2657 						NLP_EVT_CMPL_ADISC);
2658 	} else
2659 		/* Good status, call state machine */
2660 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2661 					NLP_EVT_CMPL_ADISC);
2662 
2663 	/* Check to see if there are more ADISCs to be sent */
2664 	if (disc && vport->num_disc_nodes)
2665 		lpfc_more_adisc(vport);
2666 out:
2667 	lpfc_els_free_iocb(phba, cmdiocb);
2668 	return;
2669 }
2670 
2671 /**
2672  * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport
2673  * @vport: pointer to a virtual N_Port data structure.
2674  * @ndlp: pointer to a node-list data structure.
2675  * @retry: number of retries to the command IOCB.
2676  *
2677  * This routine issues an Address Discover (ADISC) for an @ndlp on a
2678  * @vport. It prepares the payload of the ADISC ELS command, updates the
2679  * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2680  * to issue the ADISC ELS command.
2681  *
2682  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2683  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2684  * will be stored into the context1 field of the IOCB for the completion
2685  * callback function to the ADISC ELS command.
2686  *
2687  * Return code
2688  *   0 - successfully issued adisc
2689  *   1 - failed to issue adisc
2690  **/
2691 int
2692 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2693 		     uint8_t retry)
2694 {
2695 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2696 	struct lpfc_hba  *phba = vport->phba;
2697 	ADISC *ap;
2698 	struct lpfc_iocbq *elsiocb;
2699 	uint8_t *pcmd;
2700 	uint16_t cmdsize;
2701 
2702 	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2703 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2704 				     ndlp->nlp_DID, ELS_CMD_ADISC);
2705 	if (!elsiocb)
2706 		return 1;
2707 
2708 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2709 
2710 	/* For ADISC request, remainder of payload is service parameters */
2711 	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2712 	pcmd += sizeof(uint32_t);
2713 
2714 	/* Fill in ADISC payload */
2715 	ap = (ADISC *) pcmd;
2716 	ap->hardAL_PA = phba->fc_pref_ALPA;
2717 	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2718 	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2719 	ap->DID = be32_to_cpu(vport->fc_myDID);
2720 
2721 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2722 		"Issue ADISC:     did:x%x",
2723 		ndlp->nlp_DID, 0, 0);
2724 
2725 	phba->fc_stat.elsXmitADISC++;
2726 	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2727 	spin_lock_irq(shost->host_lock);
2728 	ndlp->nlp_flag |= NLP_ADISC_SND;
2729 	spin_unlock_irq(shost->host_lock);
2730 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2731 	    IOCB_ERROR) {
2732 		spin_lock_irq(shost->host_lock);
2733 		ndlp->nlp_flag &= ~NLP_ADISC_SND;
2734 		spin_unlock_irq(shost->host_lock);
2735 		lpfc_els_free_iocb(phba, elsiocb);
2736 		return 1;
2737 	}
2738 	return 0;
2739 }
2740 
2741 /**
2742  * lpfc_cmpl_els_logo - Completion callback function for logo
2743  * @phba: pointer to lpfc hba data structure.
2744  * @cmdiocb: pointer to lpfc command iocb data structure.
2745  * @rspiocb: pointer to lpfc response iocb data structure.
2746  *
2747  * This routine is the completion function for issuing the ELS Logout (LOGO)
2748  * command. If no error status was reported from the LOGO response, the
2749  * state machine of the associated ndlp shall be invoked for transition with
2750  * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2751  * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2752  **/
2753 static void
2754 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2755 		   struct lpfc_iocbq *rspiocb)
2756 {
2757 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2758 	struct lpfc_vport *vport = ndlp->vport;
2759 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2760 	IOCB_t *irsp;
2761 	struct lpfcMboxq *mbox;
2762 	unsigned long flags;
2763 	uint32_t skip_recovery = 0;
2764 
2765 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2766 	cmdiocb->context_un.rsp_iocb = rspiocb;
2767 
2768 	irsp = &(rspiocb->iocb);
2769 	spin_lock_irq(shost->host_lock);
2770 	ndlp->nlp_flag &= ~NLP_LOGO_SND;
2771 	spin_unlock_irq(shost->host_lock);
2772 
2773 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2774 		"LOGO cmpl:       status:x%x/x%x did:x%x",
2775 		irsp->ulpStatus, irsp->un.ulpWord[4],
2776 		ndlp->nlp_DID);
2777 
2778 	/* LOGO completes to NPort <nlp_DID> */
2779 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2780 			 "0105 LOGO completes to NPort x%x "
2781 			 "Data: x%x x%x x%x x%x\n",
2782 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2783 			 irsp->ulpTimeout, vport->num_disc_nodes);
2784 
2785 	if (lpfc_els_chk_latt(vport)) {
2786 		skip_recovery = 1;
2787 		goto out;
2788 	}
2789 
2790 	/* Check to see if link went down during discovery */
2791 	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2792 	        /* NLP_EVT_DEVICE_RM should unregister the RPI
2793 		 * which should abort all outstanding IOs.
2794 		 */
2795 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2796 					NLP_EVT_DEVICE_RM);
2797 		skip_recovery = 1;
2798 		goto out;
2799 	}
2800 
2801 	/* The LOGO will not be retried on failure.  A LOGO was
2802 	 * issued to the remote rport and a ACC or RJT or no Answer are
2803 	 * all acceptable.  Note the failure and move forward with
2804 	 * discovery.  The PLOGI will retry.
2805 	 */
2806 	if (irsp->ulpStatus) {
2807 		/* LOGO failed */
2808 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2809 				 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
2810 				 ndlp->nlp_DID, irsp->ulpStatus,
2811 				 irsp->un.ulpWord[4]);
2812 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2813 		if (lpfc_error_lost_link(irsp)) {
2814 			skip_recovery = 1;
2815 			goto out;
2816 		}
2817 	}
2818 
2819 	/* Call state machine. This will unregister the rpi if needed. */
2820 	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2821 
2822 out:
2823 	lpfc_els_free_iocb(phba, cmdiocb);
2824 	/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2825 	if ((vport->fc_flag & FC_PT2PT) &&
2826 		!(vport->fc_flag & FC_PT2PT_PLOGI)) {
2827 		phba->pport->fc_myDID = 0;
2828 
2829 		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
2830 		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
2831 			if (phba->nvmet_support)
2832 				lpfc_nvmet_update_targetport(phba);
2833 			else
2834 				lpfc_nvme_update_localport(phba->pport);
2835 		}
2836 
2837 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2838 		if (mbox) {
2839 			lpfc_config_link(phba, mbox);
2840 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2841 			mbox->vport = vport;
2842 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2843 				MBX_NOT_FINISHED) {
2844 				mempool_free(mbox, phba->mbox_mem_pool);
2845 				skip_recovery = 1;
2846 			}
2847 		}
2848 	}
2849 
2850 	/*
2851 	 * If the node is a target, the handling attempts to recover the port.
2852 	 * For any other port type, the rpi is unregistered as an implicit
2853 	 * LOGO.
2854 	 */
2855 	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
2856 	    skip_recovery == 0) {
2857 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2858 		spin_lock_irqsave(shost->host_lock, flags);
2859 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2860 		spin_unlock_irqrestore(shost->host_lock, flags);
2861 
2862 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2863 				 "3187 LOGO completes to NPort x%x: Start "
2864 				 "Recovery Data: x%x x%x x%x x%x\n",
2865 				 ndlp->nlp_DID, irsp->ulpStatus,
2866 				 irsp->un.ulpWord[4], irsp->ulpTimeout,
2867 				 vport->num_disc_nodes);
2868 		lpfc_disc_start(vport);
2869 	}
2870 	return;
2871 }
2872 
2873 /**
2874  * lpfc_issue_els_logo - Issue a logo to an node on a vport
2875  * @vport: pointer to a virtual N_Port data structure.
2876  * @ndlp: pointer to a node-list data structure.
2877  * @retry: number of retries to the command IOCB.
2878  *
2879  * This routine constructs and issues an ELS Logout (LOGO) iocb command
2880  * to a remote node, referred by an @ndlp on a @vport. It constructs the
2881  * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2882  * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2883  *
2884  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2885  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2886  * will be stored into the context1 field of the IOCB for the completion
2887  * callback function to the LOGO ELS command.
2888  *
2889  * Callers of this routine are expected to unregister the RPI first
2890  *
2891  * Return code
2892  *   0 - successfully issued logo
2893  *   1 - failed to issue logo
2894  **/
2895 int
2896 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2897 		    uint8_t retry)
2898 {
2899 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2900 	struct lpfc_hba  *phba = vport->phba;
2901 	struct lpfc_iocbq *elsiocb;
2902 	uint8_t *pcmd;
2903 	uint16_t cmdsize;
2904 	int rc;
2905 
2906 	spin_lock_irq(shost->host_lock);
2907 	if (ndlp->nlp_flag & NLP_LOGO_SND) {
2908 		spin_unlock_irq(shost->host_lock);
2909 		return 0;
2910 	}
2911 	spin_unlock_irq(shost->host_lock);
2912 
2913 	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2914 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2915 				     ndlp->nlp_DID, ELS_CMD_LOGO);
2916 	if (!elsiocb)
2917 		return 1;
2918 
2919 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2920 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2921 	pcmd += sizeof(uint32_t);
2922 
2923 	/* Fill in LOGO payload */
2924 	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2925 	pcmd += sizeof(uint32_t);
2926 	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2927 
2928 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2929 		"Issue LOGO:      did:x%x",
2930 		ndlp->nlp_DID, 0, 0);
2931 
2932 	phba->fc_stat.elsXmitLOGO++;
2933 	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2934 	spin_lock_irq(shost->host_lock);
2935 	ndlp->nlp_flag |= NLP_LOGO_SND;
2936 	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2937 	spin_unlock_irq(shost->host_lock);
2938 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2939 	if (rc == IOCB_ERROR) {
2940 		spin_lock_irq(shost->host_lock);
2941 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
2942 		spin_unlock_irq(shost->host_lock);
2943 		lpfc_els_free_iocb(phba, elsiocb);
2944 		return 1;
2945 	}
2946 
2947 	spin_lock_irq(shost->host_lock);
2948 	ndlp->nlp_prev_state = ndlp->nlp_state;
2949 	spin_unlock_irq(shost->host_lock);
2950 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
2951 	return 0;
2952 }
2953 
2954 /**
2955  * lpfc_cmpl_els_cmd - Completion callback function for generic els command
2956  * @phba: pointer to lpfc hba data structure.
2957  * @cmdiocb: pointer to lpfc command iocb data structure.
2958  * @rspiocb: pointer to lpfc response iocb data structure.
2959  *
2960  * This routine is a generic completion callback function for ELS commands.
2961  * Specifically, it is the callback function which does not need to perform
2962  * any command specific operations. It is currently used by the ELS command
2963  * issuing routines for the ELS State Change  Request (SCR),
2964  * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2965  * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2966  * certain debug loggings, this callback function simply invokes the
2967  * lpfc_els_chk_latt() routine to check whether link went down during the
2968  * discovery process.
2969  **/
2970 static void
2971 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2972 		  struct lpfc_iocbq *rspiocb)
2973 {
2974 	struct lpfc_vport *vport = cmdiocb->vport;
2975 	IOCB_t *irsp;
2976 
2977 	irsp = &rspiocb->iocb;
2978 
2979 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2980 		"ELS cmd cmpl:    status:x%x/x%x did:x%x",
2981 		irsp->ulpStatus, irsp->un.ulpWord[4],
2982 		irsp->un.elsreq64.remoteID);
2983 	/* ELS cmd tag <ulpIoTag> completes */
2984 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2985 			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2986 			 irsp->ulpIoTag, irsp->ulpStatus,
2987 			 irsp->un.ulpWord[4], irsp->ulpTimeout);
2988 	/* Check to see if link went down during discovery */
2989 	lpfc_els_chk_latt(vport);
2990 	lpfc_els_free_iocb(phba, cmdiocb);
2991 	return;
2992 }
2993 
2994 /**
2995  * lpfc_issue_els_scr - Issue a scr to an node on a vport
2996  * @vport: pointer to a host virtual N_Port data structure.
2997  * @nportid: N_Port identifier to the remote node.
2998  * @retry: number of retries to the command IOCB.
2999  *
3000  * This routine issues a State Change Request (SCR) to a fabric node
3001  * on a @vport. The remote node @nportid is passed into the function. It
3002  * first search the @vport node list to find the matching ndlp. If no such
3003  * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
3004  * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
3005  * routine is invoked to send the SCR IOCB.
3006  *
3007  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3008  * will be incremented by 1 for holding the ndlp and the reference to ndlp
3009  * will be stored into the context1 field of the IOCB for the completion
3010  * callback function to the SCR ELS command.
3011  *
3012  * Return code
3013  *   0 - Successfully issued scr command
3014  *   1 - Failed to issue scr command
3015  **/
3016 int
3017 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3018 {
3019 	struct lpfc_hba  *phba = vport->phba;
3020 	struct lpfc_iocbq *elsiocb;
3021 	uint8_t *pcmd;
3022 	uint16_t cmdsize;
3023 	struct lpfc_nodelist *ndlp;
3024 
3025 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
3026 
3027 	ndlp = lpfc_findnode_did(vport, nportid);
3028 	if (!ndlp) {
3029 		ndlp = lpfc_nlp_init(vport, nportid);
3030 		if (!ndlp)
3031 			return 1;
3032 		lpfc_enqueue_node(vport, ndlp);
3033 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3034 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3035 		if (!ndlp)
3036 			return 1;
3037 	}
3038 
3039 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3040 				     ndlp->nlp_DID, ELS_CMD_SCR);
3041 
3042 	if (!elsiocb) {
3043 		/* This will trigger the release of the node just
3044 		 * allocated
3045 		 */
3046 		lpfc_nlp_put(ndlp);
3047 		return 1;
3048 	}
3049 
3050 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3051 
3052 	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
3053 	pcmd += sizeof(uint32_t);
3054 
3055 	/* For SCR, remainder of payload is SCR parameter page */
3056 	memset(pcmd, 0, sizeof(SCR));
3057 	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
3058 
3059 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3060 		"Issue SCR:       did:x%x",
3061 		ndlp->nlp_DID, 0, 0);
3062 
3063 	phba->fc_stat.elsXmitSCR++;
3064 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3065 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
3066 	    IOCB_ERROR) {
3067 		/* The additional lpfc_nlp_put will cause the following
3068 		 * lpfc_els_free_iocb routine to trigger the rlease of
3069 		 * the node.
3070 		 */
3071 		lpfc_nlp_put(ndlp);
3072 		lpfc_els_free_iocb(phba, elsiocb);
3073 		return 1;
3074 	}
3075 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
3076 	 * trigger the release of node.
3077 	 */
3078 	if (!(vport->fc_flag & FC_PT2PT))
3079 		lpfc_nlp_put(ndlp);
3080 	return 0;
3081 }
3082 
3083 /**
3084  * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric)
3085  *   or the other nport (pt2pt).
3086  * @vport: pointer to a host virtual N_Port data structure.
3087  * @retry: number of retries to the command IOCB.
3088  *
3089  * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD)
3090  *  when connected to a fabric, or to the remote port when connected
3091  *  in point-to-point mode. When sent to the Fabric Controller, it will
3092  *  replay the RSCN to registered recipients.
3093  *
3094  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3095  * will be incremented by 1 for holding the ndlp and the reference to ndlp
3096  * will be stored into the context1 field of the IOCB for the completion
3097  * callback function to the RSCN ELS command.
3098  *
3099  * Return code
3100  *   0 - Successfully issued RSCN command
3101  *   1 - Failed to issue RSCN command
3102  **/
3103 int
3104 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
3105 {
3106 	struct lpfc_hba *phba = vport->phba;
3107 	struct lpfc_iocbq *elsiocb;
3108 	struct lpfc_nodelist *ndlp;
3109 	struct {
3110 		struct fc_els_rscn rscn;
3111 		struct fc_els_rscn_page portid;
3112 	} *event;
3113 	uint32_t nportid;
3114 	uint16_t cmdsize = sizeof(*event);
3115 
3116 	/* Not supported for private loop */
3117 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
3118 	    !(vport->fc_flag & FC_PUBLIC_LOOP))
3119 		return 1;
3120 
3121 	if (vport->fc_flag & FC_PT2PT) {
3122 		/* find any mapped nport - that would be the other nport */
3123 		ndlp = lpfc_findnode_mapped(vport);
3124 		if (!ndlp)
3125 			return 1;
3126 	} else {
3127 		nportid = FC_FID_FCTRL;
3128 		/* find the fabric controller node */
3129 		ndlp = lpfc_findnode_did(vport, nportid);
3130 		if (!ndlp) {
3131 			/* if one didn't exist, make one */
3132 			ndlp = lpfc_nlp_init(vport, nportid);
3133 			if (!ndlp)
3134 				return 1;
3135 			lpfc_enqueue_node(vport, ndlp);
3136 		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3137 			ndlp = lpfc_enable_node(vport, ndlp,
3138 						NLP_STE_UNUSED_NODE);
3139 			if (!ndlp)
3140 				return 1;
3141 		}
3142 	}
3143 
3144 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3145 				     ndlp->nlp_DID, ELS_CMD_RSCN_XMT);
3146 
3147 	if (!elsiocb) {
3148 		/* This will trigger the release of the node just
3149 		 * allocated
3150 		 */
3151 		lpfc_nlp_put(ndlp);
3152 		return 1;
3153 	}
3154 
3155 	event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
3156 
3157 	event->rscn.rscn_cmd = ELS_RSCN;
3158 	event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page);
3159 	event->rscn.rscn_plen = cpu_to_be16(cmdsize);
3160 
3161 	nportid = vport->fc_myDID;
3162 	/* appears that page flags must be 0 for fabric to broadcast RSCN */
3163 	event->portid.rscn_page_flags = 0;
3164 	event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16;
3165 	event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8;
3166 	event->portid.rscn_fid[2] = nportid & 0x000000FF;
3167 
3168 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3169 			      "Issue RSCN:       did:x%x",
3170 			      ndlp->nlp_DID, 0, 0);
3171 
3172 	phba->fc_stat.elsXmitRSCN++;
3173 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3174 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
3175 	    IOCB_ERROR) {
3176 		/* The additional lpfc_nlp_put will cause the following
3177 		 * lpfc_els_free_iocb routine to trigger the rlease of
3178 		 * the node.
3179 		 */
3180 		lpfc_nlp_put(ndlp);
3181 		lpfc_els_free_iocb(phba, elsiocb);
3182 		return 1;
3183 	}
3184 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
3185 	 * trigger the release of node.
3186 	 */
3187 	if (!(vport->fc_flag & FC_PT2PT))
3188 		lpfc_nlp_put(ndlp);
3189 
3190 	return 0;
3191 }
3192 
3193 /**
3194  * lpfc_issue_els_farpr - Issue a farp to an node on a vport
3195  * @vport: pointer to a host virtual N_Port data structure.
3196  * @nportid: N_Port identifier to the remote node.
3197  * @retry: number of retries to the command IOCB.
3198  *
3199  * This routine issues a Fibre Channel Address Resolution Response
3200  * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3201  * is passed into the function. It first search the @vport node list to find
3202  * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3203  * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3204  * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3205  *
3206  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3207  * will be incremented by 1 for holding the ndlp and the reference to ndlp
3208  * will be stored into the context1 field of the IOCB for the completion
3209  * callback function to the PARPR ELS command.
3210  *
3211  * Return code
3212  *   0 - Successfully issued farpr command
3213  *   1 - Failed to issue farpr command
3214  **/
3215 static int
3216 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3217 {
3218 	struct lpfc_hba  *phba = vport->phba;
3219 	struct lpfc_iocbq *elsiocb;
3220 	FARP *fp;
3221 	uint8_t *pcmd;
3222 	uint32_t *lp;
3223 	uint16_t cmdsize;
3224 	struct lpfc_nodelist *ondlp;
3225 	struct lpfc_nodelist *ndlp;
3226 
3227 	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3228 
3229 	ndlp = lpfc_findnode_did(vport, nportid);
3230 	if (!ndlp) {
3231 		ndlp = lpfc_nlp_init(vport, nportid);
3232 		if (!ndlp)
3233 			return 1;
3234 		lpfc_enqueue_node(vport, ndlp);
3235 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
3236 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
3237 		if (!ndlp)
3238 			return 1;
3239 	}
3240 
3241 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3242 				     ndlp->nlp_DID, ELS_CMD_RNID);
3243 	if (!elsiocb) {
3244 		/* This will trigger the release of the node just
3245 		 * allocated
3246 		 */
3247 		lpfc_nlp_put(ndlp);
3248 		return 1;
3249 	}
3250 
3251 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3252 
3253 	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3254 	pcmd += sizeof(uint32_t);
3255 
3256 	/* Fill in FARPR payload */
3257 	fp = (FARP *) (pcmd);
3258 	memset(fp, 0, sizeof(FARP));
3259 	lp = (uint32_t *) pcmd;
3260 	*lp++ = be32_to_cpu(nportid);
3261 	*lp++ = be32_to_cpu(vport->fc_myDID);
3262 	fp->Rflags = 0;
3263 	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3264 
3265 	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3266 	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3267 	ondlp = lpfc_findnode_did(vport, nportid);
3268 	if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
3269 		memcpy(&fp->OportName, &ondlp->nlp_portname,
3270 		       sizeof(struct lpfc_name));
3271 		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3272 		       sizeof(struct lpfc_name));
3273 	}
3274 
3275 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3276 		"Issue FARPR:     did:x%x",
3277 		ndlp->nlp_DID, 0, 0);
3278 
3279 	phba->fc_stat.elsXmitFARPR++;
3280 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3281 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
3282 	    IOCB_ERROR) {
3283 		/* The additional lpfc_nlp_put will cause the following
3284 		 * lpfc_els_free_iocb routine to trigger the release of
3285 		 * the node.
3286 		 */
3287 		lpfc_nlp_put(ndlp);
3288 		lpfc_els_free_iocb(phba, elsiocb);
3289 		return 1;
3290 	}
3291 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
3292 	 * trigger the release of the node.
3293 	 */
3294 	lpfc_nlp_put(ndlp);
3295 	return 0;
3296 }
3297 
3298 /**
3299  * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
3300  * @vport: pointer to a host virtual N_Port data structure.
3301  * @nlp: pointer to a node-list data structure.
3302  *
3303  * This routine cancels the timer with a delayed IOCB-command retry for
3304  * a @vport's @ndlp. It stops the timer for the delayed function retrial and
3305  * removes the ELS retry event if it presents. In addition, if the
3306  * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
3307  * commands are sent for the @vport's nodes that require issuing discovery
3308  * ADISC.
3309  **/
3310 void
3311 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
3312 {
3313 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3314 	struct lpfc_work_evt *evtp;
3315 
3316 	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
3317 		return;
3318 	spin_lock_irq(shost->host_lock);
3319 	nlp->nlp_flag &= ~NLP_DELAY_TMO;
3320 	spin_unlock_irq(shost->host_lock);
3321 	del_timer_sync(&nlp->nlp_delayfunc);
3322 	nlp->nlp_last_elscmd = 0;
3323 	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
3324 		list_del_init(&nlp->els_retry_evt.evt_listp);
3325 		/* Decrement nlp reference count held for the delayed retry */
3326 		evtp = &nlp->els_retry_evt;
3327 		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
3328 	}
3329 	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
3330 		spin_lock_irq(shost->host_lock);
3331 		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3332 		spin_unlock_irq(shost->host_lock);
3333 		if (vport->num_disc_nodes) {
3334 			if (vport->port_state < LPFC_VPORT_READY) {
3335 				/* Check if there are more ADISCs to be sent */
3336 				lpfc_more_adisc(vport);
3337 			} else {
3338 				/* Check if there are more PLOGIs to be sent */
3339 				lpfc_more_plogi(vport);
3340 				if (vport->num_disc_nodes == 0) {
3341 					spin_lock_irq(shost->host_lock);
3342 					vport->fc_flag &= ~FC_NDISC_ACTIVE;
3343 					spin_unlock_irq(shost->host_lock);
3344 					lpfc_can_disctmo(vport);
3345 					lpfc_end_rscn(vport);
3346 				}
3347 			}
3348 		}
3349 	}
3350 	return;
3351 }
3352 
3353 /**
3354  * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
3355  * @ptr: holder for the pointer to the timer function associated data (ndlp).
3356  *
3357  * This routine is invoked by the ndlp delayed-function timer to check
3358  * whether there is any pending ELS retry event(s) with the node. If not, it
3359  * simply returns. Otherwise, if there is at least one ELS delayed event, it
3360  * adds the delayed events to the HBA work list and invokes the
3361  * lpfc_worker_wake_up() routine to wake up worker thread to process the
3362  * event. Note that lpfc_nlp_get() is called before posting the event to
3363  * the work list to hold reference count of ndlp so that it guarantees the
3364  * reference to ndlp will still be available when the worker thread gets
3365  * to the event associated with the ndlp.
3366  **/
3367 void
3368 lpfc_els_retry_delay(struct timer_list *t)
3369 {
3370 	struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc);
3371 	struct lpfc_vport *vport = ndlp->vport;
3372 	struct lpfc_hba   *phba = vport->phba;
3373 	unsigned long flags;
3374 	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
3375 
3376 	spin_lock_irqsave(&phba->hbalock, flags);
3377 	if (!list_empty(&evtp->evt_listp)) {
3378 		spin_unlock_irqrestore(&phba->hbalock, flags);
3379 		return;
3380 	}
3381 
3382 	/* We need to hold the node by incrementing the reference
3383 	 * count until the queued work is done
3384 	 */
3385 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
3386 	if (evtp->evt_arg1) {
3387 		evtp->evt = LPFC_EVT_ELS_RETRY;
3388 		list_add_tail(&evtp->evt_listp, &phba->work_list);
3389 		lpfc_worker_wake_up(phba);
3390 	}
3391 	spin_unlock_irqrestore(&phba->hbalock, flags);
3392 	return;
3393 }
3394 
3395 /**
3396  * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
3397  * @ndlp: pointer to a node-list data structure.
3398  *
3399  * This routine is the worker-thread handler for processing the @ndlp delayed
3400  * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
3401  * the last ELS command from the associated ndlp and invokes the proper ELS
3402  * function according to the delayed ELS command to retry the command.
3403  **/
3404 void
3405 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3406 {
3407 	struct lpfc_vport *vport = ndlp->vport;
3408 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3409 	uint32_t cmd, retry;
3410 
3411 	spin_lock_irq(shost->host_lock);
3412 	cmd = ndlp->nlp_last_elscmd;
3413 	ndlp->nlp_last_elscmd = 0;
3414 
3415 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
3416 		spin_unlock_irq(shost->host_lock);
3417 		return;
3418 	}
3419 
3420 	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
3421 	spin_unlock_irq(shost->host_lock);
3422 	/*
3423 	 * If a discovery event readded nlp_delayfunc after timer
3424 	 * firing and before processing the timer, cancel the
3425 	 * nlp_delayfunc.
3426 	 */
3427 	del_timer_sync(&ndlp->nlp_delayfunc);
3428 	retry = ndlp->nlp_retry;
3429 	ndlp->nlp_retry = 0;
3430 
3431 	switch (cmd) {
3432 	case ELS_CMD_FLOGI:
3433 		lpfc_issue_els_flogi(vport, ndlp, retry);
3434 		break;
3435 	case ELS_CMD_PLOGI:
3436 		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
3437 			ndlp->nlp_prev_state = ndlp->nlp_state;
3438 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3439 		}
3440 		break;
3441 	case ELS_CMD_ADISC:
3442 		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
3443 			ndlp->nlp_prev_state = ndlp->nlp_state;
3444 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3445 		}
3446 		break;
3447 	case ELS_CMD_PRLI:
3448 	case ELS_CMD_NVMEPRLI:
3449 		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
3450 			ndlp->nlp_prev_state = ndlp->nlp_state;
3451 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3452 		}
3453 		break;
3454 	case ELS_CMD_LOGO:
3455 		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
3456 			ndlp->nlp_prev_state = ndlp->nlp_state;
3457 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3458 		}
3459 		break;
3460 	case ELS_CMD_FDISC:
3461 		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
3462 			lpfc_issue_els_fdisc(vport, ndlp, retry);
3463 		break;
3464 	}
3465 	return;
3466 }
3467 
3468 /**
3469  * lpfc_link_reset - Issue link reset
3470  * @vport: pointer to a virtual N_Port data structure.
3471  *
3472  * This routine performs link reset by sending INIT_LINK mailbox command.
3473  * For SLI-3 adapter, link attention interrupt is enabled before issuing
3474  * INIT_LINK mailbox command.
3475  *
3476  * Return code
3477  *   0 - Link reset initiated successfully
3478  *   1 - Failed to initiate link reset
3479  **/
3480 int
3481 lpfc_link_reset(struct lpfc_vport *vport)
3482 {
3483 	struct lpfc_hba *phba = vport->phba;
3484 	LPFC_MBOXQ_t *mbox;
3485 	uint32_t control;
3486 	int rc;
3487 
3488 	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3489 			 "2851 Attempt link reset\n");
3490 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3491 	if (!mbox) {
3492 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
3493 				"2852 Failed to allocate mbox memory");
3494 		return 1;
3495 	}
3496 
3497 	/* Enable Link attention interrupts */
3498 	if (phba->sli_rev <= LPFC_SLI_REV3) {
3499 		spin_lock_irq(&phba->hbalock);
3500 		phba->sli.sli_flag |= LPFC_PROCESS_LA;
3501 		control = readl(phba->HCregaddr);
3502 		control |= HC_LAINT_ENA;
3503 		writel(control, phba->HCregaddr);
3504 		readl(phba->HCregaddr); /* flush */
3505 		spin_unlock_irq(&phba->hbalock);
3506 	}
3507 
3508 	lpfc_init_link(phba, mbox, phba->cfg_topology,
3509 		       phba->cfg_link_speed);
3510 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3511 	mbox->vport = vport;
3512 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3513 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3514 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
3515 				"2853 Failed to issue INIT_LINK "
3516 				"mbox command, rc:x%x\n", rc);
3517 		mempool_free(mbox, phba->mbox_mem_pool);
3518 		return 1;
3519 	}
3520 
3521 	return 0;
3522 }
3523 
3524 /**
3525  * lpfc_els_retry - Make retry decision on an els command iocb
3526  * @phba: pointer to lpfc hba data structure.
3527  * @cmdiocb: pointer to lpfc command iocb data structure.
3528  * @rspiocb: pointer to lpfc response iocb data structure.
3529  *
3530  * This routine makes a retry decision on an ELS command IOCB, which has
3531  * failed. The following ELS IOCBs use this function for retrying the command
3532  * when previously issued command responsed with error status: FLOGI, PLOGI,
3533  * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
3534  * returned error status, it makes the decision whether a retry shall be
3535  * issued for the command, and whether a retry shall be made immediately or
3536  * delayed. In the former case, the corresponding ELS command issuing-function
3537  * is called to retry the command. In the later case, the ELS command shall
3538  * be posted to the ndlp delayed event and delayed function timer set to the
3539  * ndlp for the delayed command issusing.
3540  *
3541  * Return code
3542  *   0 - No retry of els command is made
3543  *   1 - Immediate or delayed retry of els command is made
3544  **/
3545 static int
3546 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3547 	       struct lpfc_iocbq *rspiocb)
3548 {
3549 	struct lpfc_vport *vport = cmdiocb->vport;
3550 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3551 	IOCB_t *irsp = &rspiocb->iocb;
3552 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3553 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3554 	uint32_t *elscmd;
3555 	struct ls_rjt stat;
3556 	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
3557 	int logerr = 0;
3558 	uint32_t cmd = 0;
3559 	uint32_t did;
3560 	int link_reset = 0, rc;
3561 
3562 
3563 	/* Note: context2 may be 0 for internal driver abort
3564 	 * of delays ELS command.
3565 	 */
3566 
3567 	if (pcmd && pcmd->virt) {
3568 		elscmd = (uint32_t *) (pcmd->virt);
3569 		cmd = *elscmd++;
3570 	}
3571 
3572 	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3573 		did = ndlp->nlp_DID;
3574 	else {
3575 		/* We should only hit this case for retrying PLOGI */
3576 		did = irsp->un.elsreq64.remoteID;
3577 		ndlp = lpfc_findnode_did(vport, did);
3578 		if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
3579 		    && (cmd != ELS_CMD_PLOGI))
3580 			return 1;
3581 	}
3582 
3583 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3584 		"Retry ELS:       wd7:x%x wd4:x%x did:x%x",
3585 		*(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
3586 
3587 	switch (irsp->ulpStatus) {
3588 	case IOSTAT_FCP_RSP_ERROR:
3589 		break;
3590 	case IOSTAT_REMOTE_STOP:
3591 		if (phba->sli_rev == LPFC_SLI_REV4) {
3592 			/* This IO was aborted by the target, we don't
3593 			 * know the rxid and because we did not send the
3594 			 * ABTS we cannot generate and RRQ.
3595 			 */
3596 			lpfc_set_rrq_active(phba, ndlp,
3597 					 cmdiocb->sli4_lxritag, 0, 0);
3598 		}
3599 		break;
3600 	case IOSTAT_LOCAL_REJECT:
3601 		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
3602 		case IOERR_LOOP_OPEN_FAILURE:
3603 			if (cmd == ELS_CMD_FLOGI) {
3604 				if (PCI_DEVICE_ID_HORNET ==
3605 					phba->pcidev->device) {
3606 					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
3607 					phba->pport->fc_myDID = 0;
3608 					phba->alpa_map[0] = 0;
3609 					phba->alpa_map[1] = 0;
3610 				}
3611 			}
3612 			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
3613 				delay = 1000;
3614 			retry = 1;
3615 			break;
3616 
3617 		case IOERR_ILLEGAL_COMMAND:
3618 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3619 					 "0124 Retry illegal cmd x%x "
3620 					 "retry:x%x delay:x%x\n",
3621 					 cmd, cmdiocb->retry, delay);
3622 			retry = 1;
3623 			/* All command's retry policy */
3624 			maxretry = 8;
3625 			if (cmdiocb->retry > 2)
3626 				delay = 1000;
3627 			break;
3628 
3629 		case IOERR_NO_RESOURCES:
3630 			logerr = 1; /* HBA out of resources */
3631 			retry = 1;
3632 			if (cmdiocb->retry > 100)
3633 				delay = 100;
3634 			maxretry = 250;
3635 			break;
3636 
3637 		case IOERR_ILLEGAL_FRAME:
3638 			delay = 100;
3639 			retry = 1;
3640 			break;
3641 
3642 		case IOERR_INVALID_RPI:
3643 			if (cmd == ELS_CMD_PLOGI &&
3644 			    did == NameServer_DID) {
3645 				/* Continue forever if plogi to */
3646 				/* the nameserver fails */
3647 				maxretry = 0;
3648 				delay = 100;
3649 			}
3650 			retry = 1;
3651 			break;
3652 
3653 		case IOERR_SEQUENCE_TIMEOUT:
3654 			if (cmd == ELS_CMD_PLOGI &&
3655 			    did == NameServer_DID &&
3656 			    (cmdiocb->retry + 1) == maxretry) {
3657 				/* Reset the Link */
3658 				link_reset = 1;
3659 				break;
3660 			}
3661 			retry = 1;
3662 			delay = 100;
3663 			break;
3664 		}
3665 		break;
3666 
3667 	case IOSTAT_NPORT_RJT:
3668 	case IOSTAT_FABRIC_RJT:
3669 		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
3670 			retry = 1;
3671 			break;
3672 		}
3673 		break;
3674 
3675 	case IOSTAT_NPORT_BSY:
3676 	case IOSTAT_FABRIC_BSY:
3677 		logerr = 1; /* Fabric / Remote NPort out of resources */
3678 		retry = 1;
3679 		break;
3680 
3681 	case IOSTAT_LS_RJT:
3682 		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
3683 		/* Added for Vendor specifc support
3684 		 * Just keep retrying for these Rsn / Exp codes
3685 		 */
3686 		switch (stat.un.b.lsRjtRsnCode) {
3687 		case LSRJT_UNABLE_TPC:
3688 			/* The driver has a VALID PLOGI but the rport has
3689 			 * rejected the PRLI - can't do it now.  Delay
3690 			 * for 1 second and try again - don't care about
3691 			 * the explanation.
3692 			 */
3693 			if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
3694 				delay = 1000;
3695 				maxretry = lpfc_max_els_tries + 1;
3696 				retry = 1;
3697 				break;
3698 			}
3699 
3700 			/* Legacy bug fix code for targets with PLOGI delays. */
3701 			if (stat.un.b.lsRjtRsnCodeExp ==
3702 			    LSEXP_CMD_IN_PROGRESS) {
3703 				if (cmd == ELS_CMD_PLOGI) {
3704 					delay = 1000;
3705 					maxretry = 48;
3706 				}
3707 				retry = 1;
3708 				break;
3709 			}
3710 			if (stat.un.b.lsRjtRsnCodeExp ==
3711 			    LSEXP_CANT_GIVE_DATA) {
3712 				if (cmd == ELS_CMD_PLOGI) {
3713 					delay = 1000;
3714 					maxretry = 48;
3715 				}
3716 				retry = 1;
3717 				break;
3718 			}
3719 			if (cmd == ELS_CMD_PLOGI) {
3720 				delay = 1000;
3721 				maxretry = lpfc_max_els_tries + 1;
3722 				retry = 1;
3723 				break;
3724 			}
3725 			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3726 			  (cmd == ELS_CMD_FDISC) &&
3727 			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
3728 				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3729 						 "0125 FDISC Failed (x%x). "
3730 						 "Fabric out of resources\n",
3731 						 stat.un.lsRjtError);
3732 				lpfc_vport_set_state(vport,
3733 						     FC_VPORT_NO_FABRIC_RSCS);
3734 			}
3735 			break;
3736 
3737 		case LSRJT_LOGICAL_BSY:
3738 			if ((cmd == ELS_CMD_PLOGI) ||
3739 			    (cmd == ELS_CMD_PRLI) ||
3740 			    (cmd == ELS_CMD_NVMEPRLI)) {
3741 				delay = 1000;
3742 				maxretry = 48;
3743 			} else if (cmd == ELS_CMD_FDISC) {
3744 				/* FDISC retry policy */
3745 				maxretry = 48;
3746 				if (cmdiocb->retry >= 32)
3747 					delay = 1000;
3748 			}
3749 			retry = 1;
3750 			break;
3751 
3752 		case LSRJT_LOGICAL_ERR:
3753 			/* There are some cases where switches return this
3754 			 * error when they are not ready and should be returning
3755 			 * Logical Busy. We should delay every time.
3756 			 */
3757 			if (cmd == ELS_CMD_FDISC &&
3758 			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3759 				maxretry = 3;
3760 				delay = 1000;
3761 				retry = 1;
3762 			} else if (cmd == ELS_CMD_FLOGI &&
3763 				   stat.un.b.lsRjtRsnCodeExp ==
3764 						LSEXP_NOTHING_MORE) {
3765 				vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf;
3766 				retry = 1;
3767 				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3768 						 "0820 FLOGI Failed (x%x). "
3769 						 "BBCredit Not Supported\n",
3770 						 stat.un.lsRjtError);
3771 			}
3772 			break;
3773 
3774 		case LSRJT_PROTOCOL_ERR:
3775 			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3776 			  (cmd == ELS_CMD_FDISC) &&
3777 			  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3778 			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3779 			  ) {
3780 				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3781 						 "0122 FDISC Failed (x%x). "
3782 						 "Fabric Detected Bad WWN\n",
3783 						 stat.un.lsRjtError);
3784 				lpfc_vport_set_state(vport,
3785 						     FC_VPORT_FABRIC_REJ_WWN);
3786 			}
3787 			break;
3788 		case LSRJT_VENDOR_UNIQUE:
3789 			if ((stat.un.b.vendorUnique == 0x45) &&
3790 			    (cmd == ELS_CMD_FLOGI)) {
3791 				goto out_retry;
3792 			}
3793 			break;
3794 		case LSRJT_CMD_UNSUPPORTED:
3795 			/* lpfc nvmet returns this type of LS_RJT when it
3796 			 * receives an FCP PRLI because lpfc nvmet only
3797 			 * support NVME.  ELS request is terminated for FCP4
3798 			 * on this rport.
3799 			 */
3800 			if (stat.un.b.lsRjtRsnCodeExp ==
3801 			    LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) {
3802 				spin_lock_irq(shost->host_lock);
3803 				ndlp->nlp_flag |= NLP_FCP_PRLI_RJT;
3804 				spin_unlock_irq(shost->host_lock);
3805 				retry = 0;
3806 				goto out_retry;
3807 			}
3808 			break;
3809 		}
3810 		break;
3811 
3812 	case IOSTAT_INTERMED_RSP:
3813 	case IOSTAT_BA_RJT:
3814 		break;
3815 
3816 	default:
3817 		break;
3818 	}
3819 
3820 	if (link_reset) {
3821 		rc = lpfc_link_reset(vport);
3822 		if (rc) {
3823 			/* Do not give up. Retry PLOGI one more time and attempt
3824 			 * link reset if PLOGI fails again.
3825 			 */
3826 			retry = 1;
3827 			delay = 100;
3828 			goto out_retry;
3829 		}
3830 		return 1;
3831 	}
3832 
3833 	if (did == FDMI_DID)
3834 		retry = 1;
3835 
3836 	if ((cmd == ELS_CMD_FLOGI) &&
3837 	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
3838 	    !lpfc_error_lost_link(irsp)) {
3839 		/* FLOGI retry policy */
3840 		retry = 1;
3841 		/* retry FLOGI forever */
3842 		if (phba->link_flag != LS_LOOPBACK_MODE)
3843 			maxretry = 0;
3844 		else
3845 			maxretry = 2;
3846 
3847 		if (cmdiocb->retry >= 100)
3848 			delay = 5000;
3849 		else if (cmdiocb->retry >= 32)
3850 			delay = 1000;
3851 	} else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
3852 		/* retry FDISCs every second up to devloss */
3853 		retry = 1;
3854 		maxretry = vport->cfg_devloss_tmo;
3855 		delay = 1000;
3856 	}
3857 
3858 	cmdiocb->retry++;
3859 	if (maxretry && (cmdiocb->retry >= maxretry)) {
3860 		phba->fc_stat.elsRetryExceeded++;
3861 		retry = 0;
3862 	}
3863 
3864 	if ((vport->load_flag & FC_UNLOADING) != 0)
3865 		retry = 0;
3866 
3867 out_retry:
3868 	if (retry) {
3869 		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3870 			/* Stop retrying PLOGI and FDISC if in FCF discovery */
3871 			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3872 				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3873 						 "2849 Stop retry ELS command "
3874 						 "x%x to remote NPORT x%x, "
3875 						 "Data: x%x x%x\n", cmd, did,
3876 						 cmdiocb->retry, delay);
3877 				return 0;
3878 			}
3879 		}
3880 
3881 		/* Retry ELS command <elsCmd> to remote NPORT <did> */
3882 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3883 				 "0107 Retry ELS command x%x to remote "
3884 				 "NPORT x%x Data: x%x x%x\n",
3885 				 cmd, did, cmdiocb->retry, delay);
3886 
3887 		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3888 			((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
3889 			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
3890 			IOERR_NO_RESOURCES))) {
3891 			/* Don't reset timer for no resources */
3892 
3893 			/* If discovery / RSCN timer is running, reset it */
3894 			if (timer_pending(&vport->fc_disctmo) ||
3895 			    (vport->fc_flag & FC_RSCN_MODE))
3896 				lpfc_set_disctmo(vport);
3897 		}
3898 
3899 		phba->fc_stat.elsXmitRetry++;
3900 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
3901 			phba->fc_stat.elsDelayRetry++;
3902 			ndlp->nlp_retry = cmdiocb->retry;
3903 
3904 			/* delay is specified in milliseconds */
3905 			mod_timer(&ndlp->nlp_delayfunc,
3906 				jiffies + msecs_to_jiffies(delay));
3907 			spin_lock_irq(shost->host_lock);
3908 			ndlp->nlp_flag |= NLP_DELAY_TMO;
3909 			spin_unlock_irq(shost->host_lock);
3910 
3911 			ndlp->nlp_prev_state = ndlp->nlp_state;
3912 			if ((cmd == ELS_CMD_PRLI) ||
3913 			    (cmd == ELS_CMD_NVMEPRLI))
3914 				lpfc_nlp_set_state(vport, ndlp,
3915 					NLP_STE_PRLI_ISSUE);
3916 			else
3917 				lpfc_nlp_set_state(vport, ndlp,
3918 					NLP_STE_NPR_NODE);
3919 			ndlp->nlp_last_elscmd = cmd;
3920 
3921 			return 1;
3922 		}
3923 		switch (cmd) {
3924 		case ELS_CMD_FLOGI:
3925 			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
3926 			return 1;
3927 		case ELS_CMD_FDISC:
3928 			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3929 			return 1;
3930 		case ELS_CMD_PLOGI:
3931 			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3932 				ndlp->nlp_prev_state = ndlp->nlp_state;
3933 				lpfc_nlp_set_state(vport, ndlp,
3934 						   NLP_STE_PLOGI_ISSUE);
3935 			}
3936 			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
3937 			return 1;
3938 		case ELS_CMD_ADISC:
3939 			ndlp->nlp_prev_state = ndlp->nlp_state;
3940 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3941 			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
3942 			return 1;
3943 		case ELS_CMD_PRLI:
3944 		case ELS_CMD_NVMEPRLI:
3945 			ndlp->nlp_prev_state = ndlp->nlp_state;
3946 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3947 			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
3948 			return 1;
3949 		case ELS_CMD_LOGO:
3950 			ndlp->nlp_prev_state = ndlp->nlp_state;
3951 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3952 			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
3953 			return 1;
3954 		}
3955 	}
3956 	/* No retry ELS command <elsCmd> to remote NPORT <did> */
3957 	if (logerr) {
3958 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3959 			 "0137 No retry ELS command x%x to remote "
3960 			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3961 			 cmd, did, irsp->ulpStatus,
3962 			 irsp->un.ulpWord[4]);
3963 	}
3964 	else {
3965 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3966 			 "0108 No retry ELS command x%x to remote "
3967 			 "NPORT x%x Retried:%d Error:x%x/%x\n",
3968 			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3969 			 irsp->un.ulpWord[4]);
3970 	}
3971 	return 0;
3972 }
3973 
3974 /**
3975  * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
3976  * @phba: pointer to lpfc hba data structure.
3977  * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3978  *
3979  * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3980  * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3981  * checks to see whether there is a lpfc DMA buffer associated with the
3982  * response of the command IOCB. If so, it will be released before releasing
3983  * the lpfc DMA buffer associated with the IOCB itself.
3984  *
3985  * Return code
3986  *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
3987  **/
3988 static int
3989 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3990 {
3991 	struct lpfc_dmabuf *buf_ptr;
3992 
3993 	/* Free the response before processing the command. */
3994 	if (!list_empty(&buf_ptr1->list)) {
3995 		list_remove_head(&buf_ptr1->list, buf_ptr,
3996 				 struct lpfc_dmabuf,
3997 				 list);
3998 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3999 		kfree(buf_ptr);
4000 	}
4001 	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
4002 	kfree(buf_ptr1);
4003 	return 0;
4004 }
4005 
4006 /**
4007  * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
4008  * @phba: pointer to lpfc hba data structure.
4009  * @buf_ptr: pointer to the lpfc dma buffer data structure.
4010  *
4011  * This routine releases the lpfc Direct Memory Access (DMA) buffer
4012  * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
4013  * pool.
4014  *
4015  * Return code
4016  *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
4017  **/
4018 static int
4019 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
4020 {
4021 	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
4022 	kfree(buf_ptr);
4023 	return 0;
4024 }
4025 
4026 /**
4027  * lpfc_els_free_iocb - Free a command iocb and its associated resources
4028  * @phba: pointer to lpfc hba data structure.
4029  * @elsiocb: pointer to lpfc els command iocb data structure.
4030  *
4031  * This routine frees a command IOCB and its associated resources. The
4032  * command IOCB data structure contains the reference to various associated
4033  * resources, these fields must be set to NULL if the associated reference
4034  * not present:
4035  *   context1 - reference to ndlp
4036  *   context2 - reference to cmd
4037  *   context2->next - reference to rsp
4038  *   context3 - reference to bpl
4039  *
4040  * It first properly decrements the reference count held on ndlp for the
4041  * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
4042  * set, it invokes the lpfc_els_free_data() routine to release the Direct
4043  * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
4044  * adds the DMA buffer the @phba data structure for the delayed release.
4045  * If reference to the Buffer Pointer List (BPL) is present, the
4046  * lpfc_els_free_bpl() routine is invoked to release the DMA memory
4047  * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
4048  * invoked to release the IOCB data structure back to @phba IOCBQ list.
4049  *
4050  * Return code
4051  *   0 - Success (currently, always return 0)
4052  **/
4053 int
4054 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
4055 {
4056 	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
4057 	struct lpfc_nodelist *ndlp;
4058 
4059 	ndlp = (struct lpfc_nodelist *)elsiocb->context1;
4060 	if (ndlp) {
4061 		if (ndlp->nlp_flag & NLP_DEFER_RM) {
4062 			lpfc_nlp_put(ndlp);
4063 
4064 			/* If the ndlp is not being used by another discovery
4065 			 * thread, free it.
4066 			 */
4067 			if (!lpfc_nlp_not_used(ndlp)) {
4068 				/* If ndlp is being used by another discovery
4069 				 * thread, just clear NLP_DEFER_RM
4070 				 */
4071 				ndlp->nlp_flag &= ~NLP_DEFER_RM;
4072 			}
4073 		}
4074 		else
4075 			lpfc_nlp_put(ndlp);
4076 		elsiocb->context1 = NULL;
4077 	}
4078 	/* context2  = cmd,  context2->next = rsp, context3 = bpl */
4079 	if (elsiocb->context2) {
4080 		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
4081 			/* Firmware could still be in progress of DMAing
4082 			 * payload, so don't free data buffer till after
4083 			 * a hbeat.
4084 			 */
4085 			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
4086 			buf_ptr = elsiocb->context2;
4087 			elsiocb->context2 = NULL;
4088 			if (buf_ptr) {
4089 				buf_ptr1 = NULL;
4090 				spin_lock_irq(&phba->hbalock);
4091 				if (!list_empty(&buf_ptr->list)) {
4092 					list_remove_head(&buf_ptr->list,
4093 						buf_ptr1, struct lpfc_dmabuf,
4094 						list);
4095 					INIT_LIST_HEAD(&buf_ptr1->list);
4096 					list_add_tail(&buf_ptr1->list,
4097 						&phba->elsbuf);
4098 					phba->elsbuf_cnt++;
4099 				}
4100 				INIT_LIST_HEAD(&buf_ptr->list);
4101 				list_add_tail(&buf_ptr->list, &phba->elsbuf);
4102 				phba->elsbuf_cnt++;
4103 				spin_unlock_irq(&phba->hbalock);
4104 			}
4105 		} else {
4106 			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
4107 			lpfc_els_free_data(phba, buf_ptr1);
4108 			elsiocb->context2 = NULL;
4109 		}
4110 	}
4111 
4112 	if (elsiocb->context3) {
4113 		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
4114 		lpfc_els_free_bpl(phba, buf_ptr);
4115 		elsiocb->context3 = NULL;
4116 	}
4117 	lpfc_sli_release_iocbq(phba, elsiocb);
4118 	return 0;
4119 }
4120 
4121 /**
4122  * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
4123  * @phba: pointer to lpfc hba data structure.
4124  * @cmdiocb: pointer to lpfc command iocb data structure.
4125  * @rspiocb: pointer to lpfc response iocb data structure.
4126  *
4127  * This routine is the completion callback function to the Logout (LOGO)
4128  * Accept (ACC) Response ELS command. This routine is invoked to indicate
4129  * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
4130  * release the ndlp if it has the last reference remaining (reference count
4131  * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
4132  * field to NULL to inform the following lpfc_els_free_iocb() routine no
4133  * ndlp reference count needs to be decremented. Otherwise, the ndlp
4134  * reference use-count shall be decremented by the lpfc_els_free_iocb()
4135  * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
4136  * IOCB data structure.
4137  **/
4138 static void
4139 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4140 		       struct lpfc_iocbq *rspiocb)
4141 {
4142 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4143 	struct lpfc_vport *vport = cmdiocb->vport;
4144 	IOCB_t *irsp;
4145 
4146 	irsp = &rspiocb->iocb;
4147 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4148 		"ACC LOGO cmpl:   status:x%x/x%x did:x%x",
4149 		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
4150 	/* ACC to LOGO completes to NPort <nlp_DID> */
4151 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4152 			 "0109 ACC to LOGO completes to NPort x%x "
4153 			 "Data: x%x x%x x%x\n",
4154 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4155 			 ndlp->nlp_rpi);
4156 
4157 	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
4158 		/* NPort Recovery mode or node is just allocated */
4159 		if (!lpfc_nlp_not_used(ndlp)) {
4160 			/* If the ndlp is being used by another discovery
4161 			 * thread, just unregister the RPI.
4162 			 */
4163 			lpfc_unreg_rpi(vport, ndlp);
4164 		} else {
4165 			/* Indicate the node has already released, should
4166 			 * not reference to it from within lpfc_els_free_iocb.
4167 			 */
4168 			cmdiocb->context1 = NULL;
4169 		}
4170 	}
4171 
4172 	/*
4173 	 * The driver received a LOGO from the rport and has ACK'd it.
4174 	 * At this point, the driver is done so release the IOCB
4175 	 */
4176 	lpfc_els_free_iocb(phba, cmdiocb);
4177 }
4178 
4179 /**
4180  * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
4181  * @phba: pointer to lpfc hba data structure.
4182  * @pmb: pointer to the driver internal queue element for mailbox command.
4183  *
4184  * This routine is the completion callback function for unregister default
4185  * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
4186  * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
4187  * decrements the ndlp reference count held for this completion callback
4188  * function. After that, it invokes the lpfc_nlp_not_used() to check
4189  * whether there is only one reference left on the ndlp. If so, it will
4190  * perform one more decrement and trigger the release of the ndlp.
4191  **/
4192 void
4193 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4194 {
4195 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
4196 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
4197 
4198 	pmb->ctx_buf = NULL;
4199 	pmb->ctx_ndlp = NULL;
4200 
4201 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
4202 	kfree(mp);
4203 	mempool_free(pmb, phba->mbox_mem_pool);
4204 	if (ndlp) {
4205 		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4206 				 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
4207 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
4208 				 kref_read(&ndlp->kref),
4209 				 ndlp->nlp_usg_map, ndlp);
4210 		if (NLP_CHK_NODE_ACT(ndlp)) {
4211 			lpfc_nlp_put(ndlp);
4212 			/* This is the end of the default RPI cleanup logic for
4213 			 * this ndlp. If no other discovery threads are using
4214 			 * this ndlp, free all resources associated with it.
4215 			 */
4216 			lpfc_nlp_not_used(ndlp);
4217 		} else {
4218 			lpfc_drop_node(ndlp->vport, ndlp);
4219 		}
4220 	}
4221 
4222 	return;
4223 }
4224 
4225 /**
4226  * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
4227  * @phba: pointer to lpfc hba data structure.
4228  * @cmdiocb: pointer to lpfc command iocb data structure.
4229  * @rspiocb: pointer to lpfc response iocb data structure.
4230  *
4231  * This routine is the completion callback function for ELS Response IOCB
4232  * command. In normal case, this callback function just properly sets the
4233  * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
4234  * field in the command IOCB is not NULL, the referred mailbox command will
4235  * be send out, and then invokes the lpfc_els_free_iocb() routine to release
4236  * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
4237  * link down event occurred during the discovery, the lpfc_nlp_not_used()
4238  * routine shall be invoked trying to release the ndlp if no other threads
4239  * are currently referring it.
4240  **/
4241 static void
4242 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4243 		  struct lpfc_iocbq *rspiocb)
4244 {
4245 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4246 	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
4247 	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
4248 	IOCB_t  *irsp;
4249 	uint8_t *pcmd;
4250 	LPFC_MBOXQ_t *mbox = NULL;
4251 	struct lpfc_dmabuf *mp = NULL;
4252 	uint32_t ls_rjt = 0;
4253 
4254 	irsp = &rspiocb->iocb;
4255 
4256 	if (cmdiocb->context_un.mbox)
4257 		mbox = cmdiocb->context_un.mbox;
4258 
4259 	/* First determine if this is a LS_RJT cmpl. Note, this callback
4260 	 * function can have cmdiocb->contest1 (ndlp) field set to NULL.
4261 	 */
4262 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
4263 	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
4264 	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
4265 		/* A LS_RJT associated with Default RPI cleanup has its own
4266 		 * separate code path.
4267 		 */
4268 		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
4269 			ls_rjt = 1;
4270 	}
4271 
4272 	/* Check to see if link went down during discovery */
4273 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
4274 		if (mbox) {
4275 			mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4276 			if (mp) {
4277 				lpfc_mbuf_free(phba, mp->virt, mp->phys);
4278 				kfree(mp);
4279 			}
4280 			mempool_free(mbox, phba->mbox_mem_pool);
4281 		}
4282 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
4283 		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
4284 			if (lpfc_nlp_not_used(ndlp)) {
4285 				ndlp = NULL;
4286 				/* Indicate the node has already released,
4287 				 * should not reference to it from within
4288 				 * the routine lpfc_els_free_iocb.
4289 				 */
4290 				cmdiocb->context1 = NULL;
4291 			}
4292 		goto out;
4293 	}
4294 
4295 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4296 		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
4297 		irsp->ulpStatus, irsp->un.ulpWord[4],
4298 		cmdiocb->iocb.un.elsreq64.remoteID);
4299 	/* ELS response tag <ulpIoTag> completes */
4300 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4301 			 "0110 ELS response tag x%x completes "
4302 			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
4303 			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
4304 			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
4305 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4306 			 ndlp->nlp_rpi);
4307 	if (mbox) {
4308 		if ((rspiocb->iocb.ulpStatus == 0)
4309 		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
4310 			if (!lpfc_unreg_rpi(vport, ndlp) &&
4311 			    (!(vport->fc_flag & FC_PT2PT)) &&
4312 			    (ndlp->nlp_state ==  NLP_STE_PLOGI_ISSUE ||
4313 			     ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) {
4314 				lpfc_printf_vlog(vport, KERN_INFO,
4315 					LOG_DISCOVERY,
4316 					"0314 PLOGI recov DID x%x "
4317 					"Data: x%x x%x x%x\n",
4318 					ndlp->nlp_DID, ndlp->nlp_state,
4319 					ndlp->nlp_rpi, ndlp->nlp_flag);
4320 				mp = mbox->ctx_buf;
4321 				if (mp) {
4322 					lpfc_mbuf_free(phba, mp->virt,
4323 						       mp->phys);
4324 					kfree(mp);
4325 				}
4326 				mempool_free(mbox, phba->mbox_mem_pool);
4327 				goto out;
4328 			}
4329 
4330 			/* Increment reference count to ndlp to hold the
4331 			 * reference to ndlp for the callback function.
4332 			 */
4333 			mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
4334 			mbox->vport = vport;
4335 			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
4336 				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
4337 				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
4338 			}
4339 			else {
4340 				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
4341 				ndlp->nlp_prev_state = ndlp->nlp_state;
4342 				lpfc_nlp_set_state(vport, ndlp,
4343 					   NLP_STE_REG_LOGIN_ISSUE);
4344 			}
4345 
4346 			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
4347 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4348 			    != MBX_NOT_FINISHED)
4349 				goto out;
4350 
4351 			/* Decrement the ndlp reference count we
4352 			 * set for this failed mailbox command.
4353 			 */
4354 			lpfc_nlp_put(ndlp);
4355 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
4356 
4357 			/* ELS rsp: Cannot issue reg_login for <NPortid> */
4358 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4359 				"0138 ELS rsp: Cannot issue reg_login for x%x "
4360 				"Data: x%x x%x x%x\n",
4361 				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4362 				ndlp->nlp_rpi);
4363 
4364 			if (lpfc_nlp_not_used(ndlp)) {
4365 				ndlp = NULL;
4366 				/* Indicate node has already been released,
4367 				 * should not reference to it from within
4368 				 * the routine lpfc_els_free_iocb.
4369 				 */
4370 				cmdiocb->context1 = NULL;
4371 			}
4372 		} else {
4373 			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
4374 			if (!lpfc_error_lost_link(irsp) &&
4375 			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
4376 				if (lpfc_nlp_not_used(ndlp)) {
4377 					ndlp = NULL;
4378 					/* Indicate node has already been
4379 					 * released, should not reference
4380 					 * to it from within the routine
4381 					 * lpfc_els_free_iocb.
4382 					 */
4383 					cmdiocb->context1 = NULL;
4384 				}
4385 			}
4386 		}
4387 		mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4388 		if (mp) {
4389 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
4390 			kfree(mp);
4391 		}
4392 		mempool_free(mbox, phba->mbox_mem_pool);
4393 	}
4394 out:
4395 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
4396 		spin_lock_irq(shost->host_lock);
4397 		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
4398 		spin_unlock_irq(shost->host_lock);
4399 
4400 		/* If the node is not being used by another discovery thread,
4401 		 * and we are sending a reject, we are done with it.
4402 		 * Release driver reference count here and free associated
4403 		 * resources.
4404 		 */
4405 		if (ls_rjt)
4406 			if (lpfc_nlp_not_used(ndlp))
4407 				/* Indicate node has already been released,
4408 				 * should not reference to it from within
4409 				 * the routine lpfc_els_free_iocb.
4410 				 */
4411 				cmdiocb->context1 = NULL;
4412 
4413 	}
4414 
4415 	lpfc_els_free_iocb(phba, cmdiocb);
4416 	return;
4417 }
4418 
4419 /**
4420  * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
4421  * @vport: pointer to a host virtual N_Port data structure.
4422  * @flag: the els command code to be accepted.
4423  * @oldiocb: pointer to the original lpfc command iocb data structure.
4424  * @ndlp: pointer to a node-list data structure.
4425  * @mbox: pointer to the driver internal queue element for mailbox command.
4426  *
4427  * This routine prepares and issues an Accept (ACC) response IOCB
4428  * command. It uses the @flag to properly set up the IOCB field for the
4429  * specific ACC response command to be issued and invokes the
4430  * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
4431  * @mbox pointer is passed in, it will be put into the context_un.mbox
4432  * field of the IOCB for the completion callback function to issue the
4433  * mailbox command to the HBA later when callback is invoked.
4434  *
4435  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4436  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4437  * will be stored into the context1 field of the IOCB for the completion
4438  * callback function to the corresponding response ELS IOCB command.
4439  *
4440  * Return code
4441  *   0 - Successfully issued acc response
4442  *   1 - Failed to issue acc response
4443  **/
4444 int
4445 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
4446 		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4447 		 LPFC_MBOXQ_t *mbox)
4448 {
4449 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4450 	struct lpfc_hba  *phba = vport->phba;
4451 	IOCB_t *icmd;
4452 	IOCB_t *oldcmd;
4453 	struct lpfc_iocbq *elsiocb;
4454 	uint8_t *pcmd;
4455 	struct serv_parm *sp;
4456 	uint16_t cmdsize;
4457 	int rc;
4458 	ELS_PKT *els_pkt_ptr;
4459 
4460 	oldcmd = &oldiocb->iocb;
4461 
4462 	switch (flag) {
4463 	case ELS_CMD_ACC:
4464 		cmdsize = sizeof(uint32_t);
4465 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4466 					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
4467 		if (!elsiocb) {
4468 			spin_lock_irq(shost->host_lock);
4469 			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4470 			spin_unlock_irq(shost->host_lock);
4471 			return 1;
4472 		}
4473 
4474 		icmd = &elsiocb->iocb;
4475 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4476 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4477 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4478 		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4479 		pcmd += sizeof(uint32_t);
4480 
4481 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4482 			"Issue ACC:       did:x%x flg:x%x",
4483 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4484 		break;
4485 	case ELS_CMD_FLOGI:
4486 	case ELS_CMD_PLOGI:
4487 		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
4488 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4489 					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
4490 		if (!elsiocb)
4491 			return 1;
4492 
4493 		icmd = &elsiocb->iocb;
4494 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4495 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4496 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4497 
4498 		if (mbox)
4499 			elsiocb->context_un.mbox = mbox;
4500 
4501 		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4502 		pcmd += sizeof(uint32_t);
4503 		sp = (struct serv_parm *)pcmd;
4504 
4505 		if (flag == ELS_CMD_FLOGI) {
4506 			/* Copy the received service parameters back */
4507 			memcpy(sp, &phba->fc_fabparam,
4508 			       sizeof(struct serv_parm));
4509 
4510 			/* Clear the F_Port bit */
4511 			sp->cmn.fPort = 0;
4512 
4513 			/* Mark all class service parameters as invalid */
4514 			sp->cls1.classValid = 0;
4515 			sp->cls2.classValid = 0;
4516 			sp->cls3.classValid = 0;
4517 			sp->cls4.classValid = 0;
4518 
4519 			/* Copy our worldwide names */
4520 			memcpy(&sp->portName, &vport->fc_sparam.portName,
4521 			       sizeof(struct lpfc_name));
4522 			memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
4523 			       sizeof(struct lpfc_name));
4524 		} else {
4525 			memcpy(pcmd, &vport->fc_sparam,
4526 			       sizeof(struct serv_parm));
4527 
4528 			sp->cmn.valid_vendor_ver_level = 0;
4529 			memset(sp->un.vendorVersion, 0,
4530 			       sizeof(sp->un.vendorVersion));
4531 			sp->cmn.bbRcvSizeMsb &= 0xF;
4532 
4533 			/* If our firmware supports this feature, convey that
4534 			 * info to the target using the vendor specific field.
4535 			 */
4536 			if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
4537 				sp->cmn.valid_vendor_ver_level = 1;
4538 				sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
4539 				sp->un.vv.flags =
4540 					cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
4541 			}
4542 		}
4543 
4544 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4545 			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
4546 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4547 		break;
4548 	case ELS_CMD_PRLO:
4549 		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
4550 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4551 					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
4552 		if (!elsiocb)
4553 			return 1;
4554 
4555 		icmd = &elsiocb->iocb;
4556 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4557 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4558 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4559 
4560 		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
4561 		       sizeof(uint32_t) + sizeof(PRLO));
4562 		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4563 		els_pkt_ptr = (ELS_PKT *) pcmd;
4564 		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
4565 
4566 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4567 			"Issue ACC PRLO:  did:x%x flg:x%x",
4568 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4569 		break;
4570 	default:
4571 		return 1;
4572 	}
4573 	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
4574 		spin_lock_irq(shost->host_lock);
4575 		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4576 			ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
4577 			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4578 		spin_unlock_irq(shost->host_lock);
4579 		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4580 	} else {
4581 		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4582 	}
4583 
4584 	phba->fc_stat.elsXmitACC++;
4585 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4586 	if (rc == IOCB_ERROR) {
4587 		lpfc_els_free_iocb(phba, elsiocb);
4588 		return 1;
4589 	}
4590 	return 0;
4591 }
4592 
4593 /**
4594  * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command
4595  * @vport: pointer to a virtual N_Port data structure.
4596  * @rejectError:
4597  * @oldiocb: pointer to the original lpfc command iocb data structure.
4598  * @ndlp: pointer to a node-list data structure.
4599  * @mbox: pointer to the driver internal queue element for mailbox command.
4600  *
4601  * This routine prepares and issue an Reject (RJT) response IOCB
4602  * command. If a @mbox pointer is passed in, it will be put into the
4603  * context_un.mbox field of the IOCB for the completion callback function
4604  * to issue to the HBA later.
4605  *
4606  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4607  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4608  * will be stored into the context1 field of the IOCB for the completion
4609  * callback function to the reject response ELS IOCB command.
4610  *
4611  * Return code
4612  *   0 - Successfully issued reject response
4613  *   1 - Failed to issue reject response
4614  **/
4615 int
4616 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
4617 		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4618 		    LPFC_MBOXQ_t *mbox)
4619 {
4620 	struct lpfc_hba  *phba = vport->phba;
4621 	IOCB_t *icmd;
4622 	IOCB_t *oldcmd;
4623 	struct lpfc_iocbq *elsiocb;
4624 	uint8_t *pcmd;
4625 	uint16_t cmdsize;
4626 	int rc;
4627 
4628 	cmdsize = 2 * sizeof(uint32_t);
4629 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4630 				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
4631 	if (!elsiocb)
4632 		return 1;
4633 
4634 	icmd = &elsiocb->iocb;
4635 	oldcmd = &oldiocb->iocb;
4636 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4637 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4638 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4639 
4640 	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
4641 	pcmd += sizeof(uint32_t);
4642 	*((uint32_t *) (pcmd)) = rejectError;
4643 
4644 	if (mbox)
4645 		elsiocb->context_un.mbox = mbox;
4646 
4647 	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
4648 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4649 			 "0129 Xmit ELS RJT x%x response tag x%x "
4650 			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4651 			 "rpi x%x\n",
4652 			 rejectError, elsiocb->iotag,
4653 			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
4654 			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
4655 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4656 		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
4657 		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
4658 
4659 	phba->fc_stat.elsXmitLSRJT++;
4660 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4661 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4662 
4663 	if (rc == IOCB_ERROR) {
4664 		lpfc_els_free_iocb(phba, elsiocb);
4665 		return 1;
4666 	}
4667 	return 0;
4668 }
4669 
4670 /**
4671  * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
4672  * @vport: pointer to a virtual N_Port data structure.
4673  * @oldiocb: pointer to the original lpfc command iocb data structure.
4674  * @ndlp: pointer to a node-list data structure.
4675  *
4676  * This routine prepares and issues an Accept (ACC) response to Address
4677  * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
4678  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4679  *
4680  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4681  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4682  * will be stored into the context1 field of the IOCB for the completion
4683  * callback function to the ADISC Accept response ELS IOCB command.
4684  *
4685  * Return code
4686  *   0 - Successfully issued acc adisc response
4687  *   1 - Failed to issue adisc acc response
4688  **/
4689 int
4690 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4691 		       struct lpfc_nodelist *ndlp)
4692 {
4693 	struct lpfc_hba  *phba = vport->phba;
4694 	ADISC *ap;
4695 	IOCB_t *icmd, *oldcmd;
4696 	struct lpfc_iocbq *elsiocb;
4697 	uint8_t *pcmd;
4698 	uint16_t cmdsize;
4699 	int rc;
4700 
4701 	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
4702 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4703 				     ndlp->nlp_DID, ELS_CMD_ACC);
4704 	if (!elsiocb)
4705 		return 1;
4706 
4707 	icmd = &elsiocb->iocb;
4708 	oldcmd = &oldiocb->iocb;
4709 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4710 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4711 
4712 	/* Xmit ADISC ACC response tag <ulpIoTag> */
4713 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4714 			 "0130 Xmit ADISC ACC response iotag x%x xri: "
4715 			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
4716 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4717 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4718 			 ndlp->nlp_rpi);
4719 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4720 
4721 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4722 	pcmd += sizeof(uint32_t);
4723 
4724 	ap = (ADISC *) (pcmd);
4725 	ap->hardAL_PA = phba->fc_pref_ALPA;
4726 	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4727 	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4728 	ap->DID = be32_to_cpu(vport->fc_myDID);
4729 
4730 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4731 		"Issue ACC ADISC: did:x%x flg:x%x",
4732 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4733 
4734 	phba->fc_stat.elsXmitACC++;
4735 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4736 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4737 	if (rc == IOCB_ERROR) {
4738 		lpfc_els_free_iocb(phba, elsiocb);
4739 		return 1;
4740 	}
4741 
4742 	/* Xmit ELS ACC response tag <ulpIoTag> */
4743 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4744 			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
4745 			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
4746 			 "RPI: x%x, fc_flag x%x\n",
4747 			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
4748 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4749 			 ndlp->nlp_rpi, vport->fc_flag);
4750 	return 0;
4751 }
4752 
4753 /**
4754  * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
4755  * @vport: pointer to a virtual N_Port data structure.
4756  * @oldiocb: pointer to the original lpfc command iocb data structure.
4757  * @ndlp: pointer to a node-list data structure.
4758  *
4759  * This routine prepares and issues an Accept (ACC) response to Process
4760  * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
4761  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4762  *
4763  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4764  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4765  * will be stored into the context1 field of the IOCB for the completion
4766  * callback function to the PRLI Accept response ELS IOCB command.
4767  *
4768  * Return code
4769  *   0 - Successfully issued acc prli response
4770  *   1 - Failed to issue acc prli response
4771  **/
4772 int
4773 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4774 		      struct lpfc_nodelist *ndlp)
4775 {
4776 	struct lpfc_hba  *phba = vport->phba;
4777 	PRLI *npr;
4778 	struct lpfc_nvme_prli *npr_nvme;
4779 	lpfc_vpd_t *vpd;
4780 	IOCB_t *icmd;
4781 	IOCB_t *oldcmd;
4782 	struct lpfc_iocbq *elsiocb;
4783 	uint8_t *pcmd;
4784 	uint16_t cmdsize;
4785 	uint32_t prli_fc4_req, *req_payload;
4786 	struct lpfc_dmabuf *req_buf;
4787 	int rc;
4788 	u32 elsrspcmd;
4789 
4790 	/* Need the incoming PRLI payload to determine if the ACC is for an
4791 	 * FC4 or NVME PRLI type.  The PRLI type is at word 1.
4792 	 */
4793 	req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
4794 	req_payload = (((uint32_t *)req_buf->virt) + 1);
4795 
4796 	/* PRLI type payload is at byte 3 for FCP or NVME. */
4797 	prli_fc4_req = be32_to_cpu(*req_payload);
4798 	prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
4799 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4800 			 "6127 PRLI_ACC:  Req Type x%x, Word1 x%08x\n",
4801 			 prli_fc4_req, *((uint32_t *)req_payload));
4802 
4803 	if (prli_fc4_req == PRLI_FCP_TYPE) {
4804 		cmdsize = sizeof(uint32_t) + sizeof(PRLI);
4805 		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
4806 	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
4807 		cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
4808 		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
4809 	} else {
4810 		return 1;
4811 	}
4812 
4813 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4814 		ndlp->nlp_DID, elsrspcmd);
4815 	if (!elsiocb)
4816 		return 1;
4817 
4818 	icmd = &elsiocb->iocb;
4819 	oldcmd = &oldiocb->iocb;
4820 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4821 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4822 
4823 	/* Xmit PRLI ACC response tag <ulpIoTag> */
4824 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4825 			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
4826 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4827 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4828 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4829 			 ndlp->nlp_rpi);
4830 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4831 	memset(pcmd, 0, cmdsize);
4832 
4833 	*((uint32_t *)(pcmd)) = elsrspcmd;
4834 	pcmd += sizeof(uint32_t);
4835 
4836 	/* For PRLI, remainder of payload is PRLI parameter page */
4837 	vpd = &phba->vpd;
4838 
4839 	if (prli_fc4_req == PRLI_FCP_TYPE) {
4840 		/*
4841 		 * If the remote port is a target and our firmware version
4842 		 * is 3.20 or later, set the following bits for FC-TAPE
4843 		 * support.
4844 		 */
4845 		npr = (PRLI *) pcmd;
4846 		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4847 		    (vpd->rev.feaLevelHigh >= 0x02)) {
4848 			npr->ConfmComplAllowed = 1;
4849 			npr->Retry = 1;
4850 			npr->TaskRetryIdReq = 1;
4851 		}
4852 		npr->acceptRspCode = PRLI_REQ_EXECUTED;
4853 		npr->estabImagePair = 1;
4854 		npr->readXferRdyDis = 1;
4855 		npr->ConfmComplAllowed = 1;
4856 		npr->prliType = PRLI_FCP_TYPE;
4857 		npr->initiatorFunc = 1;
4858 	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
4859 		/* Respond with an NVME PRLI Type */
4860 		npr_nvme = (struct lpfc_nvme_prli *) pcmd;
4861 		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
4862 		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
4863 		bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
4864 		if (phba->nvmet_support) {
4865 			bf_set(prli_tgt, npr_nvme, 1);
4866 			bf_set(prli_disc, npr_nvme, 1);
4867 			if (phba->cfg_nvme_enable_fb) {
4868 				bf_set(prli_fba, npr_nvme, 1);
4869 
4870 				/* TBD.  Target mode needs to post buffers
4871 				 * that support the configured first burst
4872 				 * byte size.
4873 				 */
4874 				bf_set(prli_fb_sz, npr_nvme,
4875 				       phba->cfg_nvmet_fb_size);
4876 			}
4877 		} else {
4878 			bf_set(prli_init, npr_nvme, 1);
4879 		}
4880 
4881 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
4882 				 "6015 NVME issue PRLI ACC word1 x%08x "
4883 				 "word4 x%08x word5 x%08x flag x%x, "
4884 				 "fcp_info x%x nlp_type x%x\n",
4885 				 npr_nvme->word1, npr_nvme->word4,
4886 				 npr_nvme->word5, ndlp->nlp_flag,
4887 				 ndlp->nlp_fcp_info, ndlp->nlp_type);
4888 		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
4889 		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
4890 		npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
4891 	} else
4892 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4893 				 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
4894 				 prli_fc4_req, ndlp->nlp_fc4_type,
4895 				 ndlp->nlp_DID);
4896 
4897 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4898 		"Issue ACC PRLI:  did:x%x flg:x%x",
4899 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4900 
4901 	phba->fc_stat.elsXmitACC++;
4902 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4903 
4904 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4905 	if (rc == IOCB_ERROR) {
4906 		lpfc_els_free_iocb(phba, elsiocb);
4907 		return 1;
4908 	}
4909 	return 0;
4910 }
4911 
4912 /**
4913  * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
4914  * @vport: pointer to a virtual N_Port data structure.
4915  * @format: rnid command format.
4916  * @oldiocb: pointer to the original lpfc command iocb data structure.
4917  * @ndlp: pointer to a node-list data structure.
4918  *
4919  * This routine issues a Request Node Identification Data (RNID) Accept
4920  * (ACC) response. It constructs the RNID ACC response command according to
4921  * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
4922  * issue the response. Note that this command does not need to hold the ndlp
4923  * reference count for the callback. So, the ndlp reference count taken by
4924  * the lpfc_prep_els_iocb() routine is put back and the context1 field of
4925  * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
4926  * there is no ndlp reference available.
4927  *
4928  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4929  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4930  * will be stored into the context1 field of the IOCB for the completion
4931  * callback function. However, for the RNID Accept Response ELS command,
4932  * this is undone later by this routine after the IOCB is allocated.
4933  *
4934  * Return code
4935  *   0 - Successfully issued acc rnid response
4936  *   1 - Failed to issue acc rnid response
4937  **/
4938 static int
4939 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4940 		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4941 {
4942 	struct lpfc_hba  *phba = vport->phba;
4943 	RNID *rn;
4944 	IOCB_t *icmd, *oldcmd;
4945 	struct lpfc_iocbq *elsiocb;
4946 	uint8_t *pcmd;
4947 	uint16_t cmdsize;
4948 	int rc;
4949 
4950 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4951 					+ (2 * sizeof(struct lpfc_name));
4952 	if (format)
4953 		cmdsize += sizeof(RNID_TOP_DISC);
4954 
4955 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4956 				     ndlp->nlp_DID, ELS_CMD_ACC);
4957 	if (!elsiocb)
4958 		return 1;
4959 
4960 	icmd = &elsiocb->iocb;
4961 	oldcmd = &oldiocb->iocb;
4962 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4963 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4964 
4965 	/* Xmit RNID ACC response tag <ulpIoTag> */
4966 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4967 			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4968 			 elsiocb->iotag, elsiocb->iocb.ulpContext);
4969 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4970 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4971 	pcmd += sizeof(uint32_t);
4972 
4973 	memset(pcmd, 0, sizeof(RNID));
4974 	rn = (RNID *) (pcmd);
4975 	rn->Format = format;
4976 	rn->CommonLen = (2 * sizeof(struct lpfc_name));
4977 	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4978 	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4979 	switch (format) {
4980 	case 0:
4981 		rn->SpecificLen = 0;
4982 		break;
4983 	case RNID_TOPOLOGY_DISC:
4984 		rn->SpecificLen = sizeof(RNID_TOP_DISC);
4985 		memcpy(&rn->un.topologyDisc.portName,
4986 		       &vport->fc_portname, sizeof(struct lpfc_name));
4987 		rn->un.topologyDisc.unitType = RNID_HBA;
4988 		rn->un.topologyDisc.physPort = 0;
4989 		rn->un.topologyDisc.attachedNodes = 0;
4990 		break;
4991 	default:
4992 		rn->CommonLen = 0;
4993 		rn->SpecificLen = 0;
4994 		break;
4995 	}
4996 
4997 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4998 		"Issue ACC RNID:  did:x%x flg:x%x",
4999 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
5000 
5001 	phba->fc_stat.elsXmitACC++;
5002 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5003 
5004 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5005 	if (rc == IOCB_ERROR) {
5006 		lpfc_els_free_iocb(phba, elsiocb);
5007 		return 1;
5008 	}
5009 	return 0;
5010 }
5011 
5012 /**
5013  * lpfc_els_clear_rrq - Clear the rq that this rrq describes.
5014  * @vport: pointer to a virtual N_Port data structure.
5015  * @iocb: pointer to the lpfc command iocb data structure.
5016  * @ndlp: pointer to a node-list data structure.
5017  *
5018  * Return
5019  **/
5020 static void
5021 lpfc_els_clear_rrq(struct lpfc_vport *vport,
5022 		   struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
5023 {
5024 	struct lpfc_hba  *phba = vport->phba;
5025 	uint8_t *pcmd;
5026 	struct RRQ *rrq;
5027 	uint16_t rxid;
5028 	uint16_t xri;
5029 	struct lpfc_node_rrq *prrq;
5030 
5031 
5032 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
5033 	pcmd += sizeof(uint32_t);
5034 	rrq = (struct RRQ *)pcmd;
5035 	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
5036 	rxid = bf_get(rrq_rxid, rrq);
5037 
5038 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5039 			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
5040 			" x%x x%x\n",
5041 			be32_to_cpu(bf_get(rrq_did, rrq)),
5042 			bf_get(rrq_oxid, rrq),
5043 			rxid,
5044 			iocb->iotag, iocb->iocb.ulpContext);
5045 
5046 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5047 		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
5048 		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
5049 	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
5050 		xri = bf_get(rrq_oxid, rrq);
5051 	else
5052 		xri = rxid;
5053 	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
5054 	if (prrq)
5055 		lpfc_clr_rrq_active(phba, xri, prrq);
5056 	return;
5057 }
5058 
5059 /**
5060  * lpfc_els_rsp_echo_acc - Issue echo acc response
5061  * @vport: pointer to a virtual N_Port data structure.
5062  * @data: pointer to echo data to return in the accept.
5063  * @oldiocb: pointer to the original lpfc command iocb data structure.
5064  * @ndlp: pointer to a node-list data structure.
5065  *
5066  * Return code
5067  *   0 - Successfully issued acc echo response
5068  *   1 - Failed to issue acc echo response
5069  **/
5070 static int
5071 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
5072 		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
5073 {
5074 	struct lpfc_hba  *phba = vport->phba;
5075 	struct lpfc_iocbq *elsiocb;
5076 	uint8_t *pcmd;
5077 	uint16_t cmdsize;
5078 	int rc;
5079 
5080 	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
5081 
5082 	/* The accumulated length can exceed the BPL_SIZE.  For
5083 	 * now, use this as the limit
5084 	 */
5085 	if (cmdsize > LPFC_BPL_SIZE)
5086 		cmdsize = LPFC_BPL_SIZE;
5087 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5088 				     ndlp->nlp_DID, ELS_CMD_ACC);
5089 	if (!elsiocb)
5090 		return 1;
5091 
5092 	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;  /* Xri / rx_id */
5093 	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
5094 
5095 	/* Xmit ECHO ACC response tag <ulpIoTag> */
5096 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5097 			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
5098 			 elsiocb->iotag, elsiocb->iocb.ulpContext);
5099 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5100 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5101 	pcmd += sizeof(uint32_t);
5102 	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
5103 
5104 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5105 		"Issue ACC ECHO:  did:x%x flg:x%x",
5106 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
5107 
5108 	phba->fc_stat.elsXmitACC++;
5109 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5110 
5111 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5112 	if (rc == IOCB_ERROR) {
5113 		lpfc_els_free_iocb(phba, elsiocb);
5114 		return 1;
5115 	}
5116 	return 0;
5117 }
5118 
5119 /**
5120  * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
5121  * @vport: pointer to a host virtual N_Port data structure.
5122  *
5123  * This routine issues Address Discover (ADISC) ELS commands to those
5124  * N_Ports which are in node port recovery state and ADISC has not been issued
5125  * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
5126  * lpfc_issue_els_adisc() routine, the per @vport number of discover count
5127  * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
5128  * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
5129  * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC
5130  * IOCBs quit for later pick up. On the other hand, after walking through
5131  * all the ndlps with the @vport and there is none ADISC IOCB issued, the
5132  * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is
5133  * no more ADISC need to be sent.
5134  *
5135  * Return code
5136  *    The number of N_Ports with adisc issued.
5137  **/
5138 int
5139 lpfc_els_disc_adisc(struct lpfc_vport *vport)
5140 {
5141 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5142 	struct lpfc_nodelist *ndlp, *next_ndlp;
5143 	int sentadisc = 0;
5144 
5145 	/* go thru NPR nodes and issue any remaining ELS ADISCs */
5146 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5147 		if (!NLP_CHK_NODE_ACT(ndlp))
5148 			continue;
5149 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5150 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5151 		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
5152 			spin_lock_irq(shost->host_lock);
5153 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5154 			spin_unlock_irq(shost->host_lock);
5155 			ndlp->nlp_prev_state = ndlp->nlp_state;
5156 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5157 			lpfc_issue_els_adisc(vport, ndlp, 0);
5158 			sentadisc++;
5159 			vport->num_disc_nodes++;
5160 			if (vport->num_disc_nodes >=
5161 			    vport->cfg_discovery_threads) {
5162 				spin_lock_irq(shost->host_lock);
5163 				vport->fc_flag |= FC_NLP_MORE;
5164 				spin_unlock_irq(shost->host_lock);
5165 				break;
5166 			}
5167 		}
5168 	}
5169 	if (sentadisc == 0) {
5170 		spin_lock_irq(shost->host_lock);
5171 		vport->fc_flag &= ~FC_NLP_MORE;
5172 		spin_unlock_irq(shost->host_lock);
5173 	}
5174 	return sentadisc;
5175 }
5176 
5177 /**
5178  * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
5179  * @vport: pointer to a host virtual N_Port data structure.
5180  *
5181  * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
5182  * which are in node port recovery state, with a @vport. Each time an ELS
5183  * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
5184  * the per @vport number of discover count (num_disc_nodes) shall be
5185  * incremented. If the num_disc_nodes reaches a pre-configured threshold
5186  * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE
5187  * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for
5188  * later pick up. On the other hand, after walking through all the ndlps with
5189  * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag
5190  * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC
5191  * PLOGI need to be sent.
5192  *
5193  * Return code
5194  *   The number of N_Ports with plogi issued.
5195  **/
5196 int
5197 lpfc_els_disc_plogi(struct lpfc_vport *vport)
5198 {
5199 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5200 	struct lpfc_nodelist *ndlp, *next_ndlp;
5201 	int sentplogi = 0;
5202 
5203 	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
5204 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5205 		if (!NLP_CHK_NODE_ACT(ndlp))
5206 			continue;
5207 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5208 				(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5209 				(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
5210 				(ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
5211 			ndlp->nlp_prev_state = ndlp->nlp_state;
5212 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
5213 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5214 			sentplogi++;
5215 			vport->num_disc_nodes++;
5216 			if (vport->num_disc_nodes >=
5217 					vport->cfg_discovery_threads) {
5218 				spin_lock_irq(shost->host_lock);
5219 				vport->fc_flag |= FC_NLP_MORE;
5220 				spin_unlock_irq(shost->host_lock);
5221 				break;
5222 			}
5223 		}
5224 	}
5225 	if (sentplogi) {
5226 		lpfc_set_disctmo(vport);
5227 	}
5228 	else {
5229 		spin_lock_irq(shost->host_lock);
5230 		vport->fc_flag &= ~FC_NLP_MORE;
5231 		spin_unlock_irq(shost->host_lock);
5232 	}
5233 	return sentplogi;
5234 }
5235 
5236 static uint32_t
5237 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
5238 		uint32_t word0)
5239 {
5240 
5241 	desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
5242 	desc->payload.els_req = word0;
5243 	desc->length = cpu_to_be32(sizeof(desc->payload));
5244 
5245 	return sizeof(struct fc_rdp_link_service_desc);
5246 }
5247 
5248 static uint32_t
5249 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
5250 		uint8_t *page_a0, uint8_t *page_a2)
5251 {
5252 	uint16_t wavelength;
5253 	uint16_t temperature;
5254 	uint16_t rx_power;
5255 	uint16_t tx_bias;
5256 	uint16_t tx_power;
5257 	uint16_t vcc;
5258 	uint16_t flag = 0;
5259 	struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
5260 	struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
5261 
5262 	desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
5263 
5264 	trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
5265 			&page_a0[SSF_TRANSCEIVER_CODE_B4];
5266 	trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
5267 			&page_a0[SSF_TRANSCEIVER_CODE_B5];
5268 
5269 	if ((trasn_code_byte4->fc_sw_laser) ||
5270 	    (trasn_code_byte5->fc_sw_laser_sl) ||
5271 	    (trasn_code_byte5->fc_sw_laser_sn)) {  /* check if its short WL */
5272 		flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
5273 	} else if (trasn_code_byte4->fc_lw_laser) {
5274 		wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
5275 			page_a0[SSF_WAVELENGTH_B0];
5276 		if (wavelength == SFP_WAVELENGTH_LC1310)
5277 			flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
5278 		if (wavelength == SFP_WAVELENGTH_LL1550)
5279 			flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
5280 	}
5281 	/* check if its SFP+ */
5282 	flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
5283 			SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
5284 					<< SFP_FLAG_CT_SHIFT;
5285 
5286 	/* check if its OPTICAL */
5287 	flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
5288 			SFP_FLAG_IS_OPTICAL_PORT : 0)
5289 					<< SFP_FLAG_IS_OPTICAL_SHIFT;
5290 
5291 	temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
5292 		page_a2[SFF_TEMPERATURE_B0]);
5293 	vcc = (page_a2[SFF_VCC_B1] << 8 |
5294 		page_a2[SFF_VCC_B0]);
5295 	tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
5296 		page_a2[SFF_TXPOWER_B0]);
5297 	tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
5298 		page_a2[SFF_TX_BIAS_CURRENT_B0]);
5299 	rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
5300 		page_a2[SFF_RXPOWER_B0]);
5301 	desc->sfp_info.temperature = cpu_to_be16(temperature);
5302 	desc->sfp_info.rx_power = cpu_to_be16(rx_power);
5303 	desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
5304 	desc->sfp_info.tx_power = cpu_to_be16(tx_power);
5305 	desc->sfp_info.vcc = cpu_to_be16(vcc);
5306 
5307 	desc->sfp_info.flags = cpu_to_be16(flag);
5308 	desc->length = cpu_to_be32(sizeof(desc->sfp_info));
5309 
5310 	return sizeof(struct fc_rdp_sfp_desc);
5311 }
5312 
5313 static uint32_t
5314 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
5315 		READ_LNK_VAR *stat)
5316 {
5317 	uint32_t type;
5318 
5319 	desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
5320 
5321 	type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
5322 
5323 	desc->info.port_type = cpu_to_be32(type);
5324 
5325 	desc->info.link_status.link_failure_cnt =
5326 		cpu_to_be32(stat->linkFailureCnt);
5327 	desc->info.link_status.loss_of_synch_cnt =
5328 		cpu_to_be32(stat->lossSyncCnt);
5329 	desc->info.link_status.loss_of_signal_cnt =
5330 		cpu_to_be32(stat->lossSignalCnt);
5331 	desc->info.link_status.primitive_seq_proto_err =
5332 		cpu_to_be32(stat->primSeqErrCnt);
5333 	desc->info.link_status.invalid_trans_word =
5334 		cpu_to_be32(stat->invalidXmitWord);
5335 	desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
5336 
5337 	desc->length = cpu_to_be32(sizeof(desc->info));
5338 
5339 	return sizeof(struct fc_rdp_link_error_status_desc);
5340 }
5341 
5342 static uint32_t
5343 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
5344 		      struct lpfc_vport *vport)
5345 {
5346 	uint32_t bbCredit;
5347 
5348 	desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
5349 
5350 	bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
5351 			(vport->fc_sparam.cmn.bbCreditMsb << 8);
5352 	desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
5353 	if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
5354 		bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
5355 			(vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
5356 		desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
5357 	} else {
5358 		desc->bbc_info.attached_port_bbc = 0;
5359 	}
5360 
5361 	desc->bbc_info.rtt = 0;
5362 	desc->length = cpu_to_be32(sizeof(desc->bbc_info));
5363 
5364 	return sizeof(struct fc_rdp_bbc_desc);
5365 }
5366 
5367 static uint32_t
5368 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
5369 			   struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
5370 {
5371 	uint32_t flags = 0;
5372 
5373 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5374 
5375 	desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
5376 	desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
5377 	desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
5378 	desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
5379 
5380 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
5381 		flags |= RDP_OET_HIGH_ALARM;
5382 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
5383 		flags |= RDP_OET_LOW_ALARM;
5384 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
5385 		flags |= RDP_OET_HIGH_WARNING;
5386 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
5387 		flags |= RDP_OET_LOW_WARNING;
5388 
5389 	flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
5390 	desc->oed_info.function_flags = cpu_to_be32(flags);
5391 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5392 	return sizeof(struct fc_rdp_oed_sfp_desc);
5393 }
5394 
5395 static uint32_t
5396 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
5397 			      struct fc_rdp_oed_sfp_desc *desc,
5398 			      uint8_t *page_a2)
5399 {
5400 	uint32_t flags = 0;
5401 
5402 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5403 
5404 	desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
5405 	desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
5406 	desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
5407 	desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
5408 
5409 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
5410 		flags |= RDP_OET_HIGH_ALARM;
5411 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
5412 		flags |= RDP_OET_LOW_ALARM;
5413 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
5414 		flags |= RDP_OET_HIGH_WARNING;
5415 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
5416 		flags |= RDP_OET_LOW_WARNING;
5417 
5418 	flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
5419 	desc->oed_info.function_flags = cpu_to_be32(flags);
5420 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5421 	return sizeof(struct fc_rdp_oed_sfp_desc);
5422 }
5423 
5424 static uint32_t
5425 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
5426 			     struct fc_rdp_oed_sfp_desc *desc,
5427 			     uint8_t *page_a2)
5428 {
5429 	uint32_t flags = 0;
5430 
5431 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5432 
5433 	desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
5434 	desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
5435 	desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
5436 	desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
5437 
5438 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
5439 		flags |= RDP_OET_HIGH_ALARM;
5440 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
5441 		flags |= RDP_OET_LOW_ALARM;
5442 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
5443 		flags |= RDP_OET_HIGH_WARNING;
5444 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
5445 		flags |= RDP_OET_LOW_WARNING;
5446 
5447 	flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
5448 	desc->oed_info.function_flags = cpu_to_be32(flags);
5449 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5450 	return sizeof(struct fc_rdp_oed_sfp_desc);
5451 }
5452 
5453 static uint32_t
5454 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
5455 			      struct fc_rdp_oed_sfp_desc *desc,
5456 			      uint8_t *page_a2)
5457 {
5458 	uint32_t flags = 0;
5459 
5460 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5461 
5462 	desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
5463 	desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
5464 	desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
5465 	desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
5466 
5467 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
5468 		flags |= RDP_OET_HIGH_ALARM;
5469 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
5470 		flags |= RDP_OET_LOW_ALARM;
5471 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
5472 		flags |= RDP_OET_HIGH_WARNING;
5473 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
5474 		flags |= RDP_OET_LOW_WARNING;
5475 
5476 	flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
5477 	desc->oed_info.function_flags = cpu_to_be32(flags);
5478 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5479 	return sizeof(struct fc_rdp_oed_sfp_desc);
5480 }
5481 
5482 
5483 static uint32_t
5484 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
5485 			      struct fc_rdp_oed_sfp_desc *desc,
5486 			      uint8_t *page_a2)
5487 {
5488 	uint32_t flags = 0;
5489 
5490 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5491 
5492 	desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
5493 	desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
5494 	desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
5495 	desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
5496 
5497 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
5498 		flags |= RDP_OET_HIGH_ALARM;
5499 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
5500 		flags |= RDP_OET_LOW_ALARM;
5501 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
5502 		flags |= RDP_OET_HIGH_WARNING;
5503 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
5504 		flags |= RDP_OET_LOW_WARNING;
5505 
5506 	flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
5507 	desc->oed_info.function_flags = cpu_to_be32(flags);
5508 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5509 	return sizeof(struct fc_rdp_oed_sfp_desc);
5510 }
5511 
5512 static uint32_t
5513 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
5514 		      uint8_t *page_a0, struct lpfc_vport *vport)
5515 {
5516 	desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
5517 	memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
5518 	memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
5519 	memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
5520 	memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
5521 	memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
5522 	desc->length = cpu_to_be32(sizeof(desc->opd_info));
5523 	return sizeof(struct fc_rdp_opd_sfp_desc);
5524 }
5525 
5526 static uint32_t
5527 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
5528 {
5529 	if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
5530 		return 0;
5531 	desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
5532 
5533 	desc->info.CorrectedBlocks =
5534 		cpu_to_be32(stat->fecCorrBlkCount);
5535 	desc->info.UncorrectableBlocks =
5536 		cpu_to_be32(stat->fecUncorrBlkCount);
5537 
5538 	desc->length = cpu_to_be32(sizeof(desc->info));
5539 
5540 	return sizeof(struct fc_fec_rdp_desc);
5541 }
5542 
5543 static uint32_t
5544 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
5545 {
5546 	uint16_t rdp_cap = 0;
5547 	uint16_t rdp_speed;
5548 
5549 	desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
5550 
5551 	switch (phba->fc_linkspeed) {
5552 	case LPFC_LINK_SPEED_1GHZ:
5553 		rdp_speed = RDP_PS_1GB;
5554 		break;
5555 	case LPFC_LINK_SPEED_2GHZ:
5556 		rdp_speed = RDP_PS_2GB;
5557 		break;
5558 	case LPFC_LINK_SPEED_4GHZ:
5559 		rdp_speed = RDP_PS_4GB;
5560 		break;
5561 	case LPFC_LINK_SPEED_8GHZ:
5562 		rdp_speed = RDP_PS_8GB;
5563 		break;
5564 	case LPFC_LINK_SPEED_10GHZ:
5565 		rdp_speed = RDP_PS_10GB;
5566 		break;
5567 	case LPFC_LINK_SPEED_16GHZ:
5568 		rdp_speed = RDP_PS_16GB;
5569 		break;
5570 	case LPFC_LINK_SPEED_32GHZ:
5571 		rdp_speed = RDP_PS_32GB;
5572 		break;
5573 	case LPFC_LINK_SPEED_64GHZ:
5574 		rdp_speed = RDP_PS_64GB;
5575 		break;
5576 	default:
5577 		rdp_speed = RDP_PS_UNKNOWN;
5578 		break;
5579 	}
5580 
5581 	desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
5582 
5583 	if (phba->lmt & LMT_128Gb)
5584 		rdp_cap |= RDP_PS_128GB;
5585 	if (phba->lmt & LMT_64Gb)
5586 		rdp_cap |= RDP_PS_64GB;
5587 	if (phba->lmt & LMT_32Gb)
5588 		rdp_cap |= RDP_PS_32GB;
5589 	if (phba->lmt & LMT_16Gb)
5590 		rdp_cap |= RDP_PS_16GB;
5591 	if (phba->lmt & LMT_10Gb)
5592 		rdp_cap |= RDP_PS_10GB;
5593 	if (phba->lmt & LMT_8Gb)
5594 		rdp_cap |= RDP_PS_8GB;
5595 	if (phba->lmt & LMT_4Gb)
5596 		rdp_cap |= RDP_PS_4GB;
5597 	if (phba->lmt & LMT_2Gb)
5598 		rdp_cap |= RDP_PS_2GB;
5599 	if (phba->lmt & LMT_1Gb)
5600 		rdp_cap |= RDP_PS_1GB;
5601 
5602 	if (rdp_cap == 0)
5603 		rdp_cap = RDP_CAP_UNKNOWN;
5604 	if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
5605 		rdp_cap |= RDP_CAP_USER_CONFIGURED;
5606 
5607 	desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
5608 	desc->length = cpu_to_be32(sizeof(desc->info));
5609 	return sizeof(struct fc_rdp_port_speed_desc);
5610 }
5611 
5612 static uint32_t
5613 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
5614 		struct lpfc_vport *vport)
5615 {
5616 
5617 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5618 
5619 	memcpy(desc->port_names.wwnn, &vport->fc_nodename,
5620 			sizeof(desc->port_names.wwnn));
5621 
5622 	memcpy(desc->port_names.wwpn, &vport->fc_portname,
5623 			sizeof(desc->port_names.wwpn));
5624 
5625 	desc->length = cpu_to_be32(sizeof(desc->port_names));
5626 	return sizeof(struct fc_rdp_port_name_desc);
5627 }
5628 
5629 static uint32_t
5630 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
5631 		struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5632 {
5633 
5634 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5635 	if (vport->fc_flag & FC_FABRIC) {
5636 		memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
5637 				sizeof(desc->port_names.wwnn));
5638 
5639 		memcpy(desc->port_names.wwpn, &vport->fabric_portname,
5640 				sizeof(desc->port_names.wwpn));
5641 	} else {  /* Point to Point */
5642 		memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
5643 				sizeof(desc->port_names.wwnn));
5644 
5645 		memcpy(desc->port_names.wwnn, &ndlp->nlp_portname,
5646 				sizeof(desc->port_names.wwpn));
5647 	}
5648 
5649 	desc->length = cpu_to_be32(sizeof(desc->port_names));
5650 	return sizeof(struct fc_rdp_port_name_desc);
5651 }
5652 
5653 static void
5654 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5655 		int status)
5656 {
5657 	struct lpfc_nodelist *ndlp = rdp_context->ndlp;
5658 	struct lpfc_vport *vport = ndlp->vport;
5659 	struct lpfc_iocbq *elsiocb;
5660 	struct ulp_bde64 *bpl;
5661 	IOCB_t *icmd;
5662 	uint8_t *pcmd;
5663 	struct ls_rjt *stat;
5664 	struct fc_rdp_res_frame *rdp_res;
5665 	uint32_t cmdsize, len;
5666 	uint16_t *flag_ptr;
5667 	int rc;
5668 
5669 	if (status != SUCCESS)
5670 		goto error;
5671 
5672 	/* This will change once we know the true size of the RDP payload */
5673 	cmdsize = sizeof(struct fc_rdp_res_frame);
5674 
5675 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
5676 			lpfc_max_els_tries, rdp_context->ndlp,
5677 			rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
5678 	lpfc_nlp_put(ndlp);
5679 	if (!elsiocb)
5680 		goto free_rdp_context;
5681 
5682 	icmd = &elsiocb->iocb;
5683 	icmd->ulpContext = rdp_context->rx_id;
5684 	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5685 
5686 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5687 			"2171 Xmit RDP response tag x%x xri x%x, "
5688 			"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
5689 			elsiocb->iotag, elsiocb->iocb.ulpContext,
5690 			ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5691 			ndlp->nlp_rpi);
5692 	rdp_res = (struct fc_rdp_res_frame *)
5693 		(((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5694 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5695 	memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
5696 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5697 
5698 	/* Update Alarm and Warning */
5699 	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
5700 	phba->sfp_alarm |= *flag_ptr;
5701 	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
5702 	phba->sfp_warning |= *flag_ptr;
5703 
5704 	/* For RDP payload */
5705 	len = 8;
5706 	len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
5707 					 (len + pcmd), ELS_CMD_RDP);
5708 
5709 	len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
5710 			rdp_context->page_a0, rdp_context->page_a2);
5711 	len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
5712 				  phba);
5713 	len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
5714 				       (len + pcmd), &rdp_context->link_stat);
5715 	len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
5716 					     (len + pcmd), vport);
5717 	len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
5718 					(len + pcmd), vport, ndlp);
5719 	len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
5720 			&rdp_context->link_stat);
5721 	len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
5722 				     &rdp_context->link_stat, vport);
5723 	len += lpfc_rdp_res_oed_temp_desc(phba,
5724 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5725 				rdp_context->page_a2);
5726 	len += lpfc_rdp_res_oed_voltage_desc(phba,
5727 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5728 				rdp_context->page_a2);
5729 	len += lpfc_rdp_res_oed_txbias_desc(phba,
5730 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5731 				rdp_context->page_a2);
5732 	len += lpfc_rdp_res_oed_txpower_desc(phba,
5733 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5734 				rdp_context->page_a2);
5735 	len += lpfc_rdp_res_oed_rxpower_desc(phba,
5736 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5737 				rdp_context->page_a2);
5738 	len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
5739 				     rdp_context->page_a0, vport);
5740 
5741 	rdp_res->length = cpu_to_be32(len - 8);
5742 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5743 
5744 	/* Now that we know the true size of the payload, update the BPL */
5745 	bpl = (struct ulp_bde64 *)
5746 		(((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
5747 	bpl->tus.f.bdeSize = len;
5748 	bpl->tus.f.bdeFlags = 0;
5749 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
5750 
5751 	phba->fc_stat.elsXmitACC++;
5752 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5753 	if (rc == IOCB_ERROR)
5754 		lpfc_els_free_iocb(phba, elsiocb);
5755 
5756 	kfree(rdp_context);
5757 
5758 	return;
5759 error:
5760 	cmdsize = 2 * sizeof(uint32_t);
5761 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
5762 			ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
5763 	lpfc_nlp_put(ndlp);
5764 	if (!elsiocb)
5765 		goto free_rdp_context;
5766 
5767 	icmd = &elsiocb->iocb;
5768 	icmd->ulpContext = rdp_context->rx_id;
5769 	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5770 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5771 
5772 	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5773 	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5774 	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5775 
5776 	phba->fc_stat.elsXmitLSRJT++;
5777 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5778 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5779 
5780 	if (rc == IOCB_ERROR)
5781 		lpfc_els_free_iocb(phba, elsiocb);
5782 free_rdp_context:
5783 	kfree(rdp_context);
5784 }
5785 
5786 static int
5787 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
5788 {
5789 	LPFC_MBOXQ_t *mbox = NULL;
5790 	int rc;
5791 
5792 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5793 	if (!mbox) {
5794 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
5795 				"7105 failed to allocate mailbox memory");
5796 		return 1;
5797 	}
5798 
5799 	if (lpfc_sli4_dump_page_a0(phba, mbox))
5800 		goto prep_mbox_fail;
5801 	mbox->vport = rdp_context->ndlp->vport;
5802 	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
5803 	mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
5804 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5805 	if (rc == MBX_NOT_FINISHED)
5806 		goto issue_mbox_fail;
5807 
5808 	return 0;
5809 
5810 prep_mbox_fail:
5811 issue_mbox_fail:
5812 	mempool_free(mbox, phba->mbox_mem_pool);
5813 	return 1;
5814 }
5815 
5816 /*
5817  * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
5818  * @vport: pointer to a host virtual N_Port data structure.
5819  * @cmdiocb: pointer to lpfc command iocb data structure.
5820  * @ndlp: pointer to a node-list data structure.
5821  *
5822  * This routine processes an unsolicited RDP(Read Diagnostic Parameters)
5823  * IOCB. First, the payload of the unsolicited RDP is checked.
5824  * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
5825  * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
5826  * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl
5827  * gather all data and send RDP response.
5828  *
5829  * Return code
5830  *   0 - Sent the acc response
5831  *   1 - Sent the reject response.
5832  */
5833 static int
5834 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5835 		struct lpfc_nodelist *ndlp)
5836 {
5837 	struct lpfc_hba *phba = vport->phba;
5838 	struct lpfc_dmabuf *pcmd;
5839 	uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
5840 	struct fc_rdp_req_frame *rdp_req;
5841 	struct lpfc_rdp_context *rdp_context;
5842 	IOCB_t *cmd = NULL;
5843 	struct ls_rjt stat;
5844 
5845 	if (phba->sli_rev < LPFC_SLI_REV4 ||
5846 	    bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
5847 						LPFC_SLI_INTF_IF_TYPE_2) {
5848 		rjt_err = LSRJT_UNABLE_TPC;
5849 		rjt_expl = LSEXP_REQ_UNSUPPORTED;
5850 		goto error;
5851 	}
5852 
5853 	if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
5854 		rjt_err = LSRJT_UNABLE_TPC;
5855 		rjt_expl = LSEXP_REQ_UNSUPPORTED;
5856 		goto error;
5857 	}
5858 
5859 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5860 	rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
5861 
5862 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5863 			 "2422 ELS RDP Request "
5864 			 "dec len %d tag x%x port_id %d len %d\n",
5865 			 be32_to_cpu(rdp_req->rdp_des_length),
5866 			 be32_to_cpu(rdp_req->nport_id_desc.tag),
5867 			 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
5868 			 be32_to_cpu(rdp_req->nport_id_desc.length));
5869 
5870 	if (sizeof(struct fc_rdp_nport_desc) !=
5871 			be32_to_cpu(rdp_req->rdp_des_length))
5872 		goto rjt_logerr;
5873 	if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
5874 		goto rjt_logerr;
5875 	if (RDP_NPORT_ID_SIZE !=
5876 			be32_to_cpu(rdp_req->nport_id_desc.length))
5877 		goto rjt_logerr;
5878 	rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
5879 	if (!rdp_context) {
5880 		rjt_err = LSRJT_UNABLE_TPC;
5881 		goto error;
5882 	}
5883 
5884 	cmd = &cmdiocb->iocb;
5885 	rdp_context->ndlp = lpfc_nlp_get(ndlp);
5886 	rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
5887 	rdp_context->rx_id = cmd->ulpContext;
5888 	rdp_context->cmpl = lpfc_els_rdp_cmpl;
5889 	if (lpfc_get_rdp_info(phba, rdp_context)) {
5890 		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
5891 				 "2423 Unable to send mailbox");
5892 		kfree(rdp_context);
5893 		rjt_err = LSRJT_UNABLE_TPC;
5894 		lpfc_nlp_put(ndlp);
5895 		goto error;
5896 	}
5897 
5898 	return 0;
5899 
5900 rjt_logerr:
5901 	rjt_err = LSRJT_LOGICAL_ERR;
5902 
5903 error:
5904 	memset(&stat, 0, sizeof(stat));
5905 	stat.un.b.lsRjtRsnCode = rjt_err;
5906 	stat.un.b.lsRjtRsnCodeExp = rjt_expl;
5907 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5908 	return 1;
5909 }
5910 
5911 
5912 static void
5913 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5914 {
5915 	MAILBOX_t *mb;
5916 	IOCB_t *icmd;
5917 	uint8_t *pcmd;
5918 	struct lpfc_iocbq *elsiocb;
5919 	struct lpfc_nodelist *ndlp;
5920 	struct ls_rjt *stat;
5921 	union lpfc_sli4_cfg_shdr *shdr;
5922 	struct lpfc_lcb_context *lcb_context;
5923 	struct fc_lcb_res_frame *lcb_res;
5924 	uint32_t cmdsize, shdr_status, shdr_add_status;
5925 	int rc;
5926 
5927 	mb = &pmb->u.mb;
5928 	lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp;
5929 	ndlp = lcb_context->ndlp;
5930 	pmb->ctx_ndlp = NULL;
5931 	pmb->ctx_buf = NULL;
5932 
5933 	shdr = (union lpfc_sli4_cfg_shdr *)
5934 			&pmb->u.mqe.un.beacon_config.header.cfg_shdr;
5935 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5936 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5937 
5938 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
5939 				"0194 SET_BEACON_CONFIG mailbox "
5940 				"completed with status x%x add_status x%x,"
5941 				" mbx status x%x\n",
5942 				shdr_status, shdr_add_status, mb->mbxStatus);
5943 
5944 	if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
5945 	    (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
5946 	    (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
5947 		mempool_free(pmb, phba->mbox_mem_pool);
5948 		goto error;
5949 	}
5950 
5951 	mempool_free(pmb, phba->mbox_mem_pool);
5952 	cmdsize = sizeof(struct fc_lcb_res_frame);
5953 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5954 			lpfc_max_els_tries, ndlp,
5955 			ndlp->nlp_DID, ELS_CMD_ACC);
5956 
5957 	/* Decrement the ndlp reference count from previous mbox command */
5958 	lpfc_nlp_put(ndlp);
5959 
5960 	if (!elsiocb)
5961 		goto free_lcb_context;
5962 
5963 	lcb_res = (struct fc_lcb_res_frame *)
5964 		(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5965 
5966 	memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
5967 	icmd = &elsiocb->iocb;
5968 	icmd->ulpContext = lcb_context->rx_id;
5969 	icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5970 
5971 	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5972 	*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
5973 	lcb_res->lcb_sub_command = lcb_context->sub_command;
5974 	lcb_res->lcb_type = lcb_context->type;
5975 	lcb_res->capability = lcb_context->capability;
5976 	lcb_res->lcb_frequency = lcb_context->frequency;
5977 	lcb_res->lcb_duration = lcb_context->duration;
5978 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5979 	phba->fc_stat.elsXmitACC++;
5980 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5981 	if (rc == IOCB_ERROR)
5982 		lpfc_els_free_iocb(phba, elsiocb);
5983 
5984 	kfree(lcb_context);
5985 	return;
5986 
5987 error:
5988 	cmdsize = sizeof(struct fc_lcb_res_frame);
5989 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5990 			lpfc_max_els_tries, ndlp,
5991 			ndlp->nlp_DID, ELS_CMD_LS_RJT);
5992 	lpfc_nlp_put(ndlp);
5993 	if (!elsiocb)
5994 		goto free_lcb_context;
5995 
5996 	icmd = &elsiocb->iocb;
5997 	icmd->ulpContext = lcb_context->rx_id;
5998 	icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5999 	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
6000 
6001 	*((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
6002 	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
6003 	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6004 
6005 	if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
6006 		stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
6007 
6008 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6009 	phba->fc_stat.elsXmitLSRJT++;
6010 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6011 	if (rc == IOCB_ERROR)
6012 		lpfc_els_free_iocb(phba, elsiocb);
6013 free_lcb_context:
6014 	kfree(lcb_context);
6015 }
6016 
6017 static int
6018 lpfc_sli4_set_beacon(struct lpfc_vport *vport,
6019 		     struct lpfc_lcb_context *lcb_context,
6020 		     uint32_t beacon_state)
6021 {
6022 	struct lpfc_hba *phba = vport->phba;
6023 	union lpfc_sli4_cfg_shdr *cfg_shdr;
6024 	LPFC_MBOXQ_t *mbox = NULL;
6025 	uint32_t len;
6026 	int rc;
6027 
6028 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6029 	if (!mbox)
6030 		return 1;
6031 
6032 	cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
6033 	len = sizeof(struct lpfc_mbx_set_beacon_config) -
6034 		sizeof(struct lpfc_sli4_cfg_mhdr);
6035 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6036 			 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
6037 			 LPFC_SLI4_MBX_EMBED);
6038 	mbox->ctx_ndlp = (void *)lcb_context;
6039 	mbox->vport = phba->pport;
6040 	mbox->mbox_cmpl = lpfc_els_lcb_rsp;
6041 	bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
6042 	       phba->sli4_hba.physical_port);
6043 	bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
6044 	       beacon_state);
6045 	mbox->u.mqe.un.beacon_config.word5 = 0;		/* Reserved */
6046 
6047 	/*
6048 	 *	Check bv1s bit before issuing the mailbox
6049 	 *	if bv1s == 1, LCB V1 supported
6050 	 *	else, LCB V0 supported
6051 	 */
6052 
6053 	if (phba->sli4_hba.pc_sli4_params.bv1s) {
6054 		/* COMMON_SET_BEACON_CONFIG_V1 */
6055 		cfg_shdr->request.word9 = BEACON_VERSION_V1;
6056 		lcb_context->capability |= LCB_CAPABILITY_DURATION;
6057 		bf_set(lpfc_mbx_set_beacon_port_type,
6058 		       &mbox->u.mqe.un.beacon_config, 0);
6059 		bf_set(lpfc_mbx_set_beacon_duration_v1,
6060 		       &mbox->u.mqe.un.beacon_config,
6061 		       be16_to_cpu(lcb_context->duration));
6062 	} else {
6063 		/* COMMON_SET_BEACON_CONFIG_V0 */
6064 		if (be16_to_cpu(lcb_context->duration) != 0) {
6065 			mempool_free(mbox, phba->mbox_mem_pool);
6066 			return 1;
6067 		}
6068 		cfg_shdr->request.word9 = BEACON_VERSION_V0;
6069 		lcb_context->capability &=  ~(LCB_CAPABILITY_DURATION);
6070 		bf_set(lpfc_mbx_set_beacon_state,
6071 		       &mbox->u.mqe.un.beacon_config, beacon_state);
6072 		bf_set(lpfc_mbx_set_beacon_port_type,
6073 		       &mbox->u.mqe.un.beacon_config, 1);
6074 		bf_set(lpfc_mbx_set_beacon_duration,
6075 		       &mbox->u.mqe.un.beacon_config,
6076 		       be16_to_cpu(lcb_context->duration));
6077 	}
6078 
6079 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6080 	if (rc == MBX_NOT_FINISHED) {
6081 		mempool_free(mbox, phba->mbox_mem_pool);
6082 		return 1;
6083 	}
6084 
6085 	return 0;
6086 }
6087 
6088 
6089 /**
6090  * lpfc_els_rcv_lcb - Process an unsolicited LCB
6091  * @vport: pointer to a host virtual N_Port data structure.
6092  * @cmdiocb: pointer to lpfc command iocb data structure.
6093  * @ndlp: pointer to a node-list data structure.
6094  *
6095  * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB.
6096  * First, the payload of the unsolicited LCB is checked.
6097  * Then based on Subcommand beacon will either turn on or off.
6098  *
6099  * Return code
6100  * 0 - Sent the acc response
6101  * 1 - Sent the reject response.
6102  **/
6103 static int
6104 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6105 		 struct lpfc_nodelist *ndlp)
6106 {
6107 	struct lpfc_hba *phba = vport->phba;
6108 	struct lpfc_dmabuf *pcmd;
6109 	uint8_t *lp;
6110 	struct fc_lcb_request_frame *beacon;
6111 	struct lpfc_lcb_context *lcb_context;
6112 	uint8_t state, rjt_err;
6113 	struct ls_rjt stat;
6114 
6115 	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
6116 	lp = (uint8_t *)pcmd->virt;
6117 	beacon = (struct fc_lcb_request_frame *)pcmd->virt;
6118 
6119 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6120 			"0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
6121 			"type x%x frequency %x duration x%x\n",
6122 			lp[0], lp[1], lp[2],
6123 			beacon->lcb_command,
6124 			beacon->lcb_sub_command,
6125 			beacon->lcb_type,
6126 			beacon->lcb_frequency,
6127 			be16_to_cpu(beacon->lcb_duration));
6128 
6129 	if (beacon->lcb_sub_command != LPFC_LCB_ON &&
6130 	    beacon->lcb_sub_command != LPFC_LCB_OFF) {
6131 		rjt_err = LSRJT_CMD_UNSUPPORTED;
6132 		goto rjt;
6133 	}
6134 
6135 	if (phba->sli_rev < LPFC_SLI_REV4  ||
6136 	    phba->hba_flag & HBA_FCOE_MODE ||
6137 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
6138 	    LPFC_SLI_INTF_IF_TYPE_2)) {
6139 		rjt_err = LSRJT_CMD_UNSUPPORTED;
6140 		goto rjt;
6141 	}
6142 
6143 	lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
6144 	if (!lcb_context) {
6145 		rjt_err = LSRJT_UNABLE_TPC;
6146 		goto rjt;
6147 	}
6148 
6149 	state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
6150 	lcb_context->sub_command = beacon->lcb_sub_command;
6151 	lcb_context->capability	= 0;
6152 	lcb_context->type = beacon->lcb_type;
6153 	lcb_context->frequency = beacon->lcb_frequency;
6154 	lcb_context->duration = beacon->lcb_duration;
6155 	lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
6156 	lcb_context->rx_id = cmdiocb->iocb.ulpContext;
6157 	lcb_context->ndlp = lpfc_nlp_get(ndlp);
6158 	if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
6159 		lpfc_printf_vlog(ndlp->vport, KERN_ERR,
6160 				 LOG_ELS, "0193 failed to send mail box");
6161 		kfree(lcb_context);
6162 		lpfc_nlp_put(ndlp);
6163 		rjt_err = LSRJT_UNABLE_TPC;
6164 		goto rjt;
6165 	}
6166 	return 0;
6167 rjt:
6168 	memset(&stat, 0, sizeof(stat));
6169 	stat.un.b.lsRjtRsnCode = rjt_err;
6170 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6171 	return 1;
6172 }
6173 
6174 
6175 /**
6176  * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
6177  * @vport: pointer to a host virtual N_Port data structure.
6178  *
6179  * This routine cleans up any Registration State Change Notification
6180  * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
6181  * @vport together with the host_lock is used to prevent multiple thread
6182  * trying to access the RSCN array on a same @vport at the same time.
6183  **/
6184 void
6185 lpfc_els_flush_rscn(struct lpfc_vport *vport)
6186 {
6187 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6188 	struct lpfc_hba  *phba = vport->phba;
6189 	int i;
6190 
6191 	spin_lock_irq(shost->host_lock);
6192 	if (vport->fc_rscn_flush) {
6193 		/* Another thread is walking fc_rscn_id_list on this vport */
6194 		spin_unlock_irq(shost->host_lock);
6195 		return;
6196 	}
6197 	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
6198 	vport->fc_rscn_flush = 1;
6199 	spin_unlock_irq(shost->host_lock);
6200 
6201 	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6202 		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
6203 		vport->fc_rscn_id_list[i] = NULL;
6204 	}
6205 	spin_lock_irq(shost->host_lock);
6206 	vport->fc_rscn_id_cnt = 0;
6207 	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
6208 	spin_unlock_irq(shost->host_lock);
6209 	lpfc_can_disctmo(vport);
6210 	/* Indicate we are done walking this fc_rscn_id_list */
6211 	vport->fc_rscn_flush = 0;
6212 }
6213 
6214 /**
6215  * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
6216  * @vport: pointer to a host virtual N_Port data structure.
6217  * @did: remote destination port identifier.
6218  *
6219  * This routine checks whether there is any pending Registration State
6220  * Configuration Notification (RSCN) to a @did on @vport.
6221  *
6222  * Return code
6223  *   None zero - The @did matched with a pending rscn
6224  *   0 - not able to match @did with a pending rscn
6225  **/
6226 int
6227 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
6228 {
6229 	D_ID ns_did;
6230 	D_ID rscn_did;
6231 	uint32_t *lp;
6232 	uint32_t payload_len, i;
6233 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6234 
6235 	ns_did.un.word = did;
6236 
6237 	/* Never match fabric nodes for RSCNs */
6238 	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6239 		return 0;
6240 
6241 	/* If we are doing a FULL RSCN rediscovery, match everything */
6242 	if (vport->fc_flag & FC_RSCN_DISCOVERY)
6243 		return did;
6244 
6245 	spin_lock_irq(shost->host_lock);
6246 	if (vport->fc_rscn_flush) {
6247 		/* Another thread is walking fc_rscn_id_list on this vport */
6248 		spin_unlock_irq(shost->host_lock);
6249 		return 0;
6250 	}
6251 	/* Indicate we are walking fc_rscn_id_list on this vport */
6252 	vport->fc_rscn_flush = 1;
6253 	spin_unlock_irq(shost->host_lock);
6254 	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6255 		lp = vport->fc_rscn_id_list[i]->virt;
6256 		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
6257 		payload_len -= sizeof(uint32_t);	/* take off word 0 */
6258 		while (payload_len) {
6259 			rscn_did.un.word = be32_to_cpu(*lp++);
6260 			payload_len -= sizeof(uint32_t);
6261 			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
6262 			case RSCN_ADDRESS_FORMAT_PORT:
6263 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6264 				    && (ns_did.un.b.area == rscn_did.un.b.area)
6265 				    && (ns_did.un.b.id == rscn_did.un.b.id))
6266 					goto return_did_out;
6267 				break;
6268 			case RSCN_ADDRESS_FORMAT_AREA:
6269 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6270 				    && (ns_did.un.b.area == rscn_did.un.b.area))
6271 					goto return_did_out;
6272 				break;
6273 			case RSCN_ADDRESS_FORMAT_DOMAIN:
6274 				if (ns_did.un.b.domain == rscn_did.un.b.domain)
6275 					goto return_did_out;
6276 				break;
6277 			case RSCN_ADDRESS_FORMAT_FABRIC:
6278 				goto return_did_out;
6279 			}
6280 		}
6281 	}
6282 	/* Indicate we are done with walking fc_rscn_id_list on this vport */
6283 	vport->fc_rscn_flush = 0;
6284 	return 0;
6285 return_did_out:
6286 	/* Indicate we are done with walking fc_rscn_id_list on this vport */
6287 	vport->fc_rscn_flush = 0;
6288 	return did;
6289 }
6290 
6291 /**
6292  * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
6293  * @vport: pointer to a host virtual N_Port data structure.
6294  *
6295  * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
6296  * state machine for a @vport's nodes that are with pending RSCN (Registration
6297  * State Change Notification).
6298  *
6299  * Return code
6300  *   0 - Successful (currently alway return 0)
6301  **/
6302 static int
6303 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
6304 {
6305 	struct lpfc_nodelist *ndlp = NULL;
6306 
6307 	/* Move all affected nodes by pending RSCNs to NPR state. */
6308 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6309 		if (!NLP_CHK_NODE_ACT(ndlp) ||
6310 		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
6311 		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
6312 			continue;
6313 
6314 		/* NVME Target mode does not do RSCN Recovery. */
6315 		if (vport->phba->nvmet_support)
6316 			continue;
6317 
6318 		/* If we are in the process of doing discovery on this
6319 		 * NPort, let it continue on its own.
6320 		 */
6321 		switch (ndlp->nlp_state) {
6322 		case  NLP_STE_PLOGI_ISSUE:
6323 		case  NLP_STE_ADISC_ISSUE:
6324 		case  NLP_STE_REG_LOGIN_ISSUE:
6325 		case  NLP_STE_PRLI_ISSUE:
6326 		case  NLP_STE_LOGO_ISSUE:
6327 			continue;
6328 		}
6329 
6330 		if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
6331 			lpfc_nvme_rescan_port(vport, ndlp);
6332 
6333 		lpfc_disc_state_machine(vport, ndlp, NULL,
6334 					NLP_EVT_DEVICE_RECOVERY);
6335 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
6336 	}
6337 	return 0;
6338 }
6339 
6340 /**
6341  * lpfc_send_rscn_event - Send an RSCN event to management application
6342  * @vport: pointer to a host virtual N_Port data structure.
6343  * @cmdiocb: pointer to lpfc command iocb data structure.
6344  *
6345  * lpfc_send_rscn_event sends an RSCN netlink event to management
6346  * applications.
6347  */
6348 static void
6349 lpfc_send_rscn_event(struct lpfc_vport *vport,
6350 		struct lpfc_iocbq *cmdiocb)
6351 {
6352 	struct lpfc_dmabuf *pcmd;
6353 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6354 	uint32_t *payload_ptr;
6355 	uint32_t payload_len;
6356 	struct lpfc_rscn_event_header *rscn_event_data;
6357 
6358 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6359 	payload_ptr = (uint32_t *) pcmd->virt;
6360 	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
6361 
6362 	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
6363 		payload_len, GFP_KERNEL);
6364 	if (!rscn_event_data) {
6365 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6366 			"0147 Failed to allocate memory for RSCN event\n");
6367 		return;
6368 	}
6369 	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
6370 	rscn_event_data->payload_length = payload_len;
6371 	memcpy(rscn_event_data->rscn_payload, payload_ptr,
6372 		payload_len);
6373 
6374 	fc_host_post_vendor_event(shost,
6375 		fc_get_event_number(),
6376 		sizeof(struct lpfc_rscn_event_header) + payload_len,
6377 		(char *)rscn_event_data,
6378 		LPFC_NL_VENDOR_ID);
6379 
6380 	kfree(rscn_event_data);
6381 }
6382 
6383 /**
6384  * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
6385  * @vport: pointer to a host virtual N_Port data structure.
6386  * @cmdiocb: pointer to lpfc command iocb data structure.
6387  * @ndlp: pointer to a node-list data structure.
6388  *
6389  * This routine processes an unsolicited RSCN (Registration State Change
6390  * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
6391  * to invoke fc_host_post_event() routine to the FC transport layer. If the
6392  * discover state machine is about to begin discovery, it just accepts the
6393  * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
6394  * contains N_Port IDs for other vports on this HBA, it just accepts the
6395  * RSCN and ignore processing it. If the state machine is in the recovery
6396  * state, the fc_rscn_id_list of this @vport is walked and the
6397  * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
6398  * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
6399  * routine is invoked to handle the RSCN event.
6400  *
6401  * Return code
6402  *   0 - Just sent the acc response
6403  *   1 - Sent the acc response and waited for name server completion
6404  **/
6405 static int
6406 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6407 		  struct lpfc_nodelist *ndlp)
6408 {
6409 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6410 	struct lpfc_hba  *phba = vport->phba;
6411 	struct lpfc_dmabuf *pcmd;
6412 	uint32_t *lp, *datap;
6413 	uint32_t payload_len, length, nportid, *cmd;
6414 	int rscn_cnt;
6415 	int rscn_id = 0, hba_id = 0;
6416 	int i;
6417 
6418 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6419 	lp = (uint32_t *) pcmd->virt;
6420 
6421 	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
6422 	payload_len -= sizeof(uint32_t);	/* take off word 0 */
6423 	/* RSCN received */
6424 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6425 			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
6426 			 vport->fc_flag, payload_len, *lp,
6427 			 vport->fc_rscn_id_cnt);
6428 
6429 	/* Send an RSCN event to the management application */
6430 	lpfc_send_rscn_event(vport, cmdiocb);
6431 
6432 	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
6433 		fc_host_post_event(shost, fc_get_event_number(),
6434 			FCH_EVT_RSCN, lp[i]);
6435 
6436 	/* Check if RSCN is coming from a direct-connected remote NPort */
6437 	if (vport->fc_flag & FC_PT2PT) {
6438 		/* If so, just ACC it, no other action needed for now */
6439 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6440 				 "2024 pt2pt RSCN %08x Data: x%x x%x\n",
6441 				 *lp, vport->fc_flag, payload_len);
6442 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6443 
6444 		if (ndlp->nlp_fc4_type & NLP_FC4_NVME)
6445 			lpfc_nvme_rescan_port(vport, ndlp);
6446 		return 0;
6447 	}
6448 
6449 	/* If we are about to begin discovery, just ACC the RSCN.
6450 	 * Discovery processing will satisfy it.
6451 	 */
6452 	if (vport->port_state <= LPFC_NS_QRY) {
6453 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6454 			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
6455 			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6456 
6457 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6458 		return 0;
6459 	}
6460 
6461 	/* If this RSCN just contains NPortIDs for other vports on this HBA,
6462 	 * just ACC and ignore it.
6463 	 */
6464 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6465 		!(vport->cfg_peer_port_login)) {
6466 		i = payload_len;
6467 		datap = lp;
6468 		while (i > 0) {
6469 			nportid = *datap++;
6470 			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
6471 			i -= sizeof(uint32_t);
6472 			rscn_id++;
6473 			if (lpfc_find_vport_by_did(phba, nportid))
6474 				hba_id++;
6475 		}
6476 		if (rscn_id == hba_id) {
6477 			/* ALL NPortIDs in RSCN are on HBA */
6478 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6479 					 "0219 Ignore RSCN "
6480 					 "Data: x%x x%x x%x x%x\n",
6481 					 vport->fc_flag, payload_len,
6482 					 *lp, vport->fc_rscn_id_cnt);
6483 			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6484 				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
6485 				ndlp->nlp_DID, vport->port_state,
6486 				ndlp->nlp_flag);
6487 
6488 			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
6489 				ndlp, NULL);
6490 			return 0;
6491 		}
6492 	}
6493 
6494 	spin_lock_irq(shost->host_lock);
6495 	if (vport->fc_rscn_flush) {
6496 		/* Another thread is walking fc_rscn_id_list on this vport */
6497 		vport->fc_flag |= FC_RSCN_DISCOVERY;
6498 		spin_unlock_irq(shost->host_lock);
6499 		/* Send back ACC */
6500 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6501 		return 0;
6502 	}
6503 	/* Indicate we are walking fc_rscn_id_list on this vport */
6504 	vport->fc_rscn_flush = 1;
6505 	spin_unlock_irq(shost->host_lock);
6506 	/* Get the array count after successfully have the token */
6507 	rscn_cnt = vport->fc_rscn_id_cnt;
6508 	/* If we are already processing an RSCN, save the received
6509 	 * RSCN payload buffer, cmdiocb->context2 to process later.
6510 	 */
6511 	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
6512 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6513 			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
6514 			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6515 
6516 		spin_lock_irq(shost->host_lock);
6517 		vport->fc_flag |= FC_RSCN_DEFERRED;
6518 		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
6519 		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
6520 			vport->fc_flag |= FC_RSCN_MODE;
6521 			spin_unlock_irq(shost->host_lock);
6522 			if (rscn_cnt) {
6523 				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
6524 				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
6525 			}
6526 			if ((rscn_cnt) &&
6527 			    (payload_len + length <= LPFC_BPL_SIZE)) {
6528 				*cmd &= ELS_CMD_MASK;
6529 				*cmd |= cpu_to_be32(payload_len + length);
6530 				memcpy(((uint8_t *)cmd) + length, lp,
6531 				       payload_len);
6532 			} else {
6533 				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
6534 				vport->fc_rscn_id_cnt++;
6535 				/* If we zero, cmdiocb->context2, the calling
6536 				 * routine will not try to free it.
6537 				 */
6538 				cmdiocb->context2 = NULL;
6539 			}
6540 			/* Deferred RSCN */
6541 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6542 					 "0235 Deferred RSCN "
6543 					 "Data: x%x x%x x%x\n",
6544 					 vport->fc_rscn_id_cnt, vport->fc_flag,
6545 					 vport->port_state);
6546 		} else {
6547 			vport->fc_flag |= FC_RSCN_DISCOVERY;
6548 			spin_unlock_irq(shost->host_lock);
6549 			/* ReDiscovery RSCN */
6550 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6551 					 "0234 ReDiscovery RSCN "
6552 					 "Data: x%x x%x x%x\n",
6553 					 vport->fc_rscn_id_cnt, vport->fc_flag,
6554 					 vport->port_state);
6555 		}
6556 		/* Indicate we are done walking fc_rscn_id_list on this vport */
6557 		vport->fc_rscn_flush = 0;
6558 		/* Send back ACC */
6559 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6560 		/* send RECOVERY event for ALL nodes that match RSCN payload */
6561 		lpfc_rscn_recovery_check(vport);
6562 		return 0;
6563 	}
6564 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6565 		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
6566 		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6567 
6568 	spin_lock_irq(shost->host_lock);
6569 	vport->fc_flag |= FC_RSCN_MODE;
6570 	spin_unlock_irq(shost->host_lock);
6571 	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
6572 	/* Indicate we are done walking fc_rscn_id_list on this vport */
6573 	vport->fc_rscn_flush = 0;
6574 	/*
6575 	 * If we zero, cmdiocb->context2, the calling routine will
6576 	 * not try to free it.
6577 	 */
6578 	cmdiocb->context2 = NULL;
6579 	lpfc_set_disctmo(vport);
6580 	/* Send back ACC */
6581 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6582 	/* send RECOVERY event for ALL nodes that match RSCN payload */
6583 	lpfc_rscn_recovery_check(vport);
6584 	return lpfc_els_handle_rscn(vport);
6585 }
6586 
6587 /**
6588  * lpfc_els_handle_rscn - Handle rscn for a vport
6589  * @vport: pointer to a host virtual N_Port data structure.
6590  *
6591  * This routine handles the Registration State Configuration Notification
6592  * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
6593  * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
6594  * if the ndlp to NameServer exists, a Common Transport (CT) command to the
6595  * NameServer shall be issued. If CT command to the NameServer fails to be
6596  * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
6597  * RSCN activities with the @vport.
6598  *
6599  * Return code
6600  *   0 - Cleaned up rscn on the @vport
6601  *   1 - Wait for plogi to name server before proceed
6602  **/
6603 int
6604 lpfc_els_handle_rscn(struct lpfc_vport *vport)
6605 {
6606 	struct lpfc_nodelist *ndlp;
6607 	struct lpfc_hba  *phba = vport->phba;
6608 
6609 	/* Ignore RSCN if the port is being torn down. */
6610 	if (vport->load_flag & FC_UNLOADING) {
6611 		lpfc_els_flush_rscn(vport);
6612 		return 0;
6613 	}
6614 
6615 	/* Start timer for RSCN processing */
6616 	lpfc_set_disctmo(vport);
6617 
6618 	/* RSCN processed */
6619 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6620 			 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
6621 			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
6622 			 vport->port_state);
6623 
6624 	/* To process RSCN, first compare RSCN data with NameServer */
6625 	vport->fc_ns_retry = 0;
6626 	vport->num_disc_nodes = 0;
6627 
6628 	ndlp = lpfc_findnode_did(vport, NameServer_DID);
6629 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
6630 	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
6631 		/* Good ndlp, issue CT Request to NameServer.  Need to
6632 		 * know how many gidfts were issued.  If none, then just
6633 		 * flush the RSCN.  Otherwise, the outstanding requests
6634 		 * need to complete.
6635 		 */
6636 		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
6637 			if (lpfc_issue_gidft(vport) > 0)
6638 				return 1;
6639 		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
6640 			if (lpfc_issue_gidpt(vport) > 0)
6641 				return 1;
6642 		} else {
6643 			return 1;
6644 		}
6645 	} else {
6646 		/* Nameserver login in question.  Revalidate. */
6647 		if (ndlp) {
6648 			ndlp = lpfc_enable_node(vport, ndlp,
6649 						NLP_STE_PLOGI_ISSUE);
6650 			if (!ndlp) {
6651 				lpfc_els_flush_rscn(vport);
6652 				return 0;
6653 			}
6654 			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
6655 		} else {
6656 			ndlp = lpfc_nlp_init(vport, NameServer_DID);
6657 			if (!ndlp) {
6658 				lpfc_els_flush_rscn(vport);
6659 				return 0;
6660 			}
6661 			ndlp->nlp_prev_state = ndlp->nlp_state;
6662 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6663 		}
6664 		ndlp->nlp_type |= NLP_FABRIC;
6665 		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
6666 		/* Wait for NameServer login cmpl before we can
6667 		 * continue
6668 		 */
6669 		return 1;
6670 	}
6671 
6672 	lpfc_els_flush_rscn(vport);
6673 	return 0;
6674 }
6675 
6676 /**
6677  * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
6678  * @vport: pointer to a host virtual N_Port data structure.
6679  * @cmdiocb: pointer to lpfc command iocb data structure.
6680  * @ndlp: pointer to a node-list data structure.
6681  *
6682  * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
6683  * unsolicited event. An unsolicited FLOGI can be received in a point-to-
6684  * point topology. As an unsolicited FLOGI should not be received in a loop
6685  * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
6686  * lpfc_check_sparm() routine is invoked to check the parameters in the
6687  * unsolicited FLOGI. If parameters validation failed, the routine
6688  * lpfc_els_rsp_reject() shall be called with reject reason code set to
6689  * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
6690  * FLOGI shall be compared with the Port WWN of the @vport to determine who
6691  * will initiate PLOGI. The higher lexicographical value party shall has
6692  * higher priority (as the winning port) and will initiate PLOGI and
6693  * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
6694  * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
6695  * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
6696  *
6697  * Return code
6698  *   0 - Successfully processed the unsolicited flogi
6699  *   1 - Failed to process the unsolicited flogi
6700  **/
6701 static int
6702 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6703 		   struct lpfc_nodelist *ndlp)
6704 {
6705 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6706 	struct lpfc_hba  *phba = vport->phba;
6707 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6708 	uint32_t *lp = (uint32_t *) pcmd->virt;
6709 	IOCB_t *icmd = &cmdiocb->iocb;
6710 	struct serv_parm *sp;
6711 	LPFC_MBOXQ_t *mbox;
6712 	uint32_t cmd, did;
6713 	int rc;
6714 	uint32_t fc_flag = 0;
6715 	uint32_t port_state = 0;
6716 
6717 	cmd = *lp++;
6718 	sp = (struct serv_parm *) lp;
6719 
6720 	/* FLOGI received */
6721 
6722 	lpfc_set_disctmo(vport);
6723 
6724 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6725 		/* We should never receive a FLOGI in loop mode, ignore it */
6726 		did = icmd->un.elsreq64.remoteID;
6727 
6728 		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
6729 		   Loop Mode */
6730 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6731 				 "0113 An FLOGI ELS command x%x was "
6732 				 "received from DID x%x in Loop Mode\n",
6733 				 cmd, did);
6734 		return 1;
6735 	}
6736 
6737 	(void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
6738 
6739 	/*
6740 	 * If our portname is greater than the remote portname,
6741 	 * then we initiate Nport login.
6742 	 */
6743 
6744 	rc = memcmp(&vport->fc_portname, &sp->portName,
6745 		    sizeof(struct lpfc_name));
6746 
6747 	if (!rc) {
6748 		if (phba->sli_rev < LPFC_SLI_REV4) {
6749 			mbox = mempool_alloc(phba->mbox_mem_pool,
6750 					     GFP_KERNEL);
6751 			if (!mbox)
6752 				return 1;
6753 			lpfc_linkdown(phba);
6754 			lpfc_init_link(phba, mbox,
6755 				       phba->cfg_topology,
6756 				       phba->cfg_link_speed);
6757 			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6758 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6759 			mbox->vport = vport;
6760 			rc = lpfc_sli_issue_mbox(phba, mbox,
6761 						 MBX_NOWAIT);
6762 			lpfc_set_loopback_flag(phba);
6763 			if (rc == MBX_NOT_FINISHED)
6764 				mempool_free(mbox, phba->mbox_mem_pool);
6765 			return 1;
6766 		}
6767 
6768 		/* abort the flogi coming back to ourselves
6769 		 * due to external loopback on the port.
6770 		 */
6771 		lpfc_els_abort_flogi(phba);
6772 		return 0;
6773 
6774 	} else if (rc > 0) {	/* greater than */
6775 		spin_lock_irq(shost->host_lock);
6776 		vport->fc_flag |= FC_PT2PT_PLOGI;
6777 		spin_unlock_irq(shost->host_lock);
6778 
6779 		/* If we have the high WWPN we can assign our own
6780 		 * myDID; otherwise, we have to WAIT for a PLOGI
6781 		 * from the remote NPort to find out what it
6782 		 * will be.
6783 		 */
6784 		vport->fc_myDID = PT2PT_LocalID;
6785 	} else {
6786 		vport->fc_myDID = PT2PT_RemoteID;
6787 	}
6788 
6789 	/*
6790 	 * The vport state should go to LPFC_FLOGI only
6791 	 * AFTER we issue a FLOGI, not receive one.
6792 	 */
6793 	spin_lock_irq(shost->host_lock);
6794 	fc_flag = vport->fc_flag;
6795 	port_state = vport->port_state;
6796 	vport->fc_flag |= FC_PT2PT;
6797 	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6798 
6799 	/* Acking an unsol FLOGI.  Count 1 for link bounce
6800 	 * work-around.
6801 	 */
6802 	vport->rcv_flogi_cnt++;
6803 	spin_unlock_irq(shost->host_lock);
6804 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6805 			 "3311 Rcv Flogi PS x%x new PS x%x "
6806 			 "fc_flag x%x new fc_flag x%x\n",
6807 			 port_state, vport->port_state,
6808 			 fc_flag, vport->fc_flag);
6809 
6810 	/*
6811 	 * We temporarily set fc_myDID to make it look like we are
6812 	 * a Fabric. This is done just so we end up with the right
6813 	 * did / sid on the FLOGI ACC rsp.
6814 	 */
6815 	did = vport->fc_myDID;
6816 	vport->fc_myDID = Fabric_DID;
6817 
6818 	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
6819 
6820 	/* Defer ACC response until AFTER we issue a FLOGI */
6821 	if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) {
6822 		phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext;
6823 		phba->defer_flogi_acc_ox_id =
6824 					cmdiocb->iocb.unsli3.rcvsli3.ox_id;
6825 
6826 		vport->fc_myDID = did;
6827 
6828 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6829 				 "3344 Deferring FLOGI ACC: rx_id: x%x,"
6830 				 " ox_id: x%x, hba_flag x%x\n",
6831 				 phba->defer_flogi_acc_rx_id,
6832 				 phba->defer_flogi_acc_ox_id, phba->hba_flag);
6833 
6834 		phba->defer_flogi_acc_flag = true;
6835 
6836 		return 0;
6837 	}
6838 
6839 	/* Send back ACC */
6840 	lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
6841 
6842 	/* Now lets put fc_myDID back to what its supposed to be */
6843 	vport->fc_myDID = did;
6844 
6845 	return 0;
6846 }
6847 
6848 /**
6849  * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
6850  * @vport: pointer to a host virtual N_Port data structure.
6851  * @cmdiocb: pointer to lpfc command iocb data structure.
6852  * @ndlp: pointer to a node-list data structure.
6853  *
6854  * This routine processes Request Node Identification Data (RNID) IOCB
6855  * received as an ELS unsolicited event. Only when the RNID specified format
6856  * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
6857  * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
6858  * Accept (ACC) the RNID ELS command. All the other RNID formats are
6859  * rejected by invoking the lpfc_els_rsp_reject() routine.
6860  *
6861  * Return code
6862  *   0 - Successfully processed rnid iocb (currently always return 0)
6863  **/
6864 static int
6865 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6866 		  struct lpfc_nodelist *ndlp)
6867 {
6868 	struct lpfc_dmabuf *pcmd;
6869 	uint32_t *lp;
6870 	RNID *rn;
6871 	struct ls_rjt stat;
6872 
6873 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6874 	lp = (uint32_t *) pcmd->virt;
6875 
6876 	lp++;
6877 	rn = (RNID *) lp;
6878 
6879 	/* RNID received */
6880 
6881 	switch (rn->Format) {
6882 	case 0:
6883 	case RNID_TOPOLOGY_DISC:
6884 		/* Send back ACC */
6885 		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
6886 		break;
6887 	default:
6888 		/* Reject this request because format not supported */
6889 		stat.un.b.lsRjtRsvd0 = 0;
6890 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6891 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6892 		stat.un.b.vendorUnique = 0;
6893 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
6894 			NULL);
6895 	}
6896 	return 0;
6897 }
6898 
6899 /**
6900  * lpfc_els_rcv_echo - Process an unsolicited echo iocb
6901  * @vport: pointer to a host virtual N_Port data structure.
6902  * @cmdiocb: pointer to lpfc command iocb data structure.
6903  * @ndlp: pointer to a node-list data structure.
6904  *
6905  * Return code
6906  *   0 - Successfully processed echo iocb (currently always return 0)
6907  **/
6908 static int
6909 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6910 		  struct lpfc_nodelist *ndlp)
6911 {
6912 	uint8_t *pcmd;
6913 
6914 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
6915 
6916 	/* skip over first word of echo command to find echo data */
6917 	pcmd += sizeof(uint32_t);
6918 
6919 	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
6920 	return 0;
6921 }
6922 
6923 /**
6924  * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
6925  * @vport: pointer to a host virtual N_Port data structure.
6926  * @cmdiocb: pointer to lpfc command iocb data structure.
6927  * @ndlp: pointer to a node-list data structure.
6928  *
6929  * This routine processes a Link Incident Report Registration(LIRR) IOCB
6930  * received as an ELS unsolicited event. Currently, this function just invokes
6931  * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
6932  *
6933  * Return code
6934  *   0 - Successfully processed lirr iocb (currently always return 0)
6935  **/
6936 static int
6937 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6938 		  struct lpfc_nodelist *ndlp)
6939 {
6940 	struct ls_rjt stat;
6941 
6942 	/* For now, unconditionally reject this command */
6943 	stat.un.b.lsRjtRsvd0 = 0;
6944 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6945 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6946 	stat.un.b.vendorUnique = 0;
6947 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6948 	return 0;
6949 }
6950 
6951 /**
6952  * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
6953  * @vport: pointer to a host virtual N_Port data structure.
6954  * @cmdiocb: pointer to lpfc command iocb data structure.
6955  * @ndlp: pointer to a node-list data structure.
6956  *
6957  * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
6958  * received as an ELS unsolicited event. A request to RRQ shall only
6959  * be accepted if the Originator Nx_Port N_Port_ID or the Responder
6960  * Nx_Port N_Port_ID of the target Exchange is the same as the
6961  * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
6962  * not accepted, an LS_RJT with reason code "Unable to perform
6963  * command request" and reason code explanation "Invalid Originator
6964  * S_ID" shall be returned. For now, we just unconditionally accept
6965  * RRQ from the target.
6966  **/
6967 static void
6968 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6969 		 struct lpfc_nodelist *ndlp)
6970 {
6971 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6972 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
6973 		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
6974 }
6975 
6976 /**
6977  * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
6978  * @phba: pointer to lpfc hba data structure.
6979  * @pmb: pointer to the driver internal queue element for mailbox command.
6980  *
6981  * This routine is the completion callback function for the MBX_READ_LNK_STAT
6982  * mailbox command. This callback function is to actually send the Accept
6983  * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
6984  * collects the link statistics from the completion of the MBX_READ_LNK_STAT
6985  * mailbox command, constructs the RPS response with the link statistics
6986  * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
6987  * response to the RPS.
6988  *
6989  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6990  * will be incremented by 1 for holding the ndlp and the reference to ndlp
6991  * will be stored into the context1 field of the IOCB for the completion
6992  * callback function to the RPS Accept Response ELS IOCB command.
6993  *
6994  **/
6995 static void
6996 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6997 {
6998 	MAILBOX_t *mb;
6999 	IOCB_t *icmd;
7000 	struct RLS_RSP *rls_rsp;
7001 	uint8_t *pcmd;
7002 	struct lpfc_iocbq *elsiocb;
7003 	struct lpfc_nodelist *ndlp;
7004 	uint16_t oxid;
7005 	uint16_t rxid;
7006 	uint32_t cmdsize;
7007 
7008 	mb = &pmb->u.mb;
7009 
7010 	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
7011 	rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
7012 	oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
7013 	pmb->ctx_buf = NULL;
7014 	pmb->ctx_ndlp = NULL;
7015 
7016 	if (mb->mbxStatus) {
7017 		mempool_free(pmb, phba->mbox_mem_pool);
7018 		return;
7019 	}
7020 
7021 	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
7022 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7023 				     lpfc_max_els_tries, ndlp,
7024 				     ndlp->nlp_DID, ELS_CMD_ACC);
7025 
7026 	/* Decrement the ndlp reference count from previous mbox command */
7027 	lpfc_nlp_put(ndlp);
7028 
7029 	if (!elsiocb) {
7030 		mempool_free(pmb, phba->mbox_mem_pool);
7031 		return;
7032 	}
7033 
7034 	icmd = &elsiocb->iocb;
7035 	icmd->ulpContext = rxid;
7036 	icmd->unsli3.rcvsli3.ox_id = oxid;
7037 
7038 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7039 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7040 	pcmd += sizeof(uint32_t); /* Skip past command */
7041 	rls_rsp = (struct RLS_RSP *)pcmd;
7042 
7043 	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
7044 	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
7045 	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
7046 	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
7047 	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
7048 	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7049 	mempool_free(pmb, phba->mbox_mem_pool);
7050 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
7051 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
7052 			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
7053 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
7054 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
7055 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7056 			 ndlp->nlp_rpi);
7057 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7058 	phba->fc_stat.elsXmitACC++;
7059 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7060 		lpfc_els_free_iocb(phba, elsiocb);
7061 }
7062 
7063 /**
7064  * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
7065  * @phba: pointer to lpfc hba data structure.
7066  * @pmb: pointer to the driver internal queue element for mailbox command.
7067  *
7068  * This routine is the completion callback function for the MBX_READ_LNK_STAT
7069  * mailbox command. This callback function is to actually send the Accept
7070  * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
7071  * collects the link statistics from the completion of the MBX_READ_LNK_STAT
7072  * mailbox command, constructs the RPS response with the link statistics
7073  * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
7074  * response to the RPS.
7075  *
7076  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7077  * will be incremented by 1 for holding the ndlp and the reference to ndlp
7078  * will be stored into the context1 field of the IOCB for the completion
7079  * callback function to the RPS Accept Response ELS IOCB command.
7080  *
7081  **/
7082 static void
7083 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7084 {
7085 	MAILBOX_t *mb;
7086 	IOCB_t *icmd;
7087 	RPS_RSP *rps_rsp;
7088 	uint8_t *pcmd;
7089 	struct lpfc_iocbq *elsiocb;
7090 	struct lpfc_nodelist *ndlp;
7091 	uint16_t status;
7092 	uint16_t oxid;
7093 	uint16_t rxid;
7094 	uint32_t cmdsize;
7095 
7096 	mb = &pmb->u.mb;
7097 
7098 	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
7099 	rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff);
7100 	oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff);
7101 	pmb->ctx_ndlp = NULL;
7102 	pmb->ctx_buf = NULL;
7103 
7104 	if (mb->mbxStatus) {
7105 		mempool_free(pmb, phba->mbox_mem_pool);
7106 		return;
7107 	}
7108 
7109 	cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
7110 	mempool_free(pmb, phba->mbox_mem_pool);
7111 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7112 				     lpfc_max_els_tries, ndlp,
7113 				     ndlp->nlp_DID, ELS_CMD_ACC);
7114 
7115 	/* Decrement the ndlp reference count from previous mbox command */
7116 	lpfc_nlp_put(ndlp);
7117 
7118 	if (!elsiocb)
7119 		return;
7120 
7121 	icmd = &elsiocb->iocb;
7122 	icmd->ulpContext = rxid;
7123 	icmd->unsli3.rcvsli3.ox_id = oxid;
7124 
7125 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7126 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7127 	pcmd += sizeof(uint32_t); /* Skip past command */
7128 	rps_rsp = (RPS_RSP *)pcmd;
7129 
7130 	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
7131 		status = 0x10;
7132 	else
7133 		status = 0x8;
7134 	if (phba->pport->fc_flag & FC_FABRIC)
7135 		status |= 0x4;
7136 
7137 	rps_rsp->rsvd1 = 0;
7138 	rps_rsp->portStatus = cpu_to_be16(status);
7139 	rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
7140 	rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
7141 	rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
7142 	rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
7143 	rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
7144 	rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
7145 	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
7146 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
7147 			 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
7148 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
7149 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
7150 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7151 			 ndlp->nlp_rpi);
7152 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7153 	phba->fc_stat.elsXmitACC++;
7154 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7155 		lpfc_els_free_iocb(phba, elsiocb);
7156 	return;
7157 }
7158 
7159 /**
7160  * lpfc_els_rcv_rls - Process an unsolicited rls iocb
7161  * @vport: pointer to a host virtual N_Port data structure.
7162  * @cmdiocb: pointer to lpfc command iocb data structure.
7163  * @ndlp: pointer to a node-list data structure.
7164  *
7165  * This routine processes Read Port Status (RPL) IOCB received as an
7166  * ELS unsolicited event. It first checks the remote port state. If the
7167  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7168  * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
7169  * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
7170  * for reading the HBA link statistics. It is for the callback function,
7171  * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
7172  * to actually sending out RPL Accept (ACC) response.
7173  *
7174  * Return codes
7175  *   0 - Successfully processed rls iocb (currently always return 0)
7176  **/
7177 static int
7178 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7179 		 struct lpfc_nodelist *ndlp)
7180 {
7181 	struct lpfc_hba *phba = vport->phba;
7182 	LPFC_MBOXQ_t *mbox;
7183 	struct ls_rjt stat;
7184 
7185 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7186 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7187 		/* reject the unsolicited RPS request and done with it */
7188 		goto reject_out;
7189 
7190 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
7191 	if (mbox) {
7192 		lpfc_read_lnk_stat(phba, mbox);
7193 		mbox->ctx_buf = (void *)((unsigned long)
7194 			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
7195 			cmdiocb->iocb.ulpContext)); /* rx_id */
7196 		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
7197 		mbox->vport = vport;
7198 		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
7199 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7200 			!= MBX_NOT_FINISHED)
7201 			/* Mbox completion will send ELS Response */
7202 			return 0;
7203 		/* Decrement reference count used for the failed mbox
7204 		 * command.
7205 		 */
7206 		lpfc_nlp_put(ndlp);
7207 		mempool_free(mbox, phba->mbox_mem_pool);
7208 	}
7209 reject_out:
7210 	/* issue rejection response */
7211 	stat.un.b.lsRjtRsvd0 = 0;
7212 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7213 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7214 	stat.un.b.vendorUnique = 0;
7215 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7216 	return 0;
7217 }
7218 
7219 /**
7220  * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
7221  * @vport: pointer to a host virtual N_Port data structure.
7222  * @cmdiocb: pointer to lpfc command iocb data structure.
7223  * @ndlp: pointer to a node-list data structure.
7224  *
7225  * This routine processes Read Timout Value (RTV) IOCB received as an
7226  * ELS unsolicited event. It first checks the remote port state. If the
7227  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7228  * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
7229  * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
7230  * Value (RTV) unsolicited IOCB event.
7231  *
7232  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7233  * will be incremented by 1 for holding the ndlp and the reference to ndlp
7234  * will be stored into the context1 field of the IOCB for the completion
7235  * callback function to the RPS Accept Response ELS IOCB command.
7236  *
7237  * Return codes
7238  *   0 - Successfully processed rtv iocb (currently always return 0)
7239  **/
7240 static int
7241 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7242 		 struct lpfc_nodelist *ndlp)
7243 {
7244 	struct lpfc_hba *phba = vport->phba;
7245 	struct ls_rjt stat;
7246 	struct RTV_RSP *rtv_rsp;
7247 	uint8_t *pcmd;
7248 	struct lpfc_iocbq *elsiocb;
7249 	uint32_t cmdsize;
7250 
7251 
7252 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7253 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7254 		/* reject the unsolicited RPS request and done with it */
7255 		goto reject_out;
7256 
7257 	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
7258 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
7259 				     lpfc_max_els_tries, ndlp,
7260 				     ndlp->nlp_DID, ELS_CMD_ACC);
7261 
7262 	if (!elsiocb)
7263 		return 1;
7264 
7265 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7266 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7267 	pcmd += sizeof(uint32_t); /* Skip past command */
7268 
7269 	/* use the command's xri in the response */
7270 	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
7271 	elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
7272 
7273 	rtv_rsp = (struct RTV_RSP *)pcmd;
7274 
7275 	/* populate RTV payload */
7276 	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
7277 	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
7278 	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
7279 	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
7280 	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
7281 
7282 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
7283 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
7284 			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
7285 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
7286 			 "Data: x%x x%x x%x\n",
7287 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
7288 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7289 			 ndlp->nlp_rpi,
7290 			rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
7291 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7292 	phba->fc_stat.elsXmitACC++;
7293 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
7294 		lpfc_els_free_iocb(phba, elsiocb);
7295 	return 0;
7296 
7297 reject_out:
7298 	/* issue rejection response */
7299 	stat.un.b.lsRjtRsvd0 = 0;
7300 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7301 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7302 	stat.un.b.vendorUnique = 0;
7303 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7304 	return 0;
7305 }
7306 
7307 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb
7308  * @vport: pointer to a host virtual N_Port data structure.
7309  * @cmdiocb: pointer to lpfc command iocb data structure.
7310  * @ndlp: pointer to a node-list data structure.
7311  *
7312  * This routine processes Read Port Status (RPS) IOCB received as an
7313  * ELS unsolicited event. It first checks the remote port state. If the
7314  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7315  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
7316  * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
7317  * for reading the HBA link statistics. It is for the callback function,
7318  * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
7319  * to actually sending out RPS Accept (ACC) response.
7320  *
7321  * Return codes
7322  *   0 - Successfully processed rps iocb (currently always return 0)
7323  **/
7324 static int
7325 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7326 		 struct lpfc_nodelist *ndlp)
7327 {
7328 	struct lpfc_hba *phba = vport->phba;
7329 	uint32_t *lp;
7330 	uint8_t flag;
7331 	LPFC_MBOXQ_t *mbox;
7332 	struct lpfc_dmabuf *pcmd;
7333 	RPS *rps;
7334 	struct ls_rjt stat;
7335 
7336 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7337 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7338 		/* reject the unsolicited RPS request and done with it */
7339 		goto reject_out;
7340 
7341 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7342 	lp = (uint32_t *) pcmd->virt;
7343 	flag = (be32_to_cpu(*lp++) & 0xf);
7344 	rps = (RPS *) lp;
7345 
7346 	if ((flag == 0) ||
7347 	    ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
7348 	    ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
7349 				    sizeof(struct lpfc_name)) == 0))) {
7350 
7351 		printk("Fix me....\n");
7352 		dump_stack();
7353 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
7354 		if (mbox) {
7355 			lpfc_read_lnk_stat(phba, mbox);
7356 			mbox->ctx_buf = (void *)((unsigned long)
7357 				((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
7358 				cmdiocb->iocb.ulpContext)); /* rx_id */
7359 			mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
7360 			mbox->vport = vport;
7361 			mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
7362 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7363 				!= MBX_NOT_FINISHED)
7364 				/* Mbox completion will send ELS Response */
7365 				return 0;
7366 			/* Decrement reference count used for the failed mbox
7367 			 * command.
7368 			 */
7369 			lpfc_nlp_put(ndlp);
7370 			mempool_free(mbox, phba->mbox_mem_pool);
7371 		}
7372 	}
7373 
7374 reject_out:
7375 	/* issue rejection response */
7376 	stat.un.b.lsRjtRsvd0 = 0;
7377 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7378 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7379 	stat.un.b.vendorUnique = 0;
7380 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7381 	return 0;
7382 }
7383 
7384 /* lpfc_issue_els_rrq - Process an unsolicited rps iocb
7385  * @vport: pointer to a host virtual N_Port data structure.
7386  * @ndlp: pointer to a node-list data structure.
7387  * @did: DID of the target.
7388  * @rrq: Pointer to the rrq struct.
7389  *
7390  * Build a ELS RRQ command and send it to the target. If the issue_iocb is
7391  * Successful the the completion handler will clear the RRQ.
7392  *
7393  * Return codes
7394  *   0 - Successfully sent rrq els iocb.
7395  *   1 - Failed to send rrq els iocb.
7396  **/
7397 static int
7398 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7399 			uint32_t did, struct lpfc_node_rrq *rrq)
7400 {
7401 	struct lpfc_hba  *phba = vport->phba;
7402 	struct RRQ *els_rrq;
7403 	struct lpfc_iocbq *elsiocb;
7404 	uint8_t *pcmd;
7405 	uint16_t cmdsize;
7406 	int ret;
7407 
7408 
7409 	if (ndlp != rrq->ndlp)
7410 		ndlp = rrq->ndlp;
7411 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
7412 		return 1;
7413 
7414 	/* If ndlp is not NULL, we will bump the reference count on it */
7415 	cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
7416 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
7417 				     ELS_CMD_RRQ);
7418 	if (!elsiocb)
7419 		return 1;
7420 
7421 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7422 
7423 	/* For RRQ request, remainder of payload is Exchange IDs */
7424 	*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
7425 	pcmd += sizeof(uint32_t);
7426 	els_rrq = (struct RRQ *) pcmd;
7427 
7428 	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
7429 	bf_set(rrq_rxid, els_rrq, rrq->rxid);
7430 	bf_set(rrq_did, els_rrq, vport->fc_myDID);
7431 	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
7432 	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
7433 
7434 
7435 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
7436 		"Issue RRQ:     did:x%x",
7437 		did, rrq->xritag, rrq->rxid);
7438 	elsiocb->context_un.rrq = rrq;
7439 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
7440 	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
7441 
7442 	if (ret == IOCB_ERROR) {
7443 		lpfc_els_free_iocb(phba, elsiocb);
7444 		return 1;
7445 	}
7446 	return 0;
7447 }
7448 
7449 /**
7450  * lpfc_send_rrq - Sends ELS RRQ if needed.
7451  * @phba: pointer to lpfc hba data structure.
7452  * @rrq: pointer to the active rrq.
7453  *
7454  * This routine will call the lpfc_issue_els_rrq if the rrq is
7455  * still active for the xri. If this function returns a failure then
7456  * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
7457  *
7458  * Returns 0 Success.
7459  *         1 Failure.
7460  **/
7461 int
7462 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
7463 {
7464 	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
7465 						       rrq->nlp_DID);
7466 	if (!ndlp)
7467 		return 1;
7468 
7469 	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
7470 		return lpfc_issue_els_rrq(rrq->vport, ndlp,
7471 					 rrq->nlp_DID, rrq);
7472 	else
7473 		return 1;
7474 }
7475 
7476 /**
7477  * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
7478  * @vport: pointer to a host virtual N_Port data structure.
7479  * @cmdsize: size of the ELS command.
7480  * @oldiocb: pointer to the original lpfc command iocb data structure.
7481  * @ndlp: pointer to a node-list data structure.
7482  *
7483  * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command.
7484  * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
7485  *
7486  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7487  * will be incremented by 1 for holding the ndlp and the reference to ndlp
7488  * will be stored into the context1 field of the IOCB for the completion
7489  * callback function to the RPL Accept Response ELS command.
7490  *
7491  * Return code
7492  *   0 - Successfully issued ACC RPL ELS command
7493  *   1 - Failed to issue ACC RPL ELS command
7494  **/
7495 static int
7496 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
7497 		     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7498 {
7499 	struct lpfc_hba *phba = vport->phba;
7500 	IOCB_t *icmd, *oldcmd;
7501 	RPL_RSP rpl_rsp;
7502 	struct lpfc_iocbq *elsiocb;
7503 	uint8_t *pcmd;
7504 
7505 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
7506 				     ndlp->nlp_DID, ELS_CMD_ACC);
7507 
7508 	if (!elsiocb)
7509 		return 1;
7510 
7511 	icmd = &elsiocb->iocb;
7512 	oldcmd = &oldiocb->iocb;
7513 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
7514 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
7515 
7516 	pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7517 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7518 	pcmd += sizeof(uint16_t);
7519 	*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
7520 	pcmd += sizeof(uint16_t);
7521 
7522 	/* Setup the RPL ACC payload */
7523 	rpl_rsp.listLen = be32_to_cpu(1);
7524 	rpl_rsp.index = 0;
7525 	rpl_rsp.port_num_blk.portNum = 0;
7526 	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
7527 	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7528 	    sizeof(struct lpfc_name));
7529 	memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7530 	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
7531 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7532 			 "0120 Xmit ELS RPL ACC response tag x%x "
7533 			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
7534 			 "rpi x%x\n",
7535 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
7536 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7537 			 ndlp->nlp_rpi);
7538 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7539 	phba->fc_stat.elsXmitACC++;
7540 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
7541 	    IOCB_ERROR) {
7542 		lpfc_els_free_iocb(phba, elsiocb);
7543 		return 1;
7544 	}
7545 	return 0;
7546 }
7547 
7548 /**
7549  * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
7550  * @vport: pointer to a host virtual N_Port data structure.
7551  * @cmdiocb: pointer to lpfc command iocb data structure.
7552  * @ndlp: pointer to a node-list data structure.
7553  *
7554  * This routine processes Read Port List (RPL) IOCB received as an ELS
7555  * unsolicited event. It first checks the remote port state. If the remote
7556  * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
7557  * invokes the lpfc_els_rsp_reject() routine to send reject response.
7558  * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
7559  * to accept the RPL.
7560  *
7561  * Return code
7562  *   0 - Successfully processed rpl iocb (currently always return 0)
7563  **/
7564 static int
7565 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7566 		 struct lpfc_nodelist *ndlp)
7567 {
7568 	struct lpfc_dmabuf *pcmd;
7569 	uint32_t *lp;
7570 	uint32_t maxsize;
7571 	uint16_t cmdsize;
7572 	RPL *rpl;
7573 	struct ls_rjt stat;
7574 
7575 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7576 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
7577 		/* issue rejection response */
7578 		stat.un.b.lsRjtRsvd0 = 0;
7579 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7580 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7581 		stat.un.b.vendorUnique = 0;
7582 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
7583 			NULL);
7584 		/* rejected the unsolicited RPL request and done with it */
7585 		return 0;
7586 	}
7587 
7588 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7589 	lp = (uint32_t *) pcmd->virt;
7590 	rpl = (RPL *) (lp + 1);
7591 	maxsize = be32_to_cpu(rpl->maxsize);
7592 
7593 	/* We support only one port */
7594 	if ((rpl->index == 0) &&
7595 	    ((maxsize == 0) ||
7596 	     ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
7597 		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
7598 	} else {
7599 		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
7600 	}
7601 	lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
7602 
7603 	return 0;
7604 }
7605 
7606 /**
7607  * lpfc_els_rcv_farp - Process an unsolicited farp request els command
7608  * @vport: pointer to a virtual N_Port data structure.
7609  * @cmdiocb: pointer to lpfc command iocb data structure.
7610  * @ndlp: pointer to a node-list data structure.
7611  *
7612  * This routine processes Fibre Channel Address Resolution Protocol
7613  * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
7614  * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
7615  * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
7616  * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
7617  * remote PortName is compared against the FC PortName stored in the @vport
7618  * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
7619  * compared against the FC NodeName stored in the @vport data structure.
7620  * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
7621  * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
7622  * invoked to send out FARP Response to the remote node. Before sending the
7623  * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
7624  * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
7625  * routine is invoked to log into the remote port first.
7626  *
7627  * Return code
7628  *   0 - Either the FARP Match Mode not supported or successfully processed
7629  **/
7630 static int
7631 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7632 		  struct lpfc_nodelist *ndlp)
7633 {
7634 	struct lpfc_dmabuf *pcmd;
7635 	uint32_t *lp;
7636 	IOCB_t *icmd;
7637 	FARP *fp;
7638 	uint32_t cnt, did;
7639 
7640 	icmd = &cmdiocb->iocb;
7641 	did = icmd->un.elsreq64.remoteID;
7642 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7643 	lp = (uint32_t *) pcmd->virt;
7644 
7645 	lp++;
7646 	fp = (FARP *) lp;
7647 	/* FARP-REQ received from DID <did> */
7648 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7649 			 "0601 FARP-REQ received from DID x%x\n", did);
7650 	/* We will only support match on WWPN or WWNN */
7651 	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
7652 		return 0;
7653 	}
7654 
7655 	cnt = 0;
7656 	/* If this FARP command is searching for my portname */
7657 	if (fp->Mflags & FARP_MATCH_PORT) {
7658 		if (memcmp(&fp->RportName, &vport->fc_portname,
7659 			   sizeof(struct lpfc_name)) == 0)
7660 			cnt = 1;
7661 	}
7662 
7663 	/* If this FARP command is searching for my nodename */
7664 	if (fp->Mflags & FARP_MATCH_NODE) {
7665 		if (memcmp(&fp->RnodeName, &vport->fc_nodename,
7666 			   sizeof(struct lpfc_name)) == 0)
7667 			cnt = 1;
7668 	}
7669 
7670 	if (cnt) {
7671 		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
7672 		   (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
7673 			/* Log back into the node before sending the FARP. */
7674 			if (fp->Rflags & FARP_REQUEST_PLOGI) {
7675 				ndlp->nlp_prev_state = ndlp->nlp_state;
7676 				lpfc_nlp_set_state(vport, ndlp,
7677 						   NLP_STE_PLOGI_ISSUE);
7678 				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
7679 			}
7680 
7681 			/* Send a FARP response to that node */
7682 			if (fp->Rflags & FARP_REQUEST_FARPR)
7683 				lpfc_issue_els_farpr(vport, did, 0);
7684 		}
7685 	}
7686 	return 0;
7687 }
7688 
7689 /**
7690  * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
7691  * @vport: pointer to a host virtual N_Port data structure.
7692  * @cmdiocb: pointer to lpfc command iocb data structure.
7693  * @ndlp: pointer to a node-list data structure.
7694  *
7695  * This routine processes Fibre Channel Address Resolution Protocol
7696  * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
7697  * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
7698  * the FARP response request.
7699  *
7700  * Return code
7701  *   0 - Successfully processed FARPR IOCB (currently always return 0)
7702  **/
7703 static int
7704 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7705 		   struct lpfc_nodelist  *ndlp)
7706 {
7707 	struct lpfc_dmabuf *pcmd;
7708 	uint32_t *lp;
7709 	IOCB_t *icmd;
7710 	uint32_t did;
7711 
7712 	icmd = &cmdiocb->iocb;
7713 	did = icmd->un.elsreq64.remoteID;
7714 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7715 	lp = (uint32_t *) pcmd->virt;
7716 
7717 	lp++;
7718 	/* FARP-RSP received from DID <did> */
7719 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7720 			 "0600 FARP-RSP received from DID x%x\n", did);
7721 	/* ACCEPT the Farp resp request */
7722 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7723 
7724 	return 0;
7725 }
7726 
7727 /**
7728  * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
7729  * @vport: pointer to a host virtual N_Port data structure.
7730  * @cmdiocb: pointer to lpfc command iocb data structure.
7731  * @fan_ndlp: pointer to a node-list data structure.
7732  *
7733  * This routine processes a Fabric Address Notification (FAN) IOCB
7734  * command received as an ELS unsolicited event. The FAN ELS command will
7735  * only be processed on a physical port (i.e., the @vport represents the
7736  * physical port). The fabric NodeName and PortName from the FAN IOCB are
7737  * compared against those in the phba data structure. If any of those is
7738  * different, the lpfc_initial_flogi() routine is invoked to initialize
7739  * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
7740  * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
7741  * is invoked to register login to the fabric.
7742  *
7743  * Return code
7744  *   0 - Successfully processed fan iocb (currently always return 0).
7745  **/
7746 static int
7747 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7748 		 struct lpfc_nodelist *fan_ndlp)
7749 {
7750 	struct lpfc_hba *phba = vport->phba;
7751 	uint32_t *lp;
7752 	FAN *fp;
7753 
7754 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
7755 	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
7756 	fp = (FAN *) ++lp;
7757 	/* FAN received; Fan does not have a reply sequence */
7758 	if ((vport == phba->pport) &&
7759 	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
7760 		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
7761 			    sizeof(struct lpfc_name))) ||
7762 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
7763 			    sizeof(struct lpfc_name)))) {
7764 			/* This port has switched fabrics. FLOGI is required */
7765 			lpfc_issue_init_vfi(vport);
7766 		} else {
7767 			/* FAN verified - skip FLOGI */
7768 			vport->fc_myDID = vport->fc_prevDID;
7769 			if (phba->sli_rev < LPFC_SLI_REV4)
7770 				lpfc_issue_fabric_reglogin(vport);
7771 			else {
7772 				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7773 					"3138 Need register VFI: (x%x/%x)\n",
7774 					vport->fc_prevDID, vport->fc_myDID);
7775 				lpfc_issue_reg_vfi(vport);
7776 			}
7777 		}
7778 	}
7779 	return 0;
7780 }
7781 
7782 /**
7783  * lpfc_els_timeout - Handler funciton to the els timer
7784  * @ptr: holder for the timer function associated data.
7785  *
7786  * This routine is invoked by the ELS timer after timeout. It posts the ELS
7787  * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
7788  * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
7789  * up the worker thread. It is for the worker thread to invoke the routine
7790  * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
7791  **/
7792 void
7793 lpfc_els_timeout(struct timer_list *t)
7794 {
7795 	struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
7796 	struct lpfc_hba   *phba = vport->phba;
7797 	uint32_t tmo_posted;
7798 	unsigned long iflag;
7799 
7800 	spin_lock_irqsave(&vport->work_port_lock, iflag);
7801 	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
7802 	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
7803 		vport->work_port_events |= WORKER_ELS_TMO;
7804 	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
7805 
7806 	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
7807 		lpfc_worker_wake_up(phba);
7808 	return;
7809 }
7810 
7811 
7812 /**
7813  * lpfc_els_timeout_handler - Process an els timeout event
7814  * @vport: pointer to a virtual N_Port data structure.
7815  *
7816  * This routine is the actual handler function that processes an ELS timeout
7817  * event. It walks the ELS ring to get and abort all the IOCBs (except the
7818  * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
7819  * invoking the lpfc_sli_issue_abort_iotag() routine.
7820  **/
7821 void
7822 lpfc_els_timeout_handler(struct lpfc_vport *vport)
7823 {
7824 	struct lpfc_hba  *phba = vport->phba;
7825 	struct lpfc_sli_ring *pring;
7826 	struct lpfc_iocbq *tmp_iocb, *piocb;
7827 	IOCB_t *cmd = NULL;
7828 	struct lpfc_dmabuf *pcmd;
7829 	uint32_t els_command = 0;
7830 	uint32_t timeout;
7831 	uint32_t remote_ID = 0xffffffff;
7832 	LIST_HEAD(abort_list);
7833 
7834 
7835 	timeout = (uint32_t)(phba->fc_ratov << 1);
7836 
7837 	pring = lpfc_phba_elsring(phba);
7838 	if (unlikely(!pring))
7839 		return;
7840 
7841 	if ((phba->pport->load_flag & FC_UNLOADING))
7842 		return;
7843 	spin_lock_irq(&phba->hbalock);
7844 	if (phba->sli_rev == LPFC_SLI_REV4)
7845 		spin_lock(&pring->ring_lock);
7846 
7847 	if ((phba->pport->load_flag & FC_UNLOADING)) {
7848 		if (phba->sli_rev == LPFC_SLI_REV4)
7849 			spin_unlock(&pring->ring_lock);
7850 		spin_unlock_irq(&phba->hbalock);
7851 		return;
7852 	}
7853 
7854 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7855 		cmd = &piocb->iocb;
7856 
7857 		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
7858 		    piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
7859 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
7860 			continue;
7861 
7862 		if (piocb->vport != vport)
7863 			continue;
7864 
7865 		pcmd = (struct lpfc_dmabuf *) piocb->context2;
7866 		if (pcmd)
7867 			els_command = *(uint32_t *) (pcmd->virt);
7868 
7869 		if (els_command == ELS_CMD_FARP ||
7870 		    els_command == ELS_CMD_FARPR ||
7871 		    els_command == ELS_CMD_FDISC)
7872 			continue;
7873 
7874 		if (piocb->drvrTimeout > 0) {
7875 			if (piocb->drvrTimeout >= timeout)
7876 				piocb->drvrTimeout -= timeout;
7877 			else
7878 				piocb->drvrTimeout = 0;
7879 			continue;
7880 		}
7881 
7882 		remote_ID = 0xffffffff;
7883 		if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
7884 			remote_ID = cmd->un.elsreq64.remoteID;
7885 		else {
7886 			struct lpfc_nodelist *ndlp;
7887 			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
7888 			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
7889 				remote_ID = ndlp->nlp_DID;
7890 		}
7891 		list_add_tail(&piocb->dlist, &abort_list);
7892 	}
7893 	if (phba->sli_rev == LPFC_SLI_REV4)
7894 		spin_unlock(&pring->ring_lock);
7895 	spin_unlock_irq(&phba->hbalock);
7896 
7897 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
7898 		cmd = &piocb->iocb;
7899 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7900 			 "0127 ELS timeout Data: x%x x%x x%x "
7901 			 "x%x\n", els_command,
7902 			 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
7903 		spin_lock_irq(&phba->hbalock);
7904 		list_del_init(&piocb->dlist);
7905 		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
7906 		spin_unlock_irq(&phba->hbalock);
7907 	}
7908 
7909 	if (!list_empty(&pring->txcmplq))
7910 		if (!(phba->pport->load_flag & FC_UNLOADING))
7911 			mod_timer(&vport->els_tmofunc,
7912 				  jiffies + msecs_to_jiffies(1000 * timeout));
7913 }
7914 
7915 /**
7916  * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
7917  * @vport: pointer to a host virtual N_Port data structure.
7918  *
7919  * This routine is used to clean up all the outstanding ELS commands on a
7920  * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
7921  * routine. After that, it walks the ELS transmit queue to remove all the
7922  * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
7923  * the IOCBs with a non-NULL completion callback function, the callback
7924  * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7925  * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
7926  * callback function, the IOCB will simply be released. Finally, it walks
7927  * the ELS transmit completion queue to issue an abort IOCB to any transmit
7928  * completion queue IOCB that is associated with the @vport and is not
7929  * an IOCB from libdfc (i.e., the management plane IOCBs that are not
7930  * part of the discovery state machine) out to HBA by invoking the
7931  * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
7932  * abort IOCB to any transmit completion queueed IOCB, it does not guarantee
7933  * the IOCBs are aborted when this function returns.
7934  **/
7935 void
7936 lpfc_els_flush_cmd(struct lpfc_vport *vport)
7937 {
7938 	LIST_HEAD(abort_list);
7939 	struct lpfc_hba  *phba = vport->phba;
7940 	struct lpfc_sli_ring *pring;
7941 	struct lpfc_iocbq *tmp_iocb, *piocb;
7942 	IOCB_t *cmd = NULL;
7943 
7944 	lpfc_fabric_abort_vport(vport);
7945 	/*
7946 	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
7947 	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
7948 	 * ultimately grabs the ring_lock, the driver must splice the list into
7949 	 * a working list and release the locks before calling the abort.
7950 	 */
7951 	spin_lock_irq(&phba->hbalock);
7952 	pring = lpfc_phba_elsring(phba);
7953 
7954 	/* Bail out if we've no ELS wq, like in PCI error recovery case. */
7955 	if (unlikely(!pring)) {
7956 		spin_unlock_irq(&phba->hbalock);
7957 		return;
7958 	}
7959 
7960 	if (phba->sli_rev == LPFC_SLI_REV4)
7961 		spin_lock(&pring->ring_lock);
7962 
7963 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7964 		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
7965 			continue;
7966 
7967 		if (piocb->vport != vport)
7968 			continue;
7969 		list_add_tail(&piocb->dlist, &abort_list);
7970 	}
7971 	if (phba->sli_rev == LPFC_SLI_REV4)
7972 		spin_unlock(&pring->ring_lock);
7973 	spin_unlock_irq(&phba->hbalock);
7974 	/* Abort each iocb on the aborted list and remove the dlist links. */
7975 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
7976 		spin_lock_irq(&phba->hbalock);
7977 		list_del_init(&piocb->dlist);
7978 		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
7979 		spin_unlock_irq(&phba->hbalock);
7980 	}
7981 	if (!list_empty(&abort_list))
7982 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7983 				 "3387 abort list for txq not empty\n");
7984 	INIT_LIST_HEAD(&abort_list);
7985 
7986 	spin_lock_irq(&phba->hbalock);
7987 	if (phba->sli_rev == LPFC_SLI_REV4)
7988 		spin_lock(&pring->ring_lock);
7989 
7990 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
7991 		cmd = &piocb->iocb;
7992 
7993 		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
7994 			continue;
7995 		}
7996 
7997 		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
7998 		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
7999 		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
8000 		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
8001 		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
8002 			continue;
8003 
8004 		if (piocb->vport != vport)
8005 			continue;
8006 
8007 		list_del_init(&piocb->list);
8008 		list_add_tail(&piocb->list, &abort_list);
8009 	}
8010 	if (phba->sli_rev == LPFC_SLI_REV4)
8011 		spin_unlock(&pring->ring_lock);
8012 	spin_unlock_irq(&phba->hbalock);
8013 
8014 	/* Cancell all the IOCBs from the completions list */
8015 	lpfc_sli_cancel_iocbs(phba, &abort_list,
8016 			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
8017 
8018 	return;
8019 }
8020 
8021 /**
8022  * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
8023  * @phba: pointer to lpfc hba data structure.
8024  *
8025  * This routine is used to clean up all the outstanding ELS commands on a
8026  * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
8027  * routine. After that, it walks the ELS transmit queue to remove all the
8028  * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
8029  * the IOCBs with the completion callback function associated, the callback
8030  * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
8031  * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
8032  * callback function associated, the IOCB will simply be released. Finally,
8033  * it walks the ELS transmit completion queue to issue an abort IOCB to any
8034  * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
8035  * management plane IOCBs that are not part of the discovery state machine)
8036  * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
8037  **/
8038 void
8039 lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
8040 {
8041 	struct lpfc_vport *vport;
8042 
8043 	spin_lock_irq(&phba->port_list_lock);
8044 	list_for_each_entry(vport, &phba->port_list, listentry)
8045 		lpfc_els_flush_cmd(vport);
8046 	spin_unlock_irq(&phba->port_list_lock);
8047 
8048 	return;
8049 }
8050 
8051 /**
8052  * lpfc_send_els_failure_event - Posts an ELS command failure event
8053  * @phba: Pointer to hba context object.
8054  * @cmdiocbp: Pointer to command iocb which reported error.
8055  * @rspiocbp: Pointer to response iocb which reported error.
8056  *
8057  * This function sends an event when there is an ELS command
8058  * failure.
8059  **/
8060 void
8061 lpfc_send_els_failure_event(struct lpfc_hba *phba,
8062 			struct lpfc_iocbq *cmdiocbp,
8063 			struct lpfc_iocbq *rspiocbp)
8064 {
8065 	struct lpfc_vport *vport = cmdiocbp->vport;
8066 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8067 	struct lpfc_lsrjt_event lsrjt_event;
8068 	struct lpfc_fabric_event_header fabric_event;
8069 	struct ls_rjt stat;
8070 	struct lpfc_nodelist *ndlp;
8071 	uint32_t *pcmd;
8072 
8073 	ndlp = cmdiocbp->context1;
8074 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8075 		return;
8076 
8077 	if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
8078 		lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
8079 		lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
8080 		memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
8081 			sizeof(struct lpfc_name));
8082 		memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
8083 			sizeof(struct lpfc_name));
8084 		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8085 			cmdiocbp->context2)->virt);
8086 		lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
8087 		stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
8088 		lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
8089 		lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
8090 		fc_host_post_vendor_event(shost,
8091 			fc_get_event_number(),
8092 			sizeof(lsrjt_event),
8093 			(char *)&lsrjt_event,
8094 			LPFC_NL_VENDOR_ID);
8095 		return;
8096 	}
8097 	if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
8098 		(rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
8099 		fabric_event.event_type = FC_REG_FABRIC_EVENT;
8100 		if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
8101 			fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
8102 		else
8103 			fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
8104 		memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
8105 			sizeof(struct lpfc_name));
8106 		memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
8107 			sizeof(struct lpfc_name));
8108 		fc_host_post_vendor_event(shost,
8109 			fc_get_event_number(),
8110 			sizeof(fabric_event),
8111 			(char *)&fabric_event,
8112 			LPFC_NL_VENDOR_ID);
8113 		return;
8114 	}
8115 
8116 }
8117 
8118 /**
8119  * lpfc_send_els_event - Posts unsolicited els event
8120  * @vport: Pointer to vport object.
8121  * @ndlp: Pointer FC node object.
8122  * @cmd: ELS command code.
8123  *
8124  * This function posts an event when there is an incoming
8125  * unsolicited ELS command.
8126  **/
8127 static void
8128 lpfc_send_els_event(struct lpfc_vport *vport,
8129 		    struct lpfc_nodelist *ndlp,
8130 		    uint32_t *payload)
8131 {
8132 	struct lpfc_els_event_header *els_data = NULL;
8133 	struct lpfc_logo_event *logo_data = NULL;
8134 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8135 
8136 	if (*payload == ELS_CMD_LOGO) {
8137 		logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
8138 		if (!logo_data) {
8139 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8140 				"0148 Failed to allocate memory "
8141 				"for LOGO event\n");
8142 			return;
8143 		}
8144 		els_data = &logo_data->header;
8145 	} else {
8146 		els_data = kmalloc(sizeof(struct lpfc_els_event_header),
8147 			GFP_KERNEL);
8148 		if (!els_data) {
8149 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8150 				"0149 Failed to allocate memory "
8151 				"for ELS event\n");
8152 			return;
8153 		}
8154 	}
8155 	els_data->event_type = FC_REG_ELS_EVENT;
8156 	switch (*payload) {
8157 	case ELS_CMD_PLOGI:
8158 		els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
8159 		break;
8160 	case ELS_CMD_PRLO:
8161 		els_data->subcategory = LPFC_EVENT_PRLO_RCV;
8162 		break;
8163 	case ELS_CMD_ADISC:
8164 		els_data->subcategory = LPFC_EVENT_ADISC_RCV;
8165 		break;
8166 	case ELS_CMD_LOGO:
8167 		els_data->subcategory = LPFC_EVENT_LOGO_RCV;
8168 		/* Copy the WWPN in the LOGO payload */
8169 		memcpy(logo_data->logo_wwpn, &payload[2],
8170 			sizeof(struct lpfc_name));
8171 		break;
8172 	default:
8173 		kfree(els_data);
8174 		return;
8175 	}
8176 	memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
8177 	memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
8178 	if (*payload == ELS_CMD_LOGO) {
8179 		fc_host_post_vendor_event(shost,
8180 			fc_get_event_number(),
8181 			sizeof(struct lpfc_logo_event),
8182 			(char *)logo_data,
8183 			LPFC_NL_VENDOR_ID);
8184 		kfree(logo_data);
8185 	} else {
8186 		fc_host_post_vendor_event(shost,
8187 			fc_get_event_number(),
8188 			sizeof(struct lpfc_els_event_header),
8189 			(char *)els_data,
8190 			LPFC_NL_VENDOR_ID);
8191 		kfree(els_data);
8192 	}
8193 
8194 	return;
8195 }
8196 
8197 
8198 /**
8199  * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
8200  * @phba: pointer to lpfc hba data structure.
8201  * @pring: pointer to a SLI ring.
8202  * @vport: pointer to a host virtual N_Port data structure.
8203  * @elsiocb: pointer to lpfc els command iocb data structure.
8204  *
8205  * This routine is used for processing the IOCB associated with a unsolicited
8206  * event. It first determines whether there is an existing ndlp that matches
8207  * the DID from the unsolicited IOCB. If not, it will create a new one with
8208  * the DID from the unsolicited IOCB. The ELS command from the unsolicited
8209  * IOCB is then used to invoke the proper routine and to set up proper state
8210  * of the discovery state machine.
8211  **/
8212 static void
8213 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8214 		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
8215 {
8216 	struct Scsi_Host  *shost;
8217 	struct lpfc_nodelist *ndlp;
8218 	struct ls_rjt stat;
8219 	uint32_t *payload;
8220 	uint32_t cmd, did, newnode;
8221 	uint8_t rjt_exp, rjt_err = 0, init_link = 0;
8222 	IOCB_t *icmd = &elsiocb->iocb;
8223 	LPFC_MBOXQ_t *mbox;
8224 
8225 	if (!vport || !(elsiocb->context2))
8226 		goto dropit;
8227 
8228 	newnode = 0;
8229 	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
8230 	cmd = *payload;
8231 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
8232 		lpfc_post_buffer(phba, pring, 1);
8233 
8234 	did = icmd->un.rcvels.remoteID;
8235 	if (icmd->ulpStatus) {
8236 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8237 			"RCV Unsol ELS:  status:x%x/x%x did:x%x",
8238 			icmd->ulpStatus, icmd->un.ulpWord[4], did);
8239 		goto dropit;
8240 	}
8241 
8242 	/* Check to see if link went down during discovery */
8243 	if (lpfc_els_chk_latt(vport))
8244 		goto dropit;
8245 
8246 	/* Ignore traffic received during vport shutdown. */
8247 	if (vport->load_flag & FC_UNLOADING)
8248 		goto dropit;
8249 
8250 	/* If NPort discovery is delayed drop incoming ELS */
8251 	if ((vport->fc_flag & FC_DISC_DELAYED) &&
8252 			(cmd != ELS_CMD_PLOGI))
8253 		goto dropit;
8254 
8255 	ndlp = lpfc_findnode_did(vport, did);
8256 	if (!ndlp) {
8257 		/* Cannot find existing Fabric ndlp, so allocate a new one */
8258 		ndlp = lpfc_nlp_init(vport, did);
8259 		if (!ndlp)
8260 			goto dropit;
8261 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
8262 		newnode = 1;
8263 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
8264 			ndlp->nlp_type |= NLP_FABRIC;
8265 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
8266 		ndlp = lpfc_enable_node(vport, ndlp,
8267 					NLP_STE_UNUSED_NODE);
8268 		if (!ndlp)
8269 			goto dropit;
8270 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
8271 		newnode = 1;
8272 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
8273 			ndlp->nlp_type |= NLP_FABRIC;
8274 	} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
8275 		/* This is similar to the new node path */
8276 		ndlp = lpfc_nlp_get(ndlp);
8277 		if (!ndlp)
8278 			goto dropit;
8279 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
8280 		newnode = 1;
8281 	}
8282 
8283 	phba->fc_stat.elsRcvFrame++;
8284 
8285 	/*
8286 	 * Do not process any unsolicited ELS commands
8287 	 * if the ndlp is in DEV_LOSS
8288 	 */
8289 	shost = lpfc_shost_from_vport(vport);
8290 	spin_lock_irq(shost->host_lock);
8291 	if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
8292 		spin_unlock_irq(shost->host_lock);
8293 		goto dropit;
8294 	}
8295 	spin_unlock_irq(shost->host_lock);
8296 
8297 	elsiocb->context1 = lpfc_nlp_get(ndlp);
8298 	elsiocb->vport = vport;
8299 
8300 	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
8301 		cmd &= ELS_CMD_MASK;
8302 	}
8303 	/* ELS command <elsCmd> received from NPORT <did> */
8304 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8305 			 "0112 ELS command x%x received from NPORT x%x "
8306 			 "Data: x%x x%x x%x x%x\n",
8307 			cmd, did, vport->port_state, vport->fc_flag,
8308 			vport->fc_myDID, vport->fc_prevDID);
8309 
8310 	/* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */
8311 	if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
8312 	    (cmd != ELS_CMD_FLOGI) &&
8313 	    !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) {
8314 		rjt_err = LSRJT_LOGICAL_BSY;
8315 		rjt_exp = LSEXP_NOTHING_MORE;
8316 		goto lsrjt;
8317 	}
8318 
8319 	switch (cmd) {
8320 	case ELS_CMD_PLOGI:
8321 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8322 			"RCV PLOGI:       did:x%x/ste:x%x flg:x%x",
8323 			did, vport->port_state, ndlp->nlp_flag);
8324 
8325 		phba->fc_stat.elsRcvPLOGI++;
8326 		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
8327 		if (phba->sli_rev == LPFC_SLI_REV4 &&
8328 		    (phba->pport->fc_flag & FC_PT2PT)) {
8329 			vport->fc_prevDID = vport->fc_myDID;
8330 			/* Our DID needs to be updated before registering
8331 			 * the vfi. This is done in lpfc_rcv_plogi but
8332 			 * that is called after the reg_vfi.
8333 			 */
8334 			vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
8335 			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8336 					 "3312 Remote port assigned DID x%x "
8337 					 "%x\n", vport->fc_myDID,
8338 					 vport->fc_prevDID);
8339 		}
8340 
8341 		lpfc_send_els_event(vport, ndlp, payload);
8342 
8343 		/* If Nport discovery is delayed, reject PLOGIs */
8344 		if (vport->fc_flag & FC_DISC_DELAYED) {
8345 			rjt_err = LSRJT_UNABLE_TPC;
8346 			rjt_exp = LSEXP_NOTHING_MORE;
8347 			break;
8348 		}
8349 
8350 		if (vport->port_state < LPFC_DISC_AUTH) {
8351 			if (!(phba->pport->fc_flag & FC_PT2PT) ||
8352 				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
8353 				rjt_err = LSRJT_UNABLE_TPC;
8354 				rjt_exp = LSEXP_NOTHING_MORE;
8355 				break;
8356 			}
8357 		}
8358 
8359 		spin_lock_irq(shost->host_lock);
8360 		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
8361 		spin_unlock_irq(shost->host_lock);
8362 
8363 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
8364 					NLP_EVT_RCV_PLOGI);
8365 
8366 		break;
8367 	case ELS_CMD_FLOGI:
8368 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8369 			"RCV FLOGI:       did:x%x/ste:x%x flg:x%x",
8370 			did, vport->port_state, ndlp->nlp_flag);
8371 
8372 		phba->fc_stat.elsRcvFLOGI++;
8373 
8374 		/* If the driver believes fabric discovery is done and is ready,
8375 		 * bounce the link.  There is some descrepancy.
8376 		 */
8377 		if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
8378 		    vport->fc_flag & FC_PT2PT &&
8379 		    vport->rcv_flogi_cnt >= 1) {
8380 			rjt_err = LSRJT_LOGICAL_BSY;
8381 			rjt_exp = LSEXP_NOTHING_MORE;
8382 			init_link++;
8383 			goto lsrjt;
8384 		}
8385 
8386 		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
8387 		if (newnode)
8388 			lpfc_nlp_put(ndlp);
8389 		break;
8390 	case ELS_CMD_LOGO:
8391 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8392 			"RCV LOGO:        did:x%x/ste:x%x flg:x%x",
8393 			did, vport->port_state, ndlp->nlp_flag);
8394 
8395 		phba->fc_stat.elsRcvLOGO++;
8396 		lpfc_send_els_event(vport, ndlp, payload);
8397 		if (vport->port_state < LPFC_DISC_AUTH) {
8398 			rjt_err = LSRJT_UNABLE_TPC;
8399 			rjt_exp = LSEXP_NOTHING_MORE;
8400 			break;
8401 		}
8402 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
8403 		break;
8404 	case ELS_CMD_PRLO:
8405 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8406 			"RCV PRLO:        did:x%x/ste:x%x flg:x%x",
8407 			did, vport->port_state, ndlp->nlp_flag);
8408 
8409 		phba->fc_stat.elsRcvPRLO++;
8410 		lpfc_send_els_event(vport, ndlp, payload);
8411 		if (vport->port_state < LPFC_DISC_AUTH) {
8412 			rjt_err = LSRJT_UNABLE_TPC;
8413 			rjt_exp = LSEXP_NOTHING_MORE;
8414 			break;
8415 		}
8416 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
8417 		break;
8418 	case ELS_CMD_LCB:
8419 		phba->fc_stat.elsRcvLCB++;
8420 		lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
8421 		break;
8422 	case ELS_CMD_RDP:
8423 		phba->fc_stat.elsRcvRDP++;
8424 		lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
8425 		break;
8426 	case ELS_CMD_RSCN:
8427 		phba->fc_stat.elsRcvRSCN++;
8428 		lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
8429 		if (newnode)
8430 			lpfc_nlp_put(ndlp);
8431 		break;
8432 	case ELS_CMD_ADISC:
8433 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8434 			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
8435 			did, vport->port_state, ndlp->nlp_flag);
8436 
8437 		lpfc_send_els_event(vport, ndlp, payload);
8438 		phba->fc_stat.elsRcvADISC++;
8439 		if (vport->port_state < LPFC_DISC_AUTH) {
8440 			rjt_err = LSRJT_UNABLE_TPC;
8441 			rjt_exp = LSEXP_NOTHING_MORE;
8442 			break;
8443 		}
8444 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
8445 					NLP_EVT_RCV_ADISC);
8446 		break;
8447 	case ELS_CMD_PDISC:
8448 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8449 			"RCV PDISC:       did:x%x/ste:x%x flg:x%x",
8450 			did, vport->port_state, ndlp->nlp_flag);
8451 
8452 		phba->fc_stat.elsRcvPDISC++;
8453 		if (vport->port_state < LPFC_DISC_AUTH) {
8454 			rjt_err = LSRJT_UNABLE_TPC;
8455 			rjt_exp = LSEXP_NOTHING_MORE;
8456 			break;
8457 		}
8458 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
8459 					NLP_EVT_RCV_PDISC);
8460 		break;
8461 	case ELS_CMD_FARPR:
8462 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8463 			"RCV FARPR:       did:x%x/ste:x%x flg:x%x",
8464 			did, vport->port_state, ndlp->nlp_flag);
8465 
8466 		phba->fc_stat.elsRcvFARPR++;
8467 		lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
8468 		break;
8469 	case ELS_CMD_FARP:
8470 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8471 			"RCV FARP:        did:x%x/ste:x%x flg:x%x",
8472 			did, vport->port_state, ndlp->nlp_flag);
8473 
8474 		phba->fc_stat.elsRcvFARP++;
8475 		lpfc_els_rcv_farp(vport, elsiocb, ndlp);
8476 		break;
8477 	case ELS_CMD_FAN:
8478 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8479 			"RCV FAN:         did:x%x/ste:x%x flg:x%x",
8480 			did, vport->port_state, ndlp->nlp_flag);
8481 
8482 		phba->fc_stat.elsRcvFAN++;
8483 		lpfc_els_rcv_fan(vport, elsiocb, ndlp);
8484 		break;
8485 	case ELS_CMD_PRLI:
8486 	case ELS_CMD_NVMEPRLI:
8487 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8488 			"RCV PRLI:        did:x%x/ste:x%x flg:x%x",
8489 			did, vport->port_state, ndlp->nlp_flag);
8490 
8491 		phba->fc_stat.elsRcvPRLI++;
8492 		if ((vport->port_state < LPFC_DISC_AUTH) &&
8493 		    (vport->fc_flag & FC_FABRIC)) {
8494 			rjt_err = LSRJT_UNABLE_TPC;
8495 			rjt_exp = LSEXP_NOTHING_MORE;
8496 			break;
8497 		}
8498 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
8499 		break;
8500 	case ELS_CMD_LIRR:
8501 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8502 			"RCV LIRR:        did:x%x/ste:x%x flg:x%x",
8503 			did, vport->port_state, ndlp->nlp_flag);
8504 
8505 		phba->fc_stat.elsRcvLIRR++;
8506 		lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
8507 		if (newnode)
8508 			lpfc_nlp_put(ndlp);
8509 		break;
8510 	case ELS_CMD_RLS:
8511 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8512 			"RCV RLS:         did:x%x/ste:x%x flg:x%x",
8513 			did, vport->port_state, ndlp->nlp_flag);
8514 
8515 		phba->fc_stat.elsRcvRLS++;
8516 		lpfc_els_rcv_rls(vport, elsiocb, ndlp);
8517 		if (newnode)
8518 			lpfc_nlp_put(ndlp);
8519 		break;
8520 	case ELS_CMD_RPS:
8521 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8522 			"RCV RPS:         did:x%x/ste:x%x flg:x%x",
8523 			did, vport->port_state, ndlp->nlp_flag);
8524 
8525 		phba->fc_stat.elsRcvRPS++;
8526 		lpfc_els_rcv_rps(vport, elsiocb, ndlp);
8527 		if (newnode)
8528 			lpfc_nlp_put(ndlp);
8529 		break;
8530 	case ELS_CMD_RPL:
8531 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8532 			"RCV RPL:         did:x%x/ste:x%x flg:x%x",
8533 			did, vport->port_state, ndlp->nlp_flag);
8534 
8535 		phba->fc_stat.elsRcvRPL++;
8536 		lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
8537 		if (newnode)
8538 			lpfc_nlp_put(ndlp);
8539 		break;
8540 	case ELS_CMD_RNID:
8541 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8542 			"RCV RNID:        did:x%x/ste:x%x flg:x%x",
8543 			did, vport->port_state, ndlp->nlp_flag);
8544 
8545 		phba->fc_stat.elsRcvRNID++;
8546 		lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
8547 		if (newnode)
8548 			lpfc_nlp_put(ndlp);
8549 		break;
8550 	case ELS_CMD_RTV:
8551 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8552 			"RCV RTV:        did:x%x/ste:x%x flg:x%x",
8553 			did, vport->port_state, ndlp->nlp_flag);
8554 		phba->fc_stat.elsRcvRTV++;
8555 		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
8556 		if (newnode)
8557 			lpfc_nlp_put(ndlp);
8558 		break;
8559 	case ELS_CMD_RRQ:
8560 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8561 			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
8562 			did, vport->port_state, ndlp->nlp_flag);
8563 
8564 		phba->fc_stat.elsRcvRRQ++;
8565 		lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
8566 		if (newnode)
8567 			lpfc_nlp_put(ndlp);
8568 		break;
8569 	case ELS_CMD_ECHO:
8570 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8571 			"RCV ECHO:        did:x%x/ste:x%x flg:x%x",
8572 			did, vport->port_state, ndlp->nlp_flag);
8573 
8574 		phba->fc_stat.elsRcvECHO++;
8575 		lpfc_els_rcv_echo(vport, elsiocb, ndlp);
8576 		if (newnode)
8577 			lpfc_nlp_put(ndlp);
8578 		break;
8579 	case ELS_CMD_REC:
8580 		/* receive this due to exchange closed */
8581 		rjt_err = LSRJT_UNABLE_TPC;
8582 		rjt_exp = LSEXP_INVALID_OX_RX;
8583 		break;
8584 	case ELS_CMD_FPIN:
8585 		/*
8586 		 * Received FPIN from fabric - pass it to the
8587 		 * transport FPIN handler.
8588 		 */
8589 		fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len,
8590 				(char *)payload);
8591 		break;
8592 	default:
8593 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8594 			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
8595 			cmd, did, vport->port_state);
8596 
8597 		/* Unsupported ELS command, reject */
8598 		rjt_err = LSRJT_CMD_UNSUPPORTED;
8599 		rjt_exp = LSEXP_NOTHING_MORE;
8600 
8601 		/* Unknown ELS command <elsCmd> received from NPORT <did> */
8602 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8603 				 "0115 Unknown ELS command x%x "
8604 				 "received from NPORT x%x\n", cmd, did);
8605 		if (newnode)
8606 			lpfc_nlp_put(ndlp);
8607 		break;
8608 	}
8609 
8610 lsrjt:
8611 	/* check if need to LS_RJT received ELS cmd */
8612 	if (rjt_err) {
8613 		memset(&stat, 0, sizeof(stat));
8614 		stat.un.b.lsRjtRsnCode = rjt_err;
8615 		stat.un.b.lsRjtRsnCodeExp = rjt_exp;
8616 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
8617 			NULL);
8618 	}
8619 
8620 	lpfc_nlp_put(elsiocb->context1);
8621 	elsiocb->context1 = NULL;
8622 
8623 	/* Special case.  Driver received an unsolicited command that
8624 	 * unsupportable given the driver's current state.  Reset the
8625 	 * link and start over.
8626 	 */
8627 	if (init_link) {
8628 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8629 		if (!mbox)
8630 			return;
8631 		lpfc_linkdown(phba);
8632 		lpfc_init_link(phba, mbox,
8633 			       phba->cfg_topology,
8634 			       phba->cfg_link_speed);
8635 		mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
8636 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
8637 		mbox->vport = vport;
8638 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
8639 		    MBX_NOT_FINISHED)
8640 			mempool_free(mbox, phba->mbox_mem_pool);
8641 	}
8642 
8643 	return;
8644 
8645 dropit:
8646 	if (vport && !(vport->load_flag & FC_UNLOADING))
8647 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8648 			"0111 Dropping received ELS cmd "
8649 			"Data: x%x x%x x%x\n",
8650 			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
8651 	phba->fc_stat.elsRcvDrop++;
8652 }
8653 
8654 /**
8655  * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
8656  * @phba: pointer to lpfc hba data structure.
8657  * @pring: pointer to a SLI ring.
8658  * @elsiocb: pointer to lpfc els iocb data structure.
8659  *
8660  * This routine is used to process an unsolicited event received from a SLI
8661  * (Service Level Interface) ring. The actual processing of the data buffer
8662  * associated with the unsolicited event is done by invoking the routine
8663  * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
8664  * SLI ring on which the unsolicited event was received.
8665  **/
8666 void
8667 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8668 		     struct lpfc_iocbq *elsiocb)
8669 {
8670 	struct lpfc_vport *vport = phba->pport;
8671 	IOCB_t *icmd = &elsiocb->iocb;
8672 	dma_addr_t paddr;
8673 	struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
8674 	struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
8675 
8676 	elsiocb->context1 = NULL;
8677 	elsiocb->context2 = NULL;
8678 	elsiocb->context3 = NULL;
8679 
8680 	if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
8681 		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
8682 	} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
8683 		   (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
8684 		   IOERR_RCV_BUFFER_WAITING) {
8685 		phba->fc_stat.NoRcvBuf++;
8686 		/* Not enough posted buffers; Try posting more buffers */
8687 		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
8688 			lpfc_post_buffer(phba, pring, 0);
8689 		return;
8690 	}
8691 
8692 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
8693 	    (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
8694 	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
8695 		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
8696 			vport = phba->pport;
8697 		else
8698 			vport = lpfc_find_vport_by_vpid(phba,
8699 						icmd->unsli3.rcvsli3.vpi);
8700 	}
8701 
8702 	/* If there are no BDEs associated
8703 	 * with this IOCB, there is nothing to do.
8704 	 */
8705 	if (icmd->ulpBdeCount == 0)
8706 		return;
8707 
8708 	/* type of ELS cmd is first 32bit word
8709 	 * in packet
8710 	 */
8711 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
8712 		elsiocb->context2 = bdeBuf1;
8713 	} else {
8714 		paddr = getPaddr(icmd->un.cont64[0].addrHigh,
8715 				 icmd->un.cont64[0].addrLow);
8716 		elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
8717 							     paddr);
8718 	}
8719 
8720 	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
8721 	/*
8722 	 * The different unsolicited event handlers would tell us
8723 	 * if they are done with "mp" by setting context2 to NULL.
8724 	 */
8725 	if (elsiocb->context2) {
8726 		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
8727 		elsiocb->context2 = NULL;
8728 	}
8729 
8730 	/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
8731 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
8732 	    icmd->ulpBdeCount == 2) {
8733 		elsiocb->context2 = bdeBuf2;
8734 		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
8735 		/* free mp if we are done with it */
8736 		if (elsiocb->context2) {
8737 			lpfc_in_buf_free(phba, elsiocb->context2);
8738 			elsiocb->context2 = NULL;
8739 		}
8740 	}
8741 }
8742 
8743 static void
8744 lpfc_start_fdmi(struct lpfc_vport *vport)
8745 {
8746 	struct lpfc_nodelist *ndlp;
8747 
8748 	/* If this is the first time, allocate an ndlp and initialize
8749 	 * it. Otherwise, make sure the node is enabled and then do the
8750 	 * login.
8751 	 */
8752 	ndlp = lpfc_findnode_did(vport, FDMI_DID);
8753 	if (!ndlp) {
8754 		ndlp = lpfc_nlp_init(vport, FDMI_DID);
8755 		if (ndlp) {
8756 			ndlp->nlp_type |= NLP_FABRIC;
8757 		} else {
8758 			return;
8759 		}
8760 	}
8761 	if (!NLP_CHK_NODE_ACT(ndlp))
8762 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
8763 
8764 	if (ndlp) {
8765 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8766 		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
8767 	}
8768 }
8769 
8770 /**
8771  * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
8772  * @phba: pointer to lpfc hba data structure.
8773  * @vport: pointer to a virtual N_Port data structure.
8774  *
8775  * This routine issues a Port Login (PLOGI) to the Name Server with
8776  * State Change Request (SCR) for a @vport. This routine will create an
8777  * ndlp for the Name Server associated to the @vport if such node does
8778  * not already exist. The PLOGI to Name Server is issued by invoking the
8779  * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
8780  * (FDMI) is configured to the @vport, a FDMI node will be created and
8781  * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
8782  **/
8783 void
8784 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
8785 {
8786 	struct lpfc_nodelist *ndlp;
8787 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8788 
8789 	/*
8790 	 * If lpfc_delay_discovery parameter is set and the clean address
8791 	 * bit is cleared and fc fabric parameters chenged, delay FC NPort
8792 	 * discovery.
8793 	 */
8794 	spin_lock_irq(shost->host_lock);
8795 	if (vport->fc_flag & FC_DISC_DELAYED) {
8796 		spin_unlock_irq(shost->host_lock);
8797 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
8798 				"3334 Delay fc port discovery for %d seconds\n",
8799 				phba->fc_ratov);
8800 		mod_timer(&vport->delayed_disc_tmo,
8801 			jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
8802 		return;
8803 	}
8804 	spin_unlock_irq(shost->host_lock);
8805 
8806 	ndlp = lpfc_findnode_did(vport, NameServer_DID);
8807 	if (!ndlp) {
8808 		ndlp = lpfc_nlp_init(vport, NameServer_DID);
8809 		if (!ndlp) {
8810 			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8811 				lpfc_disc_start(vport);
8812 				return;
8813 			}
8814 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8815 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8816 					 "0251 NameServer login: no memory\n");
8817 			return;
8818 		}
8819 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
8820 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
8821 		if (!ndlp) {
8822 			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8823 				lpfc_disc_start(vport);
8824 				return;
8825 			}
8826 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8827 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8828 					"0348 NameServer login: node freed\n");
8829 			return;
8830 		}
8831 	}
8832 	ndlp->nlp_type |= NLP_FABRIC;
8833 
8834 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8835 
8836 	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
8837 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8838 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8839 				 "0252 Cannot issue NameServer login\n");
8840 		return;
8841 	}
8842 
8843 	if ((phba->cfg_enable_SmartSAN ||
8844 	     (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
8845 	     (vport->load_flag & FC_ALLOW_FDMI))
8846 		lpfc_start_fdmi(vport);
8847 }
8848 
8849 /**
8850  * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
8851  * @phba: pointer to lpfc hba data structure.
8852  * @pmb: pointer to the driver internal queue element for mailbox command.
8853  *
8854  * This routine is the completion callback function to register new vport
8855  * mailbox command. If the new vport mailbox command completes successfully,
8856  * the fabric registration login shall be performed on physical port (the
8857  * new vport created is actually a physical port, with VPI 0) or the port
8858  * login to Name Server for State Change Request (SCR) will be performed
8859  * on virtual port (real virtual port, with VPI greater than 0).
8860  **/
8861 static void
8862 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8863 {
8864 	struct lpfc_vport *vport = pmb->vport;
8865 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8866 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
8867 	MAILBOX_t *mb = &pmb->u.mb;
8868 	int rc;
8869 
8870 	spin_lock_irq(shost->host_lock);
8871 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
8872 	spin_unlock_irq(shost->host_lock);
8873 
8874 	if (mb->mbxStatus) {
8875 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8876 				"0915 Register VPI failed : Status: x%x"
8877 				" upd bit: x%x \n", mb->mbxStatus,
8878 				 mb->un.varRegVpi.upd);
8879 		if (phba->sli_rev == LPFC_SLI_REV4 &&
8880 			mb->un.varRegVpi.upd)
8881 			goto mbox_err_exit ;
8882 
8883 		switch (mb->mbxStatus) {
8884 		case 0x11:	/* unsupported feature */
8885 		case 0x9603:	/* max_vpi exceeded */
8886 		case 0x9602:	/* Link event since CLEAR_LA */
8887 			/* giving up on vport registration */
8888 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8889 			spin_lock_irq(shost->host_lock);
8890 			vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
8891 			spin_unlock_irq(shost->host_lock);
8892 			lpfc_can_disctmo(vport);
8893 			break;
8894 		/* If reg_vpi fail with invalid VPI status, re-init VPI */
8895 		case 0x20:
8896 			spin_lock_irq(shost->host_lock);
8897 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
8898 			spin_unlock_irq(shost->host_lock);
8899 			lpfc_init_vpi(phba, pmb, vport->vpi);
8900 			pmb->vport = vport;
8901 			pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
8902 			rc = lpfc_sli_issue_mbox(phba, pmb,
8903 				MBX_NOWAIT);
8904 			if (rc == MBX_NOT_FINISHED) {
8905 				lpfc_printf_vlog(vport,
8906 					KERN_ERR, LOG_MBOX,
8907 					"2732 Failed to issue INIT_VPI"
8908 					" mailbox command\n");
8909 			} else {
8910 				lpfc_nlp_put(ndlp);
8911 				return;
8912 			}
8913 			/* fall through */
8914 		default:
8915 			/* Try to recover from this error */
8916 			if (phba->sli_rev == LPFC_SLI_REV4)
8917 				lpfc_sli4_unreg_all_rpis(vport);
8918 			lpfc_mbx_unreg_vpi(vport);
8919 			spin_lock_irq(shost->host_lock);
8920 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
8921 			spin_unlock_irq(shost->host_lock);
8922 			if (mb->mbxStatus == MBX_NOT_FINISHED)
8923 				break;
8924 			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
8925 			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
8926 				if (phba->sli_rev == LPFC_SLI_REV4)
8927 					lpfc_issue_init_vfi(vport);
8928 				else
8929 					lpfc_initial_flogi(vport);
8930 			} else {
8931 				lpfc_initial_fdisc(vport);
8932 			}
8933 			break;
8934 		}
8935 	} else {
8936 		spin_lock_irq(shost->host_lock);
8937 		vport->vpi_state |= LPFC_VPI_REGISTERED;
8938 		spin_unlock_irq(shost->host_lock);
8939 		if (vport == phba->pport) {
8940 			if (phba->sli_rev < LPFC_SLI_REV4)
8941 				lpfc_issue_fabric_reglogin(vport);
8942 			else {
8943 				/*
8944 				 * If the physical port is instantiated using
8945 				 * FDISC, do not start vport discovery.
8946 				 */
8947 				if (vport->port_state != LPFC_FDISC)
8948 					lpfc_start_fdiscs(phba);
8949 				lpfc_do_scr_ns_plogi(phba, vport);
8950 			}
8951 		} else
8952 			lpfc_do_scr_ns_plogi(phba, vport);
8953 	}
8954 mbox_err_exit:
8955 	/* Now, we decrement the ndlp reference count held for this
8956 	 * callback function
8957 	 */
8958 	lpfc_nlp_put(ndlp);
8959 
8960 	mempool_free(pmb, phba->mbox_mem_pool);
8961 	return;
8962 }
8963 
8964 /**
8965  * lpfc_register_new_vport - Register a new vport with a HBA
8966  * @phba: pointer to lpfc hba data structure.
8967  * @vport: pointer to a host virtual N_Port data structure.
8968  * @ndlp: pointer to a node-list data structure.
8969  *
8970  * This routine registers the @vport as a new virtual port with a HBA.
8971  * It is done through a registering vpi mailbox command.
8972  **/
8973 void
8974 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
8975 			struct lpfc_nodelist *ndlp)
8976 {
8977 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8978 	LPFC_MBOXQ_t *mbox;
8979 
8980 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8981 	if (mbox) {
8982 		lpfc_reg_vpi(vport, mbox);
8983 		mbox->vport = vport;
8984 		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
8985 		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
8986 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8987 		    == MBX_NOT_FINISHED) {
8988 			/* mailbox command not success, decrement ndlp
8989 			 * reference count for this command
8990 			 */
8991 			lpfc_nlp_put(ndlp);
8992 			mempool_free(mbox, phba->mbox_mem_pool);
8993 
8994 			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8995 				"0253 Register VPI: Can't send mbox\n");
8996 			goto mbox_err_exit;
8997 		}
8998 	} else {
8999 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
9000 				 "0254 Register VPI: no memory\n");
9001 		goto mbox_err_exit;
9002 	}
9003 	return;
9004 
9005 mbox_err_exit:
9006 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9007 	spin_lock_irq(shost->host_lock);
9008 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
9009 	spin_unlock_irq(shost->host_lock);
9010 	return;
9011 }
9012 
9013 /**
9014  * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
9015  * @phba: pointer to lpfc hba data structure.
9016  *
9017  * This routine cancels the retry delay timers to all the vports.
9018  **/
9019 void
9020 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
9021 {
9022 	struct lpfc_vport **vports;
9023 	struct lpfc_nodelist *ndlp;
9024 	uint32_t link_state;
9025 	int i;
9026 
9027 	/* Treat this failure as linkdown for all vports */
9028 	link_state = phba->link_state;
9029 	lpfc_linkdown(phba);
9030 	phba->link_state = link_state;
9031 
9032 	vports = lpfc_create_vport_work_array(phba);
9033 
9034 	if (vports) {
9035 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9036 			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
9037 			if (ndlp)
9038 				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
9039 			lpfc_els_flush_cmd(vports[i]);
9040 		}
9041 		lpfc_destroy_vport_work_array(phba, vports);
9042 	}
9043 }
9044 
9045 /**
9046  * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
9047  * @phba: pointer to lpfc hba data structure.
9048  *
9049  * This routine abort all pending discovery commands and
9050  * start a timer to retry FLOGI for the physical port
9051  * discovery.
9052  **/
9053 void
9054 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
9055 {
9056 	struct lpfc_nodelist *ndlp;
9057 	struct Scsi_Host  *shost;
9058 
9059 	/* Cancel the all vports retry delay retry timers */
9060 	lpfc_cancel_all_vport_retry_delay_timer(phba);
9061 
9062 	/* If fabric require FLOGI, then re-instantiate physical login */
9063 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
9064 	if (!ndlp)
9065 		return;
9066 
9067 	shost = lpfc_shost_from_vport(phba->pport);
9068 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
9069 	spin_lock_irq(shost->host_lock);
9070 	ndlp->nlp_flag |= NLP_DELAY_TMO;
9071 	spin_unlock_irq(shost->host_lock);
9072 	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
9073 	phba->pport->port_state = LPFC_FLOGI;
9074 	return;
9075 }
9076 
9077 /**
9078  * lpfc_fabric_login_reqd - Check if FLOGI required.
9079  * @phba: pointer to lpfc hba data structure.
9080  * @cmdiocb: pointer to FDISC command iocb.
9081  * @rspiocb: pointer to FDISC response iocb.
9082  *
9083  * This routine checks if a FLOGI is reguired for FDISC
9084  * to succeed.
9085  **/
9086 static int
9087 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
9088 		struct lpfc_iocbq *cmdiocb,
9089 		struct lpfc_iocbq *rspiocb)
9090 {
9091 
9092 	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
9093 		(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
9094 		return 0;
9095 	else
9096 		return 1;
9097 }
9098 
9099 /**
9100  * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
9101  * @phba: pointer to lpfc hba data structure.
9102  * @cmdiocb: pointer to lpfc command iocb data structure.
9103  * @rspiocb: pointer to lpfc response iocb data structure.
9104  *
9105  * This routine is the completion callback function to a Fabric Discover
9106  * (FDISC) ELS command. Since all the FDISC ELS commands are issued
9107  * single threaded, each FDISC completion callback function will reset
9108  * the discovery timer for all vports such that the timers will not get
9109  * unnecessary timeout. The function checks the FDISC IOCB status. If error
9110  * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
9111  * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
9112  * assigned to the vport has been changed with the completion of the FDISC
9113  * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
9114  * are unregistered from the HBA, and then the lpfc_register_new_vport()
9115  * routine is invoked to register new vport with the HBA. Otherwise, the
9116  * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
9117  * Server for State Change Request (SCR).
9118  **/
9119 static void
9120 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9121 		    struct lpfc_iocbq *rspiocb)
9122 {
9123 	struct lpfc_vport *vport = cmdiocb->vport;
9124 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
9125 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
9126 	struct lpfc_nodelist *np;
9127 	struct lpfc_nodelist *next_np;
9128 	IOCB_t *irsp = &rspiocb->iocb;
9129 	struct lpfc_iocbq *piocb;
9130 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
9131 	struct serv_parm *sp;
9132 	uint8_t fabric_param_changed;
9133 
9134 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9135 			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
9136 			 irsp->ulpStatus, irsp->un.ulpWord[4],
9137 			 vport->fc_prevDID);
9138 	/* Since all FDISCs are being single threaded, we
9139 	 * must reset the discovery timer for ALL vports
9140 	 * waiting to send FDISC when one completes.
9141 	 */
9142 	list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
9143 		lpfc_set_disctmo(piocb->vport);
9144 	}
9145 
9146 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9147 		"FDISC cmpl:      status:x%x/x%x prevdid:x%x",
9148 		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
9149 
9150 	if (irsp->ulpStatus) {
9151 
9152 		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
9153 			lpfc_retry_pport_discovery(phba);
9154 			goto out;
9155 		}
9156 
9157 		/* Check for retry */
9158 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
9159 			goto out;
9160 		/* FDISC failed */
9161 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
9162 				 "0126 FDISC failed. (x%x/x%x)\n",
9163 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
9164 		goto fdisc_failed;
9165 	}
9166 	spin_lock_irq(shost->host_lock);
9167 	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
9168 	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
9169 	vport->fc_flag |= FC_FABRIC;
9170 	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
9171 		vport->fc_flag |=  FC_PUBLIC_LOOP;
9172 	spin_unlock_irq(shost->host_lock);
9173 
9174 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
9175 	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
9176 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
9177 	if (!prsp)
9178 		goto out;
9179 	sp = prsp->virt + sizeof(uint32_t);
9180 	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
9181 	memcpy(&vport->fabric_portname, &sp->portName,
9182 		sizeof(struct lpfc_name));
9183 	memcpy(&vport->fabric_nodename, &sp->nodeName,
9184 		sizeof(struct lpfc_name));
9185 	if (fabric_param_changed &&
9186 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
9187 		/* If our NportID changed, we need to ensure all
9188 		 * remaining NPORTs get unreg_login'ed so we can
9189 		 * issue unreg_vpi.
9190 		 */
9191 		list_for_each_entry_safe(np, next_np,
9192 			&vport->fc_nodes, nlp_listp) {
9193 			if (!NLP_CHK_NODE_ACT(ndlp) ||
9194 			    (np->nlp_state != NLP_STE_NPR_NODE) ||
9195 			    !(np->nlp_flag & NLP_NPR_ADISC))
9196 				continue;
9197 			spin_lock_irq(shost->host_lock);
9198 			np->nlp_flag &= ~NLP_NPR_ADISC;
9199 			spin_unlock_irq(shost->host_lock);
9200 			lpfc_unreg_rpi(vport, np);
9201 		}
9202 		lpfc_cleanup_pending_mbox(vport);
9203 
9204 		if (phba->sli_rev == LPFC_SLI_REV4)
9205 			lpfc_sli4_unreg_all_rpis(vport);
9206 
9207 		lpfc_mbx_unreg_vpi(vport);
9208 		spin_lock_irq(shost->host_lock);
9209 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
9210 		if (phba->sli_rev == LPFC_SLI_REV4)
9211 			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
9212 		else
9213 			vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
9214 		spin_unlock_irq(shost->host_lock);
9215 	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
9216 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
9217 		/*
9218 		 * Driver needs to re-reg VPI in order for f/w
9219 		 * to update the MAC address.
9220 		 */
9221 		lpfc_register_new_vport(phba, vport, ndlp);
9222 		goto out;
9223 	}
9224 
9225 	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
9226 		lpfc_issue_init_vpi(vport);
9227 	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
9228 		lpfc_register_new_vport(phba, vport, ndlp);
9229 	else
9230 		lpfc_do_scr_ns_plogi(phba, vport);
9231 	goto out;
9232 fdisc_failed:
9233 	if (vport->fc_vport &&
9234 	    (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
9235 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9236 	/* Cancel discovery timer */
9237 	lpfc_can_disctmo(vport);
9238 	lpfc_nlp_put(ndlp);
9239 out:
9240 	lpfc_els_free_iocb(phba, cmdiocb);
9241 }
9242 
9243 /**
9244  * lpfc_issue_els_fdisc - Issue a fdisc iocb command
9245  * @vport: pointer to a virtual N_Port data structure.
9246  * @ndlp: pointer to a node-list data structure.
9247  * @retry: number of retries to the command IOCB.
9248  *
9249  * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
9250  * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
9251  * routine to issue the IOCB, which makes sure only one outstanding fabric
9252  * IOCB will be sent off HBA at any given time.
9253  *
9254  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
9255  * will be incremented by 1 for holding the ndlp and the reference to ndlp
9256  * will be stored into the context1 field of the IOCB for the completion
9257  * callback function to the FDISC ELS command.
9258  *
9259  * Return code
9260  *   0 - Successfully issued fdisc iocb command
9261  *   1 - Failed to issue fdisc iocb command
9262  **/
9263 static int
9264 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
9265 		     uint8_t retry)
9266 {
9267 	struct lpfc_hba *phba = vport->phba;
9268 	IOCB_t *icmd;
9269 	struct lpfc_iocbq *elsiocb;
9270 	struct serv_parm *sp;
9271 	uint8_t *pcmd;
9272 	uint16_t cmdsize;
9273 	int did = ndlp->nlp_DID;
9274 	int rc;
9275 
9276 	vport->port_state = LPFC_FDISC;
9277 	vport->fc_myDID = 0;
9278 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
9279 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
9280 				     ELS_CMD_FDISC);
9281 	if (!elsiocb) {
9282 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9283 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
9284 				 "0255 Issue FDISC: no IOCB\n");
9285 		return 1;
9286 	}
9287 
9288 	icmd = &elsiocb->iocb;
9289 	icmd->un.elsreq64.myID = 0;
9290 	icmd->un.elsreq64.fl = 1;
9291 
9292 	/*
9293 	 * SLI3 ports require a different context type value than SLI4.
9294 	 * Catch SLI3 ports here and override the prep.
9295 	 */
9296 	if (phba->sli_rev == LPFC_SLI_REV3) {
9297 		icmd->ulpCt_h = 1;
9298 		icmd->ulpCt_l = 0;
9299 	}
9300 
9301 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
9302 	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
9303 	pcmd += sizeof(uint32_t); /* CSP Word 1 */
9304 	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
9305 	sp = (struct serv_parm *) pcmd;
9306 	/* Setup CSPs accordingly for Fabric */
9307 	sp->cmn.e_d_tov = 0;
9308 	sp->cmn.w2.r_a_tov = 0;
9309 	sp->cmn.virtual_fabric_support = 0;
9310 	sp->cls1.classValid = 0;
9311 	sp->cls2.seqDelivery = 1;
9312 	sp->cls3.seqDelivery = 1;
9313 
9314 	pcmd += sizeof(uint32_t); /* CSP Word 2 */
9315 	pcmd += sizeof(uint32_t); /* CSP Word 3 */
9316 	pcmd += sizeof(uint32_t); /* CSP Word 4 */
9317 	pcmd += sizeof(uint32_t); /* Port Name */
9318 	memcpy(pcmd, &vport->fc_portname, 8);
9319 	pcmd += sizeof(uint32_t); /* Node Name */
9320 	pcmd += sizeof(uint32_t); /* Node Name */
9321 	memcpy(pcmd, &vport->fc_nodename, 8);
9322 	sp->cmn.valid_vendor_ver_level = 0;
9323 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
9324 	lpfc_set_disctmo(vport);
9325 
9326 	phba->fc_stat.elsXmitFDISC++;
9327 	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
9328 
9329 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9330 		"Issue FDISC:     did:x%x",
9331 		did, 0, 0);
9332 
9333 	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
9334 	if (rc == IOCB_ERROR) {
9335 		lpfc_els_free_iocb(phba, elsiocb);
9336 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9337 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
9338 				 "0256 Issue FDISC: Cannot send IOCB\n");
9339 		return 1;
9340 	}
9341 	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
9342 	return 0;
9343 }
9344 
9345 /**
9346  * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
9347  * @phba: pointer to lpfc hba data structure.
9348  * @cmdiocb: pointer to lpfc command iocb data structure.
9349  * @rspiocb: pointer to lpfc response iocb data structure.
9350  *
9351  * This routine is the completion callback function to the issuing of a LOGO
9352  * ELS command off a vport. It frees the command IOCB and then decrement the
9353  * reference count held on ndlp for this completion function, indicating that
9354  * the reference to the ndlp is no long needed. Note that the
9355  * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
9356  * callback function and an additional explicit ndlp reference decrementation
9357  * will trigger the actual release of the ndlp.
9358  **/
9359 static void
9360 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9361 			struct lpfc_iocbq *rspiocb)
9362 {
9363 	struct lpfc_vport *vport = cmdiocb->vport;
9364 	IOCB_t *irsp;
9365 	struct lpfc_nodelist *ndlp;
9366 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9367 
9368 	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
9369 	irsp = &rspiocb->iocb;
9370 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9371 		"LOGO npiv cmpl:  status:x%x/x%x did:x%x",
9372 		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
9373 
9374 	lpfc_els_free_iocb(phba, cmdiocb);
9375 	vport->unreg_vpi_cmpl = VPORT_ERROR;
9376 
9377 	/* Trigger the release of the ndlp after logo */
9378 	lpfc_nlp_put(ndlp);
9379 
9380 	/* NPIV LOGO completes to NPort <nlp_DID> */
9381 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9382 			 "2928 NPIV LOGO completes to NPort x%x "
9383 			 "Data: x%x x%x x%x x%x\n",
9384 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
9385 			 irsp->ulpTimeout, vport->num_disc_nodes);
9386 
9387 	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
9388 		spin_lock_irq(shost->host_lock);
9389 		vport->fc_flag &= ~FC_NDISC_ACTIVE;
9390 		vport->fc_flag &= ~FC_FABRIC;
9391 		spin_unlock_irq(shost->host_lock);
9392 		lpfc_can_disctmo(vport);
9393 	}
9394 }
9395 
9396 /**
9397  * lpfc_issue_els_npiv_logo - Issue a logo off a vport
9398  * @vport: pointer to a virtual N_Port data structure.
9399  * @ndlp: pointer to a node-list data structure.
9400  *
9401  * This routine issues a LOGO ELS command to an @ndlp off a @vport.
9402  *
9403  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
9404  * will be incremented by 1 for holding the ndlp and the reference to ndlp
9405  * will be stored into the context1 field of the IOCB for the completion
9406  * callback function to the LOGO ELS command.
9407  *
9408  * Return codes
9409  *   0 - Successfully issued logo off the @vport
9410  *   1 - Failed to issue logo off the @vport
9411  **/
9412 int
9413 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
9414 {
9415 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9416 	struct lpfc_hba  *phba = vport->phba;
9417 	struct lpfc_iocbq *elsiocb;
9418 	uint8_t *pcmd;
9419 	uint16_t cmdsize;
9420 
9421 	cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
9422 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
9423 				     ELS_CMD_LOGO);
9424 	if (!elsiocb)
9425 		return 1;
9426 
9427 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
9428 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
9429 	pcmd += sizeof(uint32_t);
9430 
9431 	/* Fill in LOGO payload */
9432 	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
9433 	pcmd += sizeof(uint32_t);
9434 	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
9435 
9436 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9437 		"Issue LOGO npiv  did:x%x flg:x%x",
9438 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
9439 
9440 	elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
9441 	spin_lock_irq(shost->host_lock);
9442 	ndlp->nlp_flag |= NLP_LOGO_SND;
9443 	spin_unlock_irq(shost->host_lock);
9444 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
9445 	    IOCB_ERROR) {
9446 		spin_lock_irq(shost->host_lock);
9447 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
9448 		spin_unlock_irq(shost->host_lock);
9449 		lpfc_els_free_iocb(phba, elsiocb);
9450 		return 1;
9451 	}
9452 	return 0;
9453 }
9454 
9455 /**
9456  * lpfc_fabric_block_timeout - Handler function to the fabric block timer
9457  * @ptr: holder for the timer function associated data.
9458  *
9459  * This routine is invoked by the fabric iocb block timer after
9460  * timeout. It posts the fabric iocb block timeout event by setting the
9461  * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
9462  * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
9463  * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
9464  * posted event WORKER_FABRIC_BLOCK_TMO.
9465  **/
9466 void
9467 lpfc_fabric_block_timeout(struct timer_list *t)
9468 {
9469 	struct lpfc_hba  *phba = from_timer(phba, t, fabric_block_timer);
9470 	unsigned long iflags;
9471 	uint32_t tmo_posted;
9472 
9473 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
9474 	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
9475 	if (!tmo_posted)
9476 		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
9477 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
9478 
9479 	if (!tmo_posted)
9480 		lpfc_worker_wake_up(phba);
9481 	return;
9482 }
9483 
9484 /**
9485  * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
9486  * @phba: pointer to lpfc hba data structure.
9487  *
9488  * This routine issues one fabric iocb from the driver internal list to
9489  * the HBA. It first checks whether it's ready to issue one fabric iocb to
9490  * the HBA (whether there is no outstanding fabric iocb). If so, it shall
9491  * remove one pending fabric iocb from the driver internal list and invokes
9492  * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
9493  **/
9494 static void
9495 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
9496 {
9497 	struct lpfc_iocbq *iocb;
9498 	unsigned long iflags;
9499 	int ret;
9500 	IOCB_t *cmd;
9501 
9502 repeat:
9503 	iocb = NULL;
9504 	spin_lock_irqsave(&phba->hbalock, iflags);
9505 	/* Post any pending iocb to the SLI layer */
9506 	if (atomic_read(&phba->fabric_iocb_count) == 0) {
9507 		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
9508 				 list);
9509 		if (iocb)
9510 			/* Increment fabric iocb count to hold the position */
9511 			atomic_inc(&phba->fabric_iocb_count);
9512 	}
9513 	spin_unlock_irqrestore(&phba->hbalock, iflags);
9514 	if (iocb) {
9515 		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
9516 		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
9517 		iocb->iocb_flag |= LPFC_IO_FABRIC;
9518 
9519 		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
9520 			"Fabric sched1:   ste:x%x",
9521 			iocb->vport->port_state, 0, 0);
9522 
9523 		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
9524 
9525 		if (ret == IOCB_ERROR) {
9526 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
9527 			iocb->fabric_iocb_cmpl = NULL;
9528 			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
9529 			cmd = &iocb->iocb;
9530 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
9531 			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
9532 			iocb->iocb_cmpl(phba, iocb, iocb);
9533 
9534 			atomic_dec(&phba->fabric_iocb_count);
9535 			goto repeat;
9536 		}
9537 	}
9538 
9539 	return;
9540 }
9541 
9542 /**
9543  * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
9544  * @phba: pointer to lpfc hba data structure.
9545  *
9546  * This routine unblocks the  issuing fabric iocb command. The function
9547  * will clear the fabric iocb block bit and then invoke the routine
9548  * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
9549  * from the driver internal fabric iocb list.
9550  **/
9551 void
9552 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
9553 {
9554 	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9555 
9556 	lpfc_resume_fabric_iocbs(phba);
9557 	return;
9558 }
9559 
9560 /**
9561  * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
9562  * @phba: pointer to lpfc hba data structure.
9563  *
9564  * This routine blocks the issuing fabric iocb for a specified amount of
9565  * time (currently 100 ms). This is done by set the fabric iocb block bit
9566  * and set up a timeout timer for 100ms. When the block bit is set, no more
9567  * fabric iocb will be issued out of the HBA.
9568  **/
9569 static void
9570 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
9571 {
9572 	int blocked;
9573 
9574 	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9575 	/* Start a timer to unblock fabric iocbs after 100ms */
9576 	if (!blocked)
9577 		mod_timer(&phba->fabric_block_timer,
9578 			  jiffies + msecs_to_jiffies(100));
9579 
9580 	return;
9581 }
9582 
9583 /**
9584  * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
9585  * @phba: pointer to lpfc hba data structure.
9586  * @cmdiocb: pointer to lpfc command iocb data structure.
9587  * @rspiocb: pointer to lpfc response iocb data structure.
9588  *
9589  * This routine is the callback function that is put to the fabric iocb's
9590  * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
9591  * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
9592  * function first restores and invokes the original iocb's callback function
9593  * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
9594  * fabric bound iocb from the driver internal fabric iocb list onto the wire.
9595  **/
9596 static void
9597 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9598 	struct lpfc_iocbq *rspiocb)
9599 {
9600 	struct ls_rjt stat;
9601 
9602 	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
9603 
9604 	switch (rspiocb->iocb.ulpStatus) {
9605 		case IOSTAT_NPORT_RJT:
9606 		case IOSTAT_FABRIC_RJT:
9607 			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
9608 				lpfc_block_fabric_iocbs(phba);
9609 			}
9610 			break;
9611 
9612 		case IOSTAT_NPORT_BSY:
9613 		case IOSTAT_FABRIC_BSY:
9614 			lpfc_block_fabric_iocbs(phba);
9615 			break;
9616 
9617 		case IOSTAT_LS_RJT:
9618 			stat.un.lsRjtError =
9619 				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
9620 			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
9621 				(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
9622 				lpfc_block_fabric_iocbs(phba);
9623 			break;
9624 	}
9625 
9626 	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
9627 
9628 	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
9629 	cmdiocb->fabric_iocb_cmpl = NULL;
9630 	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
9631 	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
9632 
9633 	atomic_dec(&phba->fabric_iocb_count);
9634 	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
9635 		/* Post any pending iocbs to HBA */
9636 		lpfc_resume_fabric_iocbs(phba);
9637 	}
9638 }
9639 
9640 /**
9641  * lpfc_issue_fabric_iocb - Issue a fabric iocb command
9642  * @phba: pointer to lpfc hba data structure.
9643  * @iocb: pointer to lpfc command iocb data structure.
9644  *
9645  * This routine is used as the top-level API for issuing a fabric iocb command
9646  * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver
9647  * function makes sure that only one fabric bound iocb will be outstanding at
9648  * any given time. As such, this function will first check to see whether there
9649  * is already an outstanding fabric iocb on the wire. If so, it will put the
9650  * newly issued iocb onto the driver internal fabric iocb list, waiting to be
9651  * issued later. Otherwise, it will issue the iocb on the wire and update the
9652  * fabric iocb count it indicate that there is one fabric iocb on the wire.
9653  *
9654  * Note, this implementation has a potential sending out fabric IOCBs out of
9655  * order. The problem is caused by the construction of the "ready" boolen does
9656  * not include the condition that the internal fabric IOCB list is empty. As
9657  * such, it is possible a fabric IOCB issued by this routine might be "jump"
9658  * ahead of the fabric IOCBs in the internal list.
9659  *
9660  * Return code
9661  *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
9662  *   IOCB_ERROR - failed to issue fabric iocb
9663  **/
9664 static int
9665 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
9666 {
9667 	unsigned long iflags;
9668 	int ready;
9669 	int ret;
9670 
9671 	BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
9672 
9673 	spin_lock_irqsave(&phba->hbalock, iflags);
9674 	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
9675 		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9676 
9677 	if (ready)
9678 		/* Increment fabric iocb count to hold the position */
9679 		atomic_inc(&phba->fabric_iocb_count);
9680 	spin_unlock_irqrestore(&phba->hbalock, iflags);
9681 	if (ready) {
9682 		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
9683 		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
9684 		iocb->iocb_flag |= LPFC_IO_FABRIC;
9685 
9686 		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
9687 			"Fabric sched2:   ste:x%x",
9688 			iocb->vport->port_state, 0, 0);
9689 
9690 		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
9691 
9692 		if (ret == IOCB_ERROR) {
9693 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
9694 			iocb->fabric_iocb_cmpl = NULL;
9695 			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
9696 			atomic_dec(&phba->fabric_iocb_count);
9697 		}
9698 	} else {
9699 		spin_lock_irqsave(&phba->hbalock, iflags);
9700 		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
9701 		spin_unlock_irqrestore(&phba->hbalock, iflags);
9702 		ret = IOCB_SUCCESS;
9703 	}
9704 	return ret;
9705 }
9706 
9707 /**
9708  * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
9709  * @vport: pointer to a virtual N_Port data structure.
9710  *
9711  * This routine aborts all the IOCBs associated with a @vport from the
9712  * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9713  * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
9714  * list, removes each IOCB associated with the @vport off the list, set the
9715  * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
9716  * associated with the IOCB.
9717  **/
9718 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
9719 {
9720 	LIST_HEAD(completions);
9721 	struct lpfc_hba  *phba = vport->phba;
9722 	struct lpfc_iocbq *tmp_iocb, *piocb;
9723 
9724 	spin_lock_irq(&phba->hbalock);
9725 	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9726 				 list) {
9727 
9728 		if (piocb->vport != vport)
9729 			continue;
9730 
9731 		list_move_tail(&piocb->list, &completions);
9732 	}
9733 	spin_unlock_irq(&phba->hbalock);
9734 
9735 	/* Cancel all the IOCBs from the completions list */
9736 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9737 			      IOERR_SLI_ABORTED);
9738 }
9739 
9740 /**
9741  * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
9742  * @ndlp: pointer to a node-list data structure.
9743  *
9744  * This routine aborts all the IOCBs associated with an @ndlp from the
9745  * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9746  * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
9747  * list, removes each IOCB associated with the @ndlp off the list, set the
9748  * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
9749  * associated with the IOCB.
9750  **/
9751 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
9752 {
9753 	LIST_HEAD(completions);
9754 	struct lpfc_hba  *phba = ndlp->phba;
9755 	struct lpfc_iocbq *tmp_iocb, *piocb;
9756 	struct lpfc_sli_ring *pring;
9757 
9758 	pring = lpfc_phba_elsring(phba);
9759 
9760 	if (unlikely(!pring))
9761 		return;
9762 
9763 	spin_lock_irq(&phba->hbalock);
9764 	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9765 				 list) {
9766 		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
9767 
9768 			list_move_tail(&piocb->list, &completions);
9769 		}
9770 	}
9771 	spin_unlock_irq(&phba->hbalock);
9772 
9773 	/* Cancel all the IOCBs from the completions list */
9774 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9775 			      IOERR_SLI_ABORTED);
9776 }
9777 
9778 /**
9779  * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
9780  * @phba: pointer to lpfc hba data structure.
9781  *
9782  * This routine aborts all the IOCBs currently on the driver internal
9783  * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
9784  * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
9785  * list, removes IOCBs off the list, set the status feild to
9786  * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
9787  * the IOCB.
9788  **/
9789 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
9790 {
9791 	LIST_HEAD(completions);
9792 
9793 	spin_lock_irq(&phba->hbalock);
9794 	list_splice_init(&phba->fabric_iocb_list, &completions);
9795 	spin_unlock_irq(&phba->hbalock);
9796 
9797 	/* Cancel all the IOCBs from the completions list */
9798 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9799 			      IOERR_SLI_ABORTED);
9800 }
9801 
9802 /**
9803  * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
9804  * @vport: pointer to lpfc vport data structure.
9805  *
9806  * This routine is invoked by the vport cleanup for deletions and the cleanup
9807  * for an ndlp on removal.
9808  **/
9809 void
9810 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
9811 {
9812 	struct lpfc_hba *phba = vport->phba;
9813 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
9814 	unsigned long iflag = 0;
9815 
9816 	spin_lock_irqsave(&phba->hbalock, iflag);
9817 	spin_lock(&phba->sli4_hba.sgl_list_lock);
9818 	list_for_each_entry_safe(sglq_entry, sglq_next,
9819 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
9820 		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
9821 			sglq_entry->ndlp = NULL;
9822 	}
9823 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
9824 	spin_unlock_irqrestore(&phba->hbalock, iflag);
9825 	return;
9826 }
9827 
9828 /**
9829  * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
9830  * @phba: pointer to lpfc hba data structure.
9831  * @axri: pointer to the els xri abort wcqe structure.
9832  *
9833  * This routine is invoked by the worker thread to process a SLI4 slow-path
9834  * ELS aborted xri.
9835  **/
9836 void
9837 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
9838 			  struct sli4_wcqe_xri_aborted *axri)
9839 {
9840 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
9841 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
9842 	uint16_t lxri = 0;
9843 
9844 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
9845 	unsigned long iflag = 0;
9846 	struct lpfc_nodelist *ndlp;
9847 	struct lpfc_sli_ring *pring;
9848 
9849 	pring = lpfc_phba_elsring(phba);
9850 
9851 	spin_lock_irqsave(&phba->hbalock, iflag);
9852 	spin_lock(&phba->sli4_hba.sgl_list_lock);
9853 	list_for_each_entry_safe(sglq_entry, sglq_next,
9854 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
9855 		if (sglq_entry->sli4_xritag == xri) {
9856 			list_del(&sglq_entry->list);
9857 			ndlp = sglq_entry->ndlp;
9858 			sglq_entry->ndlp = NULL;
9859 			list_add_tail(&sglq_entry->list,
9860 				&phba->sli4_hba.lpfc_els_sgl_list);
9861 			sglq_entry->state = SGL_FREED;
9862 			spin_unlock(&phba->sli4_hba.sgl_list_lock);
9863 			spin_unlock_irqrestore(&phba->hbalock, iflag);
9864 			lpfc_set_rrq_active(phba, ndlp,
9865 				sglq_entry->sli4_lxritag,
9866 				rxid, 1);
9867 
9868 			/* Check if TXQ queue needs to be serviced */
9869 			if (pring && !list_empty(&pring->txq))
9870 				lpfc_worker_wake_up(phba);
9871 			return;
9872 		}
9873 	}
9874 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
9875 	lxri = lpfc_sli4_xri_inrange(phba, xri);
9876 	if (lxri == NO_XRI) {
9877 		spin_unlock_irqrestore(&phba->hbalock, iflag);
9878 		return;
9879 	}
9880 	spin_lock(&phba->sli4_hba.sgl_list_lock);
9881 	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
9882 	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
9883 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
9884 		spin_unlock_irqrestore(&phba->hbalock, iflag);
9885 		return;
9886 	}
9887 	sglq_entry->state = SGL_XRI_ABORTED;
9888 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
9889 	spin_unlock_irqrestore(&phba->hbalock, iflag);
9890 	return;
9891 }
9892 
9893 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
9894  * @vport: pointer to virtual port object.
9895  * @ndlp: nodelist pointer for the impacted node.
9896  *
9897  * The driver calls this routine in response to an SLI4 XRI ABORT CQE
9898  * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
9899  * the driver is required to send a LOGO to the remote node before it
9900  * attempts to recover its login to the remote node.
9901  */
9902 void
9903 lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
9904 			   struct lpfc_nodelist *ndlp)
9905 {
9906 	struct Scsi_Host *shost;
9907 	struct lpfc_hba *phba;
9908 	unsigned long flags = 0;
9909 
9910 	shost = lpfc_shost_from_vport(vport);
9911 	phba = vport->phba;
9912 	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
9913 		lpfc_printf_log(phba, KERN_INFO,
9914 				LOG_SLI, "3093 No rport recovery needed. "
9915 				"rport in state 0x%x\n", ndlp->nlp_state);
9916 		return;
9917 	}
9918 	lpfc_printf_log(phba, KERN_ERR,
9919 			LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
9920 			"3094 Start rport recovery on shost id 0x%x "
9921 			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
9922 			"flags 0x%x\n",
9923 			shost->host_no, ndlp->nlp_DID,
9924 			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
9925 			ndlp->nlp_flag);
9926 	/*
9927 	 * The rport is not responding.  Remove the FCP-2 flag to prevent
9928 	 * an ADISC in the follow-up recovery code.
9929 	 */
9930 	spin_lock_irqsave(shost->host_lock, flags);
9931 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
9932 	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
9933 	spin_unlock_irqrestore(shost->host_lock, flags);
9934 	lpfc_unreg_rpi(vport, ndlp);
9935 }
9936 
9937