xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_els.c (revision 981ab3f1)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
5  * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  *******************************************************************/
23 /* See Fibre Channel protocol T11 FC-LS for details */
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/interrupt.h>
28 
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h>
33 
34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h"
36 #include "lpfc_sli.h"
37 #include "lpfc_sli4.h"
38 #include "lpfc_nl.h"
39 #include "lpfc_disc.h"
40 #include "lpfc_scsi.h"
41 #include "lpfc.h"
42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h"
44 #include "lpfc_vport.h"
45 #include "lpfc_debugfs.h"
46 
47 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
48 			  struct lpfc_iocbq *);
49 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
50 			struct lpfc_iocbq *);
51 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
52 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
53 				struct lpfc_nodelist *ndlp, uint8_t retry);
54 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
55 				  struct lpfc_iocbq *iocb);
56 
57 static int lpfc_max_els_tries = 3;
58 
59 /**
60  * lpfc_els_chk_latt - Check host link attention event for a vport
61  * @vport: pointer to a host virtual N_Port data structure.
62  *
63  * This routine checks whether there is an outstanding host link
64  * attention event during the discovery process with the @vport. It is done
65  * by reading the HBA's Host Attention (HA) register. If there is any host
66  * link attention events during this @vport's discovery process, the @vport
67  * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
68  * be issued if the link state is not already in host link cleared state,
69  * and a return code shall indicate whether the host link attention event
70  * had happened.
71  *
72  * Note that, if either the host link is in state LPFC_LINK_DOWN or @vport
73  * state in LPFC_VPORT_READY, the request for checking host link attention
74  * event will be ignored and a return code shall indicate no host link
75  * attention event had happened.
76  *
77  * Return codes
78  *   0 - no host link attention event happened
79  *   1 - host link attention event happened
80  **/
81 int
82 lpfc_els_chk_latt(struct lpfc_vport *vport)
83 {
84 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
85 	struct lpfc_hba  *phba = vport->phba;
86 	uint32_t ha_copy;
87 
88 	if (vport->port_state >= LPFC_VPORT_READY ||
89 	    phba->link_state == LPFC_LINK_DOWN ||
90 	    phba->sli_rev > LPFC_SLI_REV3)
91 		return 0;
92 
93 	/* Read the HBA Host Attention Register */
94 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
95 		return 1;
96 
97 	if (!(ha_copy & HA_LATT))
98 		return 0;
99 
100 	/* Pending Link Event during Discovery */
101 	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
102 			 "0237 Pending Link Event during "
103 			 "Discovery: State x%x\n",
104 			 phba->pport->port_state);
105 
106 	/* CLEAR_LA should re-enable link attention events and
107 	 * we should then immediately take a LATT event. The
108 	 * LATT processing should call lpfc_linkdown() which
109 	 * will cleanup any left over in-progress discovery
110 	 * events.
111 	 */
112 	spin_lock_irq(shost->host_lock);
113 	vport->fc_flag |= FC_ABORT_DISCOVERY;
114 	spin_unlock_irq(shost->host_lock);
115 
116 	if (phba->link_state != LPFC_CLEAR_LA)
117 		lpfc_issue_clear_la(phba, vport);
118 
119 	return 1;
120 }
121 
122 /**
123  * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
124  * @vport: pointer to a host virtual N_Port data structure.
125  * @expectRsp: flag indicating whether response is expected.
126  * @cmdSize: size of the ELS command.
127  * @retry: number of retries to the command IOCB when it fails.
128  * @ndlp: pointer to a node-list data structure.
129  * @did: destination identifier.
130  * @elscmd: the ELS command code.
131  *
132  * This routine is used for allocating a lpfc-IOCB data structure from
133  * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
134  * passed into the routine for discovery state machine to issue an Extended
135  * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
136  * and preparation routine that is used by all the discovery state machine
137  * routines and the ELS command-specific fields will be later set up by
138  * the individual discovery machine routines after calling this routine
139  * allocating and preparing a generic IOCB data structure. It fills in the
140  * Buffer Descriptor Entries (BDEs), allocates buffers for both command
141  * payload and response payload (if expected). The reference count on the
142  * ndlp is incremented by 1 and the reference to the ndlp is put into
143  * context1 of the IOCB data structure for this IOCB to hold the ndlp
144  * reference for the command's callback function to access later.
145  *
146  * Return code
147  *   Pointer to the newly allocated/prepared els iocb data structure
148  *   NULL - when els iocb data structure allocation/preparation failed
149  **/
150 struct lpfc_iocbq *
151 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
152 		   uint16_t cmdSize, uint8_t retry,
153 		   struct lpfc_nodelist *ndlp, uint32_t did,
154 		   uint32_t elscmd)
155 {
156 	struct lpfc_hba  *phba = vport->phba;
157 	struct lpfc_iocbq *elsiocb;
158 	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
159 	struct ulp_bde64 *bpl;
160 	IOCB_t *icmd;
161 
162 
163 	if (!lpfc_is_link_up(phba))
164 		return NULL;
165 
166 	/* Allocate buffer for  command iocb */
167 	elsiocb = lpfc_sli_get_iocbq(phba);
168 
169 	if (elsiocb == NULL)
170 		return NULL;
171 
172 	/*
173 	 * If this command is for fabric controller and HBA running
174 	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
175 	 */
176 	if ((did == Fabric_DID) &&
177 		(phba->hba_flag & HBA_FIP_SUPPORT) &&
178 		((elscmd == ELS_CMD_FLOGI) ||
179 		 (elscmd == ELS_CMD_FDISC) ||
180 		 (elscmd == ELS_CMD_LOGO)))
181 		switch (elscmd) {
182 		case ELS_CMD_FLOGI:
183 		elsiocb->iocb_flag |=
184 			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
185 					& LPFC_FIP_ELS_ID_MASK);
186 		break;
187 		case ELS_CMD_FDISC:
188 		elsiocb->iocb_flag |=
189 			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
190 					& LPFC_FIP_ELS_ID_MASK);
191 		break;
192 		case ELS_CMD_LOGO:
193 		elsiocb->iocb_flag |=
194 			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
195 					& LPFC_FIP_ELS_ID_MASK);
196 		break;
197 		}
198 	else
199 		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
200 
201 	icmd = &elsiocb->iocb;
202 
203 	/* fill in BDEs for command */
204 	/* Allocate buffer for command payload */
205 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
206 	if (pcmd)
207 		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
208 	if (!pcmd || !pcmd->virt)
209 		goto els_iocb_free_pcmb_exit;
210 
211 	INIT_LIST_HEAD(&pcmd->list);
212 
213 	/* Allocate buffer for response payload */
214 	if (expectRsp) {
215 		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
216 		if (prsp)
217 			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
218 						     &prsp->phys);
219 		if (!prsp || !prsp->virt)
220 			goto els_iocb_free_prsp_exit;
221 		INIT_LIST_HEAD(&prsp->list);
222 	} else
223 		prsp = NULL;
224 
225 	/* Allocate buffer for Buffer ptr list */
226 	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
227 	if (pbuflist)
228 		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
229 						 &pbuflist->phys);
230 	if (!pbuflist || !pbuflist->virt)
231 		goto els_iocb_free_pbuf_exit;
232 
233 	INIT_LIST_HEAD(&pbuflist->list);
234 
235 	if (expectRsp) {
236 		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
237 		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
238 		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
239 		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
240 
241 		icmd->un.elsreq64.remoteID = did;		/* DID */
242 		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
243 		if (elscmd == ELS_CMD_FLOGI)
244 			icmd->ulpTimeout = FF_DEF_RATOV * 2;
245 		else
246 			icmd->ulpTimeout = phba->fc_ratov * 2;
247 	} else {
248 		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
249 		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
250 		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
251 		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
252 		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
253 		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
254 	}
255 	icmd->ulpBdeCount = 1;
256 	icmd->ulpLe = 1;
257 	icmd->ulpClass = CLASS3;
258 
259 	/*
260 	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
261 	 * For SLI4, since the driver controls VPIs we also want to include
262 	 * all ELS pt2pt protocol traffic as well.
263 	 */
264 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
265 		((phba->sli_rev == LPFC_SLI_REV4) &&
266 		    (vport->fc_flag & FC_PT2PT))) {
267 
268 		if (expectRsp) {
269 			icmd->un.elsreq64.myID = vport->fc_myDID;
270 
271 			/* For ELS_REQUEST64_CR, use the VPI by default */
272 			icmd->ulpContext = phba->vpi_ids[vport->vpi];
273 		}
274 
275 		icmd->ulpCt_h = 0;
276 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
277 		if (elscmd == ELS_CMD_ECHO)
278 			icmd->ulpCt_l = 0; /* context = invalid RPI */
279 		else
280 			icmd->ulpCt_l = 1; /* context = VPI */
281 	}
282 
283 	bpl = (struct ulp_bde64 *) pbuflist->virt;
284 	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
285 	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
286 	bpl->tus.f.bdeSize = cmdSize;
287 	bpl->tus.f.bdeFlags = 0;
288 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
289 
290 	if (expectRsp) {
291 		bpl++;
292 		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
293 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
294 		bpl->tus.f.bdeSize = FCELSSIZE;
295 		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
296 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
297 	}
298 
299 	/* prevent preparing iocb with NULL ndlp reference */
300 	elsiocb->context1 = lpfc_nlp_get(ndlp);
301 	if (!elsiocb->context1)
302 		goto els_iocb_free_pbuf_exit;
303 	elsiocb->context2 = pcmd;
304 	elsiocb->context3 = pbuflist;
305 	elsiocb->retry = retry;
306 	elsiocb->vport = vport;
307 	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
308 
309 	if (prsp) {
310 		list_add(&prsp->list, &pcmd->list);
311 	}
312 	if (expectRsp) {
313 		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
314 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
315 				 "0116 Xmit ELS command x%x to remote "
316 				 "NPORT x%x I/O tag: x%x, port state:x%x"
317 				 " fc_flag:x%x\n",
318 				 elscmd, did, elsiocb->iotag,
319 				 vport->port_state,
320 				 vport->fc_flag);
321 	} else {
322 		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
323 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
324 				 "0117 Xmit ELS response x%x to remote "
325 				 "NPORT x%x I/O tag: x%x, size: x%x "
326 				 "port_state x%x fc_flag x%x\n",
327 				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
328 				 cmdSize, vport->port_state,
329 				 vport->fc_flag);
330 	}
331 	return elsiocb;
332 
333 els_iocb_free_pbuf_exit:
334 	if (expectRsp)
335 		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
336 	kfree(pbuflist);
337 
338 els_iocb_free_prsp_exit:
339 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
340 	kfree(prsp);
341 
342 els_iocb_free_pcmb_exit:
343 	kfree(pcmd);
344 	lpfc_sli_release_iocbq(phba, elsiocb);
345 	return NULL;
346 }
347 
348 /**
349  * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
350  * @vport: pointer to a host virtual N_Port data structure.
351  *
352  * This routine issues a fabric registration login for a @vport. An
353  * active ndlp node with Fabric_DID must already exist for this @vport.
354  * The routine invokes two mailbox commands to carry out fabric registration
355  * login through the HBA firmware: the first mailbox command requests the
356  * HBA to perform link configuration for the @vport; and the second mailbox
357  * command requests the HBA to perform the actual fabric registration login
358  * with the @vport.
359  *
360  * Return code
361  *   0 - successfully issued fabric registration login for @vport
362  *   -ENXIO -- failed to issue fabric registration login for @vport
363  **/
364 int
365 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
366 {
367 	struct lpfc_hba  *phba = vport->phba;
368 	LPFC_MBOXQ_t *mbox;
369 	struct lpfc_dmabuf *mp;
370 	struct lpfc_nodelist *ndlp;
371 	struct serv_parm *sp;
372 	int rc;
373 	int err = 0;
374 
375 	sp = &phba->fc_fabparam;
376 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
377 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
378 		err = 1;
379 		goto fail;
380 	}
381 
382 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
383 	if (!mbox) {
384 		err = 2;
385 		goto fail;
386 	}
387 
388 	vport->port_state = LPFC_FABRIC_CFG_LINK;
389 	lpfc_config_link(phba, mbox);
390 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
391 	mbox->vport = vport;
392 
393 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
394 	if (rc == MBX_NOT_FINISHED) {
395 		err = 3;
396 		goto fail_free_mbox;
397 	}
398 
399 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
400 	if (!mbox) {
401 		err = 4;
402 		goto fail;
403 	}
404 	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
405 			  ndlp->nlp_rpi);
406 	if (rc) {
407 		err = 5;
408 		goto fail_free_mbox;
409 	}
410 
411 	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
412 	mbox->vport = vport;
413 	/* increment the reference count on ndlp to hold reference
414 	 * for the callback routine.
415 	 */
416 	mbox->context2 = lpfc_nlp_get(ndlp);
417 
418 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
419 	if (rc == MBX_NOT_FINISHED) {
420 		err = 6;
421 		goto fail_issue_reg_login;
422 	}
423 
424 	return 0;
425 
426 fail_issue_reg_login:
427 	/* decrement the reference count on ndlp just incremented
428 	 * for the failed mbox command.
429 	 */
430 	lpfc_nlp_put(ndlp);
431 	mp = (struct lpfc_dmabuf *) mbox->context1;
432 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
433 	kfree(mp);
434 fail_free_mbox:
435 	mempool_free(mbox, phba->mbox_mem_pool);
436 
437 fail:
438 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
439 	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
440 		"0249 Cannot issue Register Fabric login: Err %d\n", err);
441 	return -ENXIO;
442 }
443 
444 /**
445  * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
446  * @vport: pointer to a host virtual N_Port data structure.
447  *
448  * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
449  * the @vport. This mailbox command is necessary for SLI4 port only.
450  *
451  * Return code
452  *   0 - successfully issued REG_VFI for @vport
453  *   A failure code otherwise.
454  **/
455 int
456 lpfc_issue_reg_vfi(struct lpfc_vport *vport)
457 {
458 	struct lpfc_hba  *phba = vport->phba;
459 	LPFC_MBOXQ_t *mboxq = NULL;
460 	struct lpfc_nodelist *ndlp;
461 	struct lpfc_dmabuf *dmabuf = NULL;
462 	int rc = 0;
463 
464 	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
465 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
466 	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
467 	    !(vport->fc_flag & FC_PT2PT)) {
468 		ndlp = lpfc_findnode_did(vport, Fabric_DID);
469 		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
470 			rc = -ENODEV;
471 			goto fail;
472 		}
473 	}
474 
475 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
476 	if (!mboxq) {
477 		rc = -ENOMEM;
478 		goto fail;
479 	}
480 
481 	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
482 	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
483 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
484 		if (!dmabuf) {
485 			rc = -ENOMEM;
486 			goto fail;
487 		}
488 		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
489 		if (!dmabuf->virt) {
490 			rc = -ENOMEM;
491 			goto fail;
492 		}
493 		memcpy(dmabuf->virt, &phba->fc_fabparam,
494 		       sizeof(struct serv_parm));
495 	}
496 
497 	vport->port_state = LPFC_FABRIC_CFG_LINK;
498 	if (dmabuf)
499 		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
500 	else
501 		lpfc_reg_vfi(mboxq, vport, 0);
502 
503 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
504 	mboxq->vport = vport;
505 	mboxq->context1 = dmabuf;
506 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
507 	if (rc == MBX_NOT_FINISHED) {
508 		rc = -ENXIO;
509 		goto fail;
510 	}
511 	return 0;
512 
513 fail:
514 	if (mboxq)
515 		mempool_free(mboxq, phba->mbox_mem_pool);
516 	if (dmabuf) {
517 		if (dmabuf->virt)
518 			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
519 		kfree(dmabuf);
520 	}
521 
522 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
523 	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
524 		"0289 Issue Register VFI failed: Err %d\n", rc);
525 	return rc;
526 }
527 
528 /**
529  * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
530  * @vport: pointer to a host virtual N_Port data structure.
531  *
532  * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
533  * the @vport. This mailbox command is necessary for SLI4 port only.
534  *
535  * Return code
536  *   0 - successfully issued REG_VFI for @vport
537  *   A failure code otherwise.
538  **/
539 int
540 lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
541 {
542 	struct lpfc_hba *phba = vport->phba;
543 	struct Scsi_Host *shost;
544 	LPFC_MBOXQ_t *mboxq;
545 	int rc;
546 
547 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
548 	if (!mboxq) {
549 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
550 				"2556 UNREG_VFI mbox allocation failed"
551 				"HBA state x%x\n", phba->pport->port_state);
552 		return -ENOMEM;
553 	}
554 
555 	lpfc_unreg_vfi(mboxq, vport);
556 	mboxq->vport = vport;
557 	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
558 
559 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
560 	if (rc == MBX_NOT_FINISHED) {
561 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
562 				"2557 UNREG_VFI issue mbox failed rc x%x "
563 				"HBA state x%x\n",
564 				rc, phba->pport->port_state);
565 		mempool_free(mboxq, phba->mbox_mem_pool);
566 		return -EIO;
567 	}
568 
569 	shost = lpfc_shost_from_vport(vport);
570 	spin_lock_irq(shost->host_lock);
571 	vport->fc_flag &= ~FC_VFI_REGISTERED;
572 	spin_unlock_irq(shost->host_lock);
573 	return 0;
574 }
575 
576 /**
577  * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
578  * @vport: pointer to a host virtual N_Port data structure.
579  * @sp: pointer to service parameter data structure.
580  *
581  * This routine is called from FLOGI/FDISC completion handler functions.
582  * lpfc_check_clean_addr_bit return 1 when FCID/Fabric portname/ Fabric
583  * node nodename is changed in the completion service parameter else return
584  * 0. This function also set flag in the vport data structure to delay
585  * NP_Port discovery after the FLOGI/FDISC completion if Clean address bit
586  * in FLOGI/FDISC response is cleared and FCID/Fabric portname/ Fabric
587  * node nodename is changed in the completion service parameter.
588  *
589  * Return code
590  *   0 - FCID and Fabric Nodename and Fabric portname is not changed.
591  *   1 - FCID or Fabric Nodename or Fabric portname is changed.
592  *
593  **/
594 static uint8_t
595 lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
596 		struct serv_parm *sp)
597 {
598 	struct lpfc_hba *phba = vport->phba;
599 	uint8_t fabric_param_changed = 0;
600 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
601 
602 	if ((vport->fc_prevDID != vport->fc_myDID) ||
603 		memcmp(&vport->fabric_portname, &sp->portName,
604 			sizeof(struct lpfc_name)) ||
605 		memcmp(&vport->fabric_nodename, &sp->nodeName,
606 			sizeof(struct lpfc_name)) ||
607 		(vport->vport_flag & FAWWPN_PARAM_CHG)) {
608 		fabric_param_changed = 1;
609 		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
610 	}
611 	/*
612 	 * Word 1 Bit 31 in common service parameter is overloaded.
613 	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
614 	 * Word 1 Bit 31 in FLOGI response is clean address bit
615 	 *
616 	 * If fabric parameter is changed and clean address bit is
617 	 * cleared delay nport discovery if
618 	 * - vport->fc_prevDID != 0 (not initial discovery) OR
619 	 * - lpfc_delay_discovery module parameter is set.
620 	 */
621 	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
622 	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
623 		spin_lock_irq(shost->host_lock);
624 		vport->fc_flag |= FC_DISC_DELAYED;
625 		spin_unlock_irq(shost->host_lock);
626 	}
627 
628 	return fabric_param_changed;
629 }
630 
631 
632 /**
633  * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
634  * @vport: pointer to a host virtual N_Port data structure.
635  * @ndlp: pointer to a node-list data structure.
636  * @sp: pointer to service parameter data structure.
637  * @irsp: pointer to the IOCB within the lpfc response IOCB.
638  *
639  * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
640  * function to handle the completion of a Fabric Login (FLOGI) into a fabric
641  * port in a fabric topology. It properly sets up the parameters to the @ndlp
642  * from the IOCB response. It also check the newly assigned N_Port ID to the
643  * @vport against the previously assigned N_Port ID. If it is different from
644  * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
645  * is invoked on all the remaining nodes with the @vport to unregister the
646  * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
647  * is invoked to register login to the fabric.
648  *
649  * Return code
650  *   0 - Success (currently, always return 0)
651  **/
652 static int
653 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
654 			   struct serv_parm *sp, IOCB_t *irsp)
655 {
656 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
657 	struct lpfc_hba  *phba = vport->phba;
658 	struct lpfc_nodelist *np;
659 	struct lpfc_nodelist *next_np;
660 	uint8_t fabric_param_changed;
661 
662 	spin_lock_irq(shost->host_lock);
663 	vport->fc_flag |= FC_FABRIC;
664 	spin_unlock_irq(shost->host_lock);
665 
666 	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
667 	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
668 		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
669 
670 	phba->fc_edtovResol = sp->cmn.edtovResolution;
671 	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
672 
673 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
674 		spin_lock_irq(shost->host_lock);
675 		vport->fc_flag |= FC_PUBLIC_LOOP;
676 		spin_unlock_irq(shost->host_lock);
677 	}
678 
679 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
680 	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
681 	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
682 	ndlp->nlp_class_sup = 0;
683 	if (sp->cls1.classValid)
684 		ndlp->nlp_class_sup |= FC_COS_CLASS1;
685 	if (sp->cls2.classValid)
686 		ndlp->nlp_class_sup |= FC_COS_CLASS2;
687 	if (sp->cls3.classValid)
688 		ndlp->nlp_class_sup |= FC_COS_CLASS3;
689 	if (sp->cls4.classValid)
690 		ndlp->nlp_class_sup |= FC_COS_CLASS4;
691 	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
692 				sp->cmn.bbRcvSizeLsb;
693 
694 	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
695 	if (fabric_param_changed) {
696 		/* Reset FDMI attribute masks based on config parameter */
697 		if (phba->cfg_enable_SmartSAN ||
698 		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
699 			/* Setup appropriate attribute masks */
700 			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
701 			if (phba->cfg_enable_SmartSAN)
702 				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
703 			else
704 				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
705 		} else {
706 			vport->fdmi_hba_mask = 0;
707 			vport->fdmi_port_mask = 0;
708 		}
709 
710 	}
711 	memcpy(&vport->fabric_portname, &sp->portName,
712 			sizeof(struct lpfc_name));
713 	memcpy(&vport->fabric_nodename, &sp->nodeName,
714 			sizeof(struct lpfc_name));
715 	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
716 
717 	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
718 		if (sp->cmn.response_multiple_NPort) {
719 			lpfc_printf_vlog(vport, KERN_WARNING,
720 					 LOG_ELS | LOG_VPORT,
721 					 "1816 FLOGI NPIV supported, "
722 					 "response data 0x%x\n",
723 					 sp->cmn.response_multiple_NPort);
724 			spin_lock_irq(&phba->hbalock);
725 			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
726 			spin_unlock_irq(&phba->hbalock);
727 		} else {
728 			/* Because we asked f/w for NPIV it still expects us
729 			to call reg_vnpid atleast for the physcial host */
730 			lpfc_printf_vlog(vport, KERN_WARNING,
731 					 LOG_ELS | LOG_VPORT,
732 					 "1817 Fabric does not support NPIV "
733 					 "- configuring single port mode.\n");
734 			spin_lock_irq(&phba->hbalock);
735 			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
736 			spin_unlock_irq(&phba->hbalock);
737 		}
738 	}
739 
740 	/*
741 	 * For FC we need to do some special processing because of the SLI
742 	 * Port's default settings of the Common Service Parameters.
743 	 */
744 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
745 	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
746 		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
747 		if (fabric_param_changed)
748 			lpfc_unregister_fcf_prep(phba);
749 
750 		/* This should just update the VFI CSPs*/
751 		if (vport->fc_flag & FC_VFI_REGISTERED)
752 			lpfc_issue_reg_vfi(vport);
753 	}
754 
755 	if (fabric_param_changed &&
756 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
757 
758 		/* If our NportID changed, we need to ensure all
759 		 * remaining NPORTs get unreg_login'ed.
760 		 */
761 		list_for_each_entry_safe(np, next_np,
762 					&vport->fc_nodes, nlp_listp) {
763 			if (!NLP_CHK_NODE_ACT(np))
764 				continue;
765 			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
766 				   !(np->nlp_flag & NLP_NPR_ADISC))
767 				continue;
768 			spin_lock_irq(shost->host_lock);
769 			np->nlp_flag &= ~NLP_NPR_ADISC;
770 			spin_unlock_irq(shost->host_lock);
771 			lpfc_unreg_rpi(vport, np);
772 		}
773 		lpfc_cleanup_pending_mbox(vport);
774 
775 		if (phba->sli_rev == LPFC_SLI_REV4) {
776 			lpfc_sli4_unreg_all_rpis(vport);
777 			lpfc_mbx_unreg_vpi(vport);
778 			spin_lock_irq(shost->host_lock);
779 			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
780 			spin_unlock_irq(shost->host_lock);
781 		}
782 
783 		/*
784 		 * For SLI3 and SLI4, the VPI needs to be reregistered in
785 		 * response to this fabric parameter change event.
786 		 */
787 		spin_lock_irq(shost->host_lock);
788 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
789 		spin_unlock_irq(shost->host_lock);
790 	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
791 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
792 			/*
793 			 * Driver needs to re-reg VPI in order for f/w
794 			 * to update the MAC address.
795 			 */
796 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
797 			lpfc_register_new_vport(phba, vport, ndlp);
798 			return 0;
799 	}
800 
801 	if (phba->sli_rev < LPFC_SLI_REV4) {
802 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
803 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
804 		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
805 			lpfc_register_new_vport(phba, vport, ndlp);
806 		else
807 			lpfc_issue_fabric_reglogin(vport);
808 	} else {
809 		ndlp->nlp_type |= NLP_FABRIC;
810 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
811 		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
812 			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
813 			lpfc_start_fdiscs(phba);
814 			lpfc_do_scr_ns_plogi(phba, vport);
815 		} else if (vport->fc_flag & FC_VFI_REGISTERED)
816 			lpfc_issue_init_vpi(vport);
817 		else {
818 			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
819 					"3135 Need register VFI: (x%x/%x)\n",
820 					vport->fc_prevDID, vport->fc_myDID);
821 			lpfc_issue_reg_vfi(vport);
822 		}
823 	}
824 	return 0;
825 }
826 
827 /**
828  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
829  * @vport: pointer to a host virtual N_Port data structure.
830  * @ndlp: pointer to a node-list data structure.
831  * @sp: pointer to service parameter data structure.
832  *
833  * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
834  * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
835  * in a point-to-point topology. First, the @vport's N_Port Name is compared
836  * with the received N_Port Name: if the @vport's N_Port Name is greater than
837  * the received N_Port Name lexicographically, this node shall assign local
838  * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
839  * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
840  * this node shall just wait for the remote node to issue PLOGI and assign
841  * N_Port IDs.
842  *
843  * Return code
844  *   0 - Success
845  *   -ENXIO - Fail
846  **/
847 static int
848 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
849 			  struct serv_parm *sp)
850 {
851 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
852 	struct lpfc_hba  *phba = vport->phba;
853 	LPFC_MBOXQ_t *mbox;
854 	int rc;
855 
856 	spin_lock_irq(shost->host_lock);
857 	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
858 	vport->fc_flag |= FC_PT2PT;
859 	spin_unlock_irq(shost->host_lock);
860 
861 	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
862 	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
863 		lpfc_unregister_fcf_prep(phba);
864 
865 		spin_lock_irq(shost->host_lock);
866 		vport->fc_flag &= ~FC_VFI_REGISTERED;
867 		spin_unlock_irq(shost->host_lock);
868 		phba->fc_topology_changed = 0;
869 	}
870 
871 	rc = memcmp(&vport->fc_portname, &sp->portName,
872 		    sizeof(vport->fc_portname));
873 
874 	if (rc >= 0) {
875 		/* This side will initiate the PLOGI */
876 		spin_lock_irq(shost->host_lock);
877 		vport->fc_flag |= FC_PT2PT_PLOGI;
878 		spin_unlock_irq(shost->host_lock);
879 
880 		/*
881 		 * N_Port ID cannot be 0, set our Id to LocalID
882 		 * the other side will be RemoteID.
883 		 */
884 
885 		/* not equal */
886 		if (rc)
887 			vport->fc_myDID = PT2PT_LocalID;
888 
889 		/* Decrement ndlp reference count indicating that ndlp can be
890 		 * safely released when other references to it are done.
891 		 */
892 		lpfc_nlp_put(ndlp);
893 
894 		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
895 		if (!ndlp) {
896 			/*
897 			 * Cannot find existing Fabric ndlp, so allocate a
898 			 * new one
899 			 */
900 			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
901 			if (!ndlp)
902 				goto fail;
903 		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
904 			ndlp = lpfc_enable_node(vport, ndlp,
905 						NLP_STE_UNUSED_NODE);
906 			if(!ndlp)
907 				goto fail;
908 		}
909 
910 		memcpy(&ndlp->nlp_portname, &sp->portName,
911 		       sizeof(struct lpfc_name));
912 		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
913 		       sizeof(struct lpfc_name));
914 		/* Set state will put ndlp onto node list if not already done */
915 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
916 		spin_lock_irq(shost->host_lock);
917 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
918 		spin_unlock_irq(shost->host_lock);
919 	} else
920 		/* This side will wait for the PLOGI, decrement ndlp reference
921 		 * count indicating that ndlp can be released when other
922 		 * references to it are done.
923 		 */
924 		lpfc_nlp_put(ndlp);
925 
926 	/* If we are pt2pt with another NPort, force NPIV off! */
927 	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
928 
929 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
930 	if (!mbox)
931 		goto fail;
932 
933 	lpfc_config_link(phba, mbox);
934 
935 	mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
936 	mbox->vport = vport;
937 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
938 	if (rc == MBX_NOT_FINISHED) {
939 		mempool_free(mbox, phba->mbox_mem_pool);
940 		goto fail;
941 	}
942 
943 	return 0;
944 fail:
945 	return -ENXIO;
946 }
947 
948 /**
949  * lpfc_cmpl_els_flogi - Completion callback function for flogi
950  * @phba: pointer to lpfc hba data structure.
951  * @cmdiocb: pointer to lpfc command iocb data structure.
952  * @rspiocb: pointer to lpfc response iocb data structure.
953  *
954  * This routine is the top-level completion callback function for issuing
955  * a Fabric Login (FLOGI) command. If the response IOCB reported error,
956  * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
957  * retry has been made (either immediately or delayed with lpfc_els_retry()
958  * returning 1), the command IOCB will be released and function returned.
959  * If the retry attempt has been given up (possibly reach the maximum
960  * number of retries), one additional decrement of ndlp reference shall be
961  * invoked before going out after releasing the command IOCB. This will
962  * actually release the remote node (Note, lpfc_els_free_iocb() will also
963  * invoke one decrement of ndlp reference count). If no error reported in
964  * the IOCB status, the command Port ID field is used to determine whether
965  * this is a point-to-point topology or a fabric topology: if the Port ID
966  * field is assigned, it is a fabric topology; otherwise, it is a
967  * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
968  * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
969  * specific topology completion conditions.
970  **/
971 static void
972 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
973 		    struct lpfc_iocbq *rspiocb)
974 {
975 	struct lpfc_vport *vport = cmdiocb->vport;
976 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
977 	IOCB_t *irsp = &rspiocb->iocb;
978 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
979 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
980 	struct serv_parm *sp;
981 	uint16_t fcf_index;
982 	int rc;
983 
984 	/* Check to see if link went down during discovery */
985 	if (lpfc_els_chk_latt(vport)) {
986 		/* One additional decrement on node reference count to
987 		 * trigger the release of the node
988 		 */
989 		lpfc_nlp_put(ndlp);
990 		goto out;
991 	}
992 
993 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
994 		"FLOGI cmpl:      status:x%x/x%x state:x%x",
995 		irsp->ulpStatus, irsp->un.ulpWord[4],
996 		vport->port_state);
997 
998 	if (irsp->ulpStatus) {
999 		/*
1000 		 * In case of FIP mode, perform roundrobin FCF failover
1001 		 * due to new FCF discovery
1002 		 */
1003 		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
1004 		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
1005 			if (phba->link_state < LPFC_LINK_UP)
1006 				goto stop_rr_fcf_flogi;
1007 			if ((phba->fcoe_cvl_eventtag_attn ==
1008 			     phba->fcoe_cvl_eventtag) &&
1009 			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
1010 			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1011 			    IOERR_SLI_ABORTED))
1012 				goto stop_rr_fcf_flogi;
1013 			else
1014 				phba->fcoe_cvl_eventtag_attn =
1015 					phba->fcoe_cvl_eventtag;
1016 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1017 					"2611 FLOGI failed on FCF (x%x), "
1018 					"status:x%x/x%x, tmo:x%x, perform "
1019 					"roundrobin FCF failover\n",
1020 					phba->fcf.current_rec.fcf_indx,
1021 					irsp->ulpStatus, irsp->un.ulpWord[4],
1022 					irsp->ulpTimeout);
1023 			lpfc_sli4_set_fcf_flogi_fail(phba,
1024 					phba->fcf.current_rec.fcf_indx);
1025 			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
1026 			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
1027 			if (rc)
1028 				goto out;
1029 		}
1030 
1031 stop_rr_fcf_flogi:
1032 		/* FLOGI failure */
1033 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1034 				"2858 FLOGI failure Status:x%x/x%x TMO:x%x "
1035 				"Data x%x x%x\n",
1036 				irsp->ulpStatus, irsp->un.ulpWord[4],
1037 				irsp->ulpTimeout, phba->hba_flag,
1038 				phba->fcf.fcf_flag);
1039 
1040 		/* Check for retry */
1041 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
1042 			goto out;
1043 
1044 		/* FLOGI failure */
1045 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1046 				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
1047 				 irsp->ulpStatus, irsp->un.ulpWord[4],
1048 				 irsp->ulpTimeout);
1049 
1050 
1051 		/* If this is not a loop open failure, bail out */
1052 		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
1053 		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
1054 					IOERR_LOOP_OPEN_FAILURE)))
1055 			goto flogifail;
1056 
1057 		/* FLOGI failed, so there is no fabric */
1058 		spin_lock_irq(shost->host_lock);
1059 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
1060 		spin_unlock_irq(shost->host_lock);
1061 
1062 		/* If private loop, then allow max outstanding els to be
1063 		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
1064 		 * alpa map would take too long otherwise.
1065 		 */
1066 		if (phba->alpa_map[0] == 0)
1067 			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
1068 		if ((phba->sli_rev == LPFC_SLI_REV4) &&
1069 		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
1070 		     (vport->fc_prevDID != vport->fc_myDID) ||
1071 			phba->fc_topology_changed)) {
1072 			if (vport->fc_flag & FC_VFI_REGISTERED) {
1073 				if (phba->fc_topology_changed) {
1074 					lpfc_unregister_fcf_prep(phba);
1075 					spin_lock_irq(shost->host_lock);
1076 					vport->fc_flag &= ~FC_VFI_REGISTERED;
1077 					spin_unlock_irq(shost->host_lock);
1078 					phba->fc_topology_changed = 0;
1079 				} else {
1080 					lpfc_sli4_unreg_all_rpis(vport);
1081 				}
1082 			}
1083 
1084 			/* Do not register VFI if the driver aborted FLOGI */
1085 			if (!lpfc_error_lost_link(irsp))
1086 				lpfc_issue_reg_vfi(vport);
1087 			lpfc_nlp_put(ndlp);
1088 			goto out;
1089 		}
1090 		goto flogifail;
1091 	}
1092 	spin_lock_irq(shost->host_lock);
1093 	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
1094 	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
1095 	spin_unlock_irq(shost->host_lock);
1096 
1097 	/*
1098 	 * The FLogI succeeded.  Sync the data for the CPU before
1099 	 * accessing it.
1100 	 */
1101 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1102 	if (!prsp)
1103 		goto out;
1104 	sp = prsp->virt + sizeof(uint32_t);
1105 
1106 	/* FLOGI completes successfully */
1107 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1108 			 "0101 FLOGI completes successfully, I/O tag:x%x, "
1109 			 "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag,
1110 			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
1111 			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
1112 			 vport->port_state, vport->fc_flag);
1113 
1114 	if (vport->port_state == LPFC_FLOGI) {
1115 		/*
1116 		 * If Common Service Parameters indicate Nport
1117 		 * we are point to point, if Fport we are Fabric.
1118 		 */
1119 		if (sp->cmn.fPort)
1120 			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
1121 		else if (!(phba->hba_flag & HBA_FCOE_MODE))
1122 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
1123 		else {
1124 			lpfc_printf_vlog(vport, KERN_ERR,
1125 				LOG_FIP | LOG_ELS,
1126 				"2831 FLOGI response with cleared Fabric "
1127 				"bit fcf_index 0x%x "
1128 				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
1129 				"Fabric Name "
1130 				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
1131 				phba->fcf.current_rec.fcf_indx,
1132 				phba->fcf.current_rec.switch_name[0],
1133 				phba->fcf.current_rec.switch_name[1],
1134 				phba->fcf.current_rec.switch_name[2],
1135 				phba->fcf.current_rec.switch_name[3],
1136 				phba->fcf.current_rec.switch_name[4],
1137 				phba->fcf.current_rec.switch_name[5],
1138 				phba->fcf.current_rec.switch_name[6],
1139 				phba->fcf.current_rec.switch_name[7],
1140 				phba->fcf.current_rec.fabric_name[0],
1141 				phba->fcf.current_rec.fabric_name[1],
1142 				phba->fcf.current_rec.fabric_name[2],
1143 				phba->fcf.current_rec.fabric_name[3],
1144 				phba->fcf.current_rec.fabric_name[4],
1145 				phba->fcf.current_rec.fabric_name[5],
1146 				phba->fcf.current_rec.fabric_name[6],
1147 				phba->fcf.current_rec.fabric_name[7]);
1148 			lpfc_nlp_put(ndlp);
1149 			spin_lock_irq(&phba->hbalock);
1150 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1151 			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1152 			spin_unlock_irq(&phba->hbalock);
1153 			goto out;
1154 		}
1155 		if (!rc) {
1156 			/* Mark the FCF discovery process done */
1157 			if (phba->hba_flag & HBA_FIP_SUPPORT)
1158 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
1159 						LOG_ELS,
1160 						"2769 FLOGI to FCF (x%x) "
1161 						"completed successfully\n",
1162 						phba->fcf.current_rec.fcf_indx);
1163 			spin_lock_irq(&phba->hbalock);
1164 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1165 			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
1166 			spin_unlock_irq(&phba->hbalock);
1167 			goto out;
1168 		}
1169 	}
1170 
1171 flogifail:
1172 	spin_lock_irq(&phba->hbalock);
1173 	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
1174 	spin_unlock_irq(&phba->hbalock);
1175 
1176 	lpfc_nlp_put(ndlp);
1177 
1178 	if (!lpfc_error_lost_link(irsp)) {
1179 		/* FLOGI failed, so just use loop map to make discovery list */
1180 		lpfc_disc_list_loopmap(vport);
1181 
1182 		/* Start discovery */
1183 		lpfc_disc_start(vport);
1184 	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
1185 			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1186 			 IOERR_SLI_ABORTED) &&
1187 			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
1188 			 IOERR_SLI_DOWN))) &&
1189 			(phba->link_state != LPFC_CLEAR_LA)) {
1190 		/* If FLOGI failed enable link interrupt. */
1191 		lpfc_issue_clear_la(phba, vport);
1192 	}
1193 out:
1194 	lpfc_els_free_iocb(phba, cmdiocb);
1195 }
1196 
1197 /**
1198  * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
1199  * @vport: pointer to a host virtual N_Port data structure.
1200  * @ndlp: pointer to a node-list data structure.
1201  * @retry: number of retries to the command IOCB.
1202  *
1203  * This routine issues a Fabric Login (FLOGI) Request ELS command
1204  * for a @vport. The initiator service parameters are put into the payload
1205  * of the FLOGI Request IOCB and the top-level callback function pointer
1206  * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
1207  * function field. The lpfc_issue_fabric_iocb routine is invoked to send
1208  * out FLOGI ELS command with one outstanding fabric IOCB at a time.
1209  *
1210  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1211  * will be incremented by 1 for holding the ndlp and the reference to ndlp
1212  * will be stored into the context1 field of the IOCB for the completion
1213  * callback function to the FLOGI ELS command.
1214  *
1215  * Return code
1216  *   0 - successfully issued flogi iocb for @vport
1217  *   1 - failed to issue flogi iocb for @vport
1218  **/
1219 static int
1220 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1221 		     uint8_t retry)
1222 {
1223 	struct lpfc_hba  *phba = vport->phba;
1224 	struct serv_parm *sp;
1225 	IOCB_t *icmd;
1226 	struct lpfc_iocbq *elsiocb;
1227 	uint8_t *pcmd;
1228 	uint16_t cmdsize;
1229 	uint32_t tmo;
1230 	int rc;
1231 
1232 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1233 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1234 				     ndlp->nlp_DID, ELS_CMD_FLOGI);
1235 
1236 	if (!elsiocb)
1237 		return 1;
1238 
1239 	icmd = &elsiocb->iocb;
1240 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1241 
1242 	/* For FLOGI request, remainder of payload is service parameters */
1243 	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1244 	pcmd += sizeof(uint32_t);
1245 	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1246 	sp = (struct serv_parm *) pcmd;
1247 
1248 	/* Setup CSPs accordingly for Fabric */
1249 	sp->cmn.e_d_tov = 0;
1250 	sp->cmn.w2.r_a_tov = 0;
1251 	sp->cmn.virtual_fabric_support = 0;
1252 	sp->cls1.classValid = 0;
1253 	if (sp->cmn.fcphLow < FC_PH3)
1254 		sp->cmn.fcphLow = FC_PH3;
1255 	if (sp->cmn.fcphHigh < FC_PH3)
1256 		sp->cmn.fcphHigh = FC_PH3;
1257 
1258 	if  (phba->sli_rev == LPFC_SLI_REV4) {
1259 		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1260 		    LPFC_SLI_INTF_IF_TYPE_0) {
1261 			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1262 			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1263 			/* FLOGI needs to be 3 for WQE FCFI */
1264 			/* Set the fcfi to the fcfi we registered with */
1265 			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1266 		}
1267 		/* Can't do SLI4 class2 without support sequence coalescing */
1268 		sp->cls2.classValid = 0;
1269 		sp->cls2.seqDelivery = 0;
1270 	} else {
1271 		/* Historical, setting sequential-delivery bit for SLI3 */
1272 		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
1273 		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
1274 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1275 			sp->cmn.request_multiple_Nport = 1;
1276 			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1277 			icmd->ulpCt_h = 1;
1278 			icmd->ulpCt_l = 0;
1279 		} else
1280 			sp->cmn.request_multiple_Nport = 0;
1281 	}
1282 
1283 	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
1284 		icmd->un.elsreq64.myID = 0;
1285 		icmd->un.elsreq64.fl = 1;
1286 	}
1287 
1288 	tmo = phba->fc_ratov;
1289 	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1290 	lpfc_set_disctmo(vport);
1291 	phba->fc_ratov = tmo;
1292 
1293 	phba->fc_stat.elsXmitFLOGI++;
1294 	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1295 
1296 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1297 		"Issue FLOGI:     opt:x%x",
1298 		phba->sli3_options, 0, 0);
1299 
1300 	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1301 	if (rc == IOCB_ERROR) {
1302 		lpfc_els_free_iocb(phba, elsiocb);
1303 		return 1;
1304 	}
1305 	return 0;
1306 }
1307 
1308 /**
1309  * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1310  * @phba: pointer to lpfc hba data structure.
1311  *
1312  * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1313  * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1314  * list and issues an abort IOCB commond on each outstanding IOCB that
1315  * contains a active Fabric_DID ndlp. Note that this function is to issue
1316  * the abort IOCB command on all the outstanding IOCBs, thus when this
1317  * function returns, it does not guarantee all the IOCBs are actually aborted.
1318  *
1319  * Return code
1320  *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1321  **/
1322 int
1323 lpfc_els_abort_flogi(struct lpfc_hba *phba)
1324 {
1325 	struct lpfc_sli_ring *pring;
1326 	struct lpfc_iocbq *iocb, *next_iocb;
1327 	struct lpfc_nodelist *ndlp;
1328 	IOCB_t *icmd;
1329 
1330 	/* Abort outstanding I/O on NPort <nlp_DID> */
1331 	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1332 			"0201 Abort outstanding I/O on NPort x%x\n",
1333 			Fabric_DID);
1334 
1335 	pring = lpfc_phba_elsring(phba);
1336 
1337 	/*
1338 	 * Check the txcmplq for an iocb that matches the nport the driver is
1339 	 * searching for.
1340 	 */
1341 	spin_lock_irq(&phba->hbalock);
1342 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1343 		icmd = &iocb->iocb;
1344 		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
1345 			ndlp = (struct lpfc_nodelist *)(iocb->context1);
1346 			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1347 			    (ndlp->nlp_DID == Fabric_DID))
1348 				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1349 		}
1350 	}
1351 	spin_unlock_irq(&phba->hbalock);
1352 
1353 	return 0;
1354 }
1355 
1356 /**
1357  * lpfc_initial_flogi - Issue an initial fabric login for a vport
1358  * @vport: pointer to a host virtual N_Port data structure.
1359  *
1360  * This routine issues an initial Fabric Login (FLOGI) for the @vport
1361  * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1362  * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1363  * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1364  * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1365  * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1366  * @vport.
1367  *
1368  * Return code
1369  *   0 - failed to issue initial flogi for @vport
1370  *   1 - successfully issued initial flogi for @vport
1371  **/
1372 int
1373 lpfc_initial_flogi(struct lpfc_vport *vport)
1374 {
1375 	struct lpfc_nodelist *ndlp;
1376 
1377 	vport->port_state = LPFC_FLOGI;
1378 	lpfc_set_disctmo(vport);
1379 
1380 	/* First look for the Fabric ndlp */
1381 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1382 	if (!ndlp) {
1383 		/* Cannot find existing Fabric ndlp, so allocate a new one */
1384 		ndlp = lpfc_nlp_init(vport, Fabric_DID);
1385 		if (!ndlp)
1386 			return 0;
1387 		/* Set the node type */
1388 		ndlp->nlp_type |= NLP_FABRIC;
1389 		/* Put ndlp onto node list */
1390 		lpfc_enqueue_node(vport, ndlp);
1391 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1392 		/* re-setup ndlp without removing from node list */
1393 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1394 		if (!ndlp)
1395 			return 0;
1396 	}
1397 
1398 	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
1399 		/* This decrement of reference count to node shall kick off
1400 		 * the release of the node.
1401 		 */
1402 		lpfc_nlp_put(ndlp);
1403 		return 0;
1404 	}
1405 	return 1;
1406 }
1407 
1408 /**
1409  * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1410  * @vport: pointer to a host virtual N_Port data structure.
1411  *
1412  * This routine issues an initial Fabric Discover (FDISC) for the @vport
1413  * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
1414  * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
1415  * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
1416  * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1417  * is then invoked with the @vport and the ndlp to perform the FDISC for the
1418  * @vport.
1419  *
1420  * Return code
1421  *   0 - failed to issue initial fdisc for @vport
1422  *   1 - successfully issued initial fdisc for @vport
1423  **/
1424 int
1425 lpfc_initial_fdisc(struct lpfc_vport *vport)
1426 {
1427 	struct lpfc_nodelist *ndlp;
1428 
1429 	/* First look for the Fabric ndlp */
1430 	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1431 	if (!ndlp) {
1432 		/* Cannot find existing Fabric ndlp, so allocate a new one */
1433 		ndlp = lpfc_nlp_init(vport, Fabric_DID);
1434 		if (!ndlp)
1435 			return 0;
1436 		/* Put ndlp onto node list */
1437 		lpfc_enqueue_node(vport, ndlp);
1438 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1439 		/* re-setup ndlp without removing from node list */
1440 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1441 		if (!ndlp)
1442 			return 0;
1443 	}
1444 
1445 	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1446 		/* decrement node reference count to trigger the release of
1447 		 * the node.
1448 		 */
1449 		lpfc_nlp_put(ndlp);
1450 		return 0;
1451 	}
1452 	return 1;
1453 }
1454 
1455 /**
1456  * lpfc_more_plogi - Check and issue remaining plogis for a vport
1457  * @vport: pointer to a host virtual N_Port data structure.
1458  *
1459  * This routine checks whether there are more remaining Port Logins
1460  * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1461  * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1462  * to issue ELS PLOGIs up to the configured discover threads with the
1463  * @vport (@vport->cfg_discovery_threads). The function also decrement
1464  * the @vport's num_disc_node by 1 if it is not already 0.
1465  **/
1466 void
1467 lpfc_more_plogi(struct lpfc_vport *vport)
1468 {
1469 	if (vport->num_disc_nodes)
1470 		vport->num_disc_nodes--;
1471 
1472 	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
1473 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1474 			 "0232 Continue discovery with %d PLOGIs to go "
1475 			 "Data: x%x x%x x%x\n",
1476 			 vport->num_disc_nodes, vport->fc_plogi_cnt,
1477 			 vport->fc_flag, vport->port_state);
1478 	/* Check to see if there are more PLOGIs to be sent */
1479 	if (vport->fc_flag & FC_NLP_MORE)
1480 		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
1481 		lpfc_els_disc_plogi(vport);
1482 
1483 	return;
1484 }
1485 
1486 /**
1487  * lpfc_plogi_confirm_nport - Confirm pologi wwpn matches stored ndlp
1488  * @phba: pointer to lpfc hba data structure.
1489  * @prsp: pointer to response IOCB payload.
1490  * @ndlp: pointer to a node-list data structure.
1491  *
1492  * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1493  * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt.
1494  * The following cases are considered N_Port confirmed:
1495  * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1496  * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1497  * it does not have WWPN assigned either. If the WWPN is confirmed, the
1498  * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1499  * 1) if there is a node on vport list other than the @ndlp with the same
1500  * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1501  * on that node to release the RPI associated with the node; 2) if there is
1502  * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1503  * into, a new node shall be allocated (or activated). In either case, the
1504  * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1505  * be released and the new_ndlp shall be put on to the vport node list and
1506  * its pointer returned as the confirmed node.
1507  *
1508  * Note that before the @ndlp got "released", the keepDID from not-matching
1509  * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
1510  * of the @ndlp. This is because the release of @ndlp is actually to put it
1511  * into an inactive state on the vport node list and the vport node list
1512  * management algorithm does not allow two node with a same DID.
1513  *
1514  * Return code
1515  *   pointer to the PLOGI N_Port @ndlp
1516  **/
1517 static struct lpfc_nodelist *
1518 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1519 			 struct lpfc_nodelist *ndlp)
1520 {
1521 	struct lpfc_vport *vport = ndlp->vport;
1522 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1523 	struct lpfc_nodelist *new_ndlp;
1524 	struct lpfc_rport_data *rdata;
1525 	struct fc_rport *rport;
1526 	struct serv_parm *sp;
1527 	uint8_t  name[sizeof(struct lpfc_name)];
1528 	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
1529 	uint16_t keep_nlp_state;
1530 	int  put_node;
1531 	int  put_rport;
1532 	unsigned long *active_rrqs_xri_bitmap = NULL;
1533 
1534 	/* Fabric nodes can have the same WWPN so we don't bother searching
1535 	 * by WWPN.  Just return the ndlp that was given to us.
1536 	 */
1537 	if (ndlp->nlp_type & NLP_FABRIC)
1538 		return ndlp;
1539 
1540 	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1541 	memset(name, 0, sizeof(struct lpfc_name));
1542 
1543 	/* Now we find out if the NPort we are logging into, matches the WWPN
1544 	 * we have for that ndlp. If not, we have some work to do.
1545 	 */
1546 	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1547 
1548 	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1549 		return ndlp;
1550 	if (phba->sli_rev == LPFC_SLI_REV4) {
1551 		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
1552 						       GFP_KERNEL);
1553 		if (active_rrqs_xri_bitmap)
1554 			memset(active_rrqs_xri_bitmap, 0,
1555 			       phba->cfg_rrq_xri_bitmap_sz);
1556 	}
1557 
1558 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1559 		 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n",
1560 		 ndlp, ndlp->nlp_DID, new_ndlp);
1561 
1562 	if (!new_ndlp) {
1563 		rc = memcmp(&ndlp->nlp_portname, name,
1564 			    sizeof(struct lpfc_name));
1565 		if (!rc) {
1566 			if (active_rrqs_xri_bitmap)
1567 				mempool_free(active_rrqs_xri_bitmap,
1568 					     phba->active_rrq_pool);
1569 			return ndlp;
1570 		}
1571 		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
1572 		if (!new_ndlp) {
1573 			if (active_rrqs_xri_bitmap)
1574 				mempool_free(active_rrqs_xri_bitmap,
1575 					     phba->active_rrq_pool);
1576 			return ndlp;
1577 		}
1578 	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1579 		rc = memcmp(&ndlp->nlp_portname, name,
1580 			    sizeof(struct lpfc_name));
1581 		if (!rc) {
1582 			if (active_rrqs_xri_bitmap)
1583 				mempool_free(active_rrqs_xri_bitmap,
1584 					     phba->active_rrq_pool);
1585 			return ndlp;
1586 		}
1587 		new_ndlp = lpfc_enable_node(vport, new_ndlp,
1588 						NLP_STE_UNUSED_NODE);
1589 		if (!new_ndlp) {
1590 			if (active_rrqs_xri_bitmap)
1591 				mempool_free(active_rrqs_xri_bitmap,
1592 					     phba->active_rrq_pool);
1593 			return ndlp;
1594 		}
1595 		keepDID = new_ndlp->nlp_DID;
1596 		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
1597 			memcpy(active_rrqs_xri_bitmap,
1598 			       new_ndlp->active_rrqs_xri_bitmap,
1599 			       phba->cfg_rrq_xri_bitmap_sz);
1600 	} else {
1601 		keepDID = new_ndlp->nlp_DID;
1602 		if (phba->sli_rev == LPFC_SLI_REV4 &&
1603 		    active_rrqs_xri_bitmap)
1604 			memcpy(active_rrqs_xri_bitmap,
1605 			       new_ndlp->active_rrqs_xri_bitmap,
1606 			       phba->cfg_rrq_xri_bitmap_sz);
1607 	}
1608 
1609 	lpfc_unreg_rpi(vport, new_ndlp);
1610 	new_ndlp->nlp_DID = ndlp->nlp_DID;
1611 	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1612 	if (phba->sli_rev == LPFC_SLI_REV4)
1613 		memcpy(new_ndlp->active_rrqs_xri_bitmap,
1614 		       ndlp->active_rrqs_xri_bitmap,
1615 		       phba->cfg_rrq_xri_bitmap_sz);
1616 
1617 	spin_lock_irq(shost->host_lock);
1618 	keep_nlp_flag = new_ndlp->nlp_flag;
1619 	new_ndlp->nlp_flag = ndlp->nlp_flag;
1620 	ndlp->nlp_flag = keep_nlp_flag;
1621 	spin_unlock_irq(shost->host_lock);
1622 
1623 	/* Set nlp_states accordingly */
1624 	keep_nlp_state = new_ndlp->nlp_state;
1625 	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1626 
1627 	/* Move this back to NPR state */
1628 	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1629 		/* The new_ndlp is replacing ndlp totally, so we need
1630 		 * to put ndlp on UNUSED list and try to free it.
1631 		 */
1632 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1633 			 "3179 PLOGI confirm NEW: %x %x\n",
1634 			 new_ndlp->nlp_DID, keepDID);
1635 
1636 		/* Fix up the rport accordingly */
1637 		rport =  ndlp->rport;
1638 		if (rport) {
1639 			rdata = rport->dd_data;
1640 			if (rdata->pnode == ndlp) {
1641 				/* break the link before dropping the ref */
1642 				ndlp->rport = NULL;
1643 				lpfc_nlp_put(ndlp);
1644 				rdata->pnode = lpfc_nlp_get(new_ndlp);
1645 				new_ndlp->rport = rport;
1646 			}
1647 			new_ndlp->nlp_type = ndlp->nlp_type;
1648 		}
1649 		/* We shall actually free the ndlp with both nlp_DID and
1650 		 * nlp_portname fields equals 0 to avoid any ndlp on the
1651 		 * nodelist never to be used.
1652 		 */
1653 		if (ndlp->nlp_DID == 0) {
1654 			spin_lock_irq(&phba->ndlp_lock);
1655 			NLP_SET_FREE_REQ(ndlp);
1656 			spin_unlock_irq(&phba->ndlp_lock);
1657 		}
1658 
1659 		/* Two ndlps cannot have the same did on the nodelist */
1660 		ndlp->nlp_DID = keepDID;
1661 		if (phba->sli_rev == LPFC_SLI_REV4 &&
1662 		    active_rrqs_xri_bitmap)
1663 			memcpy(ndlp->active_rrqs_xri_bitmap,
1664 			       active_rrqs_xri_bitmap,
1665 			       phba->cfg_rrq_xri_bitmap_sz);
1666 
1667 		if (!NLP_CHK_NODE_ACT(ndlp))
1668 			lpfc_drop_node(vport, ndlp);
1669 	}
1670 	else {
1671 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1672 			 "3180 PLOGI confirm SWAP: %x %x\n",
1673 			 new_ndlp->nlp_DID, keepDID);
1674 
1675 		lpfc_unreg_rpi(vport, ndlp);
1676 
1677 		/* Two ndlps cannot have the same did */
1678 		ndlp->nlp_DID = keepDID;
1679 		if (phba->sli_rev == LPFC_SLI_REV4 &&
1680 		    active_rrqs_xri_bitmap)
1681 			memcpy(ndlp->active_rrqs_xri_bitmap,
1682 			       active_rrqs_xri_bitmap,
1683 			       phba->cfg_rrq_xri_bitmap_sz);
1684 
1685 		/* Since we are switching over to the new_ndlp,
1686 		 * reset the old ndlp state
1687 		 */
1688 		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
1689 		    (ndlp->nlp_state == NLP_STE_MAPPED_NODE))
1690 			keep_nlp_state = NLP_STE_NPR_NODE;
1691 		lpfc_nlp_set_state(vport, ndlp, keep_nlp_state);
1692 
1693 		/* Fix up the rport accordingly */
1694 		rport = ndlp->rport;
1695 		if (rport) {
1696 			rdata = rport->dd_data;
1697 			put_node = rdata->pnode != NULL;
1698 			put_rport = ndlp->rport != NULL;
1699 			rdata->pnode = NULL;
1700 			ndlp->rport = NULL;
1701 			if (put_node)
1702 				lpfc_nlp_put(ndlp);
1703 			if (put_rport)
1704 				put_device(&rport->dev);
1705 		}
1706 	}
1707 	if (phba->sli_rev == LPFC_SLI_REV4 &&
1708 	    active_rrqs_xri_bitmap)
1709 		mempool_free(active_rrqs_xri_bitmap,
1710 			     phba->active_rrq_pool);
1711 	return new_ndlp;
1712 }
1713 
1714 /**
1715  * lpfc_end_rscn - Check and handle more rscn for a vport
1716  * @vport: pointer to a host virtual N_Port data structure.
1717  *
1718  * This routine checks whether more Registration State Change
1719  * Notifications (RSCNs) came in while the discovery state machine was in
1720  * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1721  * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1722  * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1723  * handling the RSCNs.
1724  **/
1725 void
1726 lpfc_end_rscn(struct lpfc_vport *vport)
1727 {
1728 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1729 
1730 	if (vport->fc_flag & FC_RSCN_MODE) {
1731 		/*
1732 		 * Check to see if more RSCNs came in while we were
1733 		 * processing this one.
1734 		 */
1735 		if (vport->fc_rscn_id_cnt ||
1736 		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1737 			lpfc_els_handle_rscn(vport);
1738 		else {
1739 			spin_lock_irq(shost->host_lock);
1740 			vport->fc_flag &= ~FC_RSCN_MODE;
1741 			spin_unlock_irq(shost->host_lock);
1742 		}
1743 	}
1744 }
1745 
1746 /**
1747  * lpfc_cmpl_els_rrq - Completion handled for els RRQs.
1748  * @phba: pointer to lpfc hba data structure.
1749  * @cmdiocb: pointer to lpfc command iocb data structure.
1750  * @rspiocb: pointer to lpfc response iocb data structure.
1751  *
1752  * This routine will call the clear rrq function to free the rrq and
1753  * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not
1754  * exist then the clear_rrq is still called because the rrq needs to
1755  * be freed.
1756  **/
1757 
1758 static void
1759 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1760 		    struct lpfc_iocbq *rspiocb)
1761 {
1762 	struct lpfc_vport *vport = cmdiocb->vport;
1763 	IOCB_t *irsp;
1764 	struct lpfc_nodelist *ndlp;
1765 	struct lpfc_node_rrq *rrq;
1766 
1767 	/* we pass cmdiocb to state machine which needs rspiocb as well */
1768 	rrq = cmdiocb->context_un.rrq;
1769 	cmdiocb->context_un.rsp_iocb = rspiocb;
1770 
1771 	irsp = &rspiocb->iocb;
1772 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1773 		"RRQ cmpl:      status:x%x/x%x did:x%x",
1774 		irsp->ulpStatus, irsp->un.ulpWord[4],
1775 		irsp->un.elsreq64.remoteID);
1776 
1777 	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1778 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) {
1779 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1780 				 "2882 RRQ completes to NPort x%x "
1781 				 "with no ndlp. Data: x%x x%x x%x\n",
1782 				 irsp->un.elsreq64.remoteID,
1783 				 irsp->ulpStatus, irsp->un.ulpWord[4],
1784 				 irsp->ulpIoTag);
1785 		goto out;
1786 	}
1787 
1788 	/* rrq completes to NPort <nlp_DID> */
1789 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1790 			 "2880 RRQ completes to NPort x%x "
1791 			 "Data: x%x x%x x%x x%x x%x\n",
1792 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1793 			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1794 
1795 	if (irsp->ulpStatus) {
1796 		/* Check for retry */
1797 		/* RRQ failed Don't print the vport to vport rjts */
1798 		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1799 			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1800 			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1801 			(phba)->pport->cfg_log_verbose & LOG_ELS)
1802 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1803 				 "2881 RRQ failure DID:%06X Status:x%x/x%x\n",
1804 				 ndlp->nlp_DID, irsp->ulpStatus,
1805 				 irsp->un.ulpWord[4]);
1806 	}
1807 out:
1808 	if (rrq)
1809 		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1810 	lpfc_els_free_iocb(phba, cmdiocb);
1811 	return;
1812 }
1813 /**
1814  * lpfc_cmpl_els_plogi - Completion callback function for plogi
1815  * @phba: pointer to lpfc hba data structure.
1816  * @cmdiocb: pointer to lpfc command iocb data structure.
1817  * @rspiocb: pointer to lpfc response iocb data structure.
1818  *
1819  * This routine is the completion callback function for issuing the Port
1820  * Login (PLOGI) command. For PLOGI completion, there must be an active
1821  * ndlp on the vport node list that matches the remote node ID from the
1822  * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply
1823  * ignored and command IOCB released. The PLOGI response IOCB status is
1824  * checked for error conditons. If there is error status reported, PLOGI
1825  * retry shall be attempted by invoking the lpfc_els_retry() routine.
1826  * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1827  * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1828  * (DSM) is set for this PLOGI completion. Finally, it checks whether
1829  * there are additional N_Port nodes with the vport that need to perform
1830  * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition
1831  * PLOGIs.
1832  **/
1833 static void
1834 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1835 		    struct lpfc_iocbq *rspiocb)
1836 {
1837 	struct lpfc_vport *vport = cmdiocb->vport;
1838 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1839 	IOCB_t *irsp;
1840 	struct lpfc_nodelist *ndlp;
1841 	struct lpfc_dmabuf *prsp;
1842 	int disc, rc;
1843 
1844 	/* we pass cmdiocb to state machine which needs rspiocb as well */
1845 	cmdiocb->context_un.rsp_iocb = rspiocb;
1846 
1847 	irsp = &rspiocb->iocb;
1848 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1849 		"PLOGI cmpl:      status:x%x/x%x did:x%x",
1850 		irsp->ulpStatus, irsp->un.ulpWord[4],
1851 		irsp->un.elsreq64.remoteID);
1852 
1853 	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1854 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1855 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1856 				 "0136 PLOGI completes to NPort x%x "
1857 				 "with no ndlp. Data: x%x x%x x%x\n",
1858 				 irsp->un.elsreq64.remoteID,
1859 				 irsp->ulpStatus, irsp->un.ulpWord[4],
1860 				 irsp->ulpIoTag);
1861 		goto out;
1862 	}
1863 
1864 	/* Since ndlp can be freed in the disc state machine, note if this node
1865 	 * is being used during discovery.
1866 	 */
1867 	spin_lock_irq(shost->host_lock);
1868 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1869 	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1870 	spin_unlock_irq(shost->host_lock);
1871 	rc   = 0;
1872 
1873 	/* PLOGI completes to NPort <nlp_DID> */
1874 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1875 			 "0102 PLOGI completes to NPort x%06x "
1876 			 "Data: x%x x%x x%x x%x x%x\n",
1877 			 ndlp->nlp_DID, ndlp->nlp_fc4_type,
1878 			 irsp->ulpStatus, irsp->un.ulpWord[4],
1879 			 disc, vport->num_disc_nodes);
1880 
1881 	/* Check to see if link went down during discovery */
1882 	if (lpfc_els_chk_latt(vport)) {
1883 		spin_lock_irq(shost->host_lock);
1884 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1885 		spin_unlock_irq(shost->host_lock);
1886 		goto out;
1887 	}
1888 
1889 	if (irsp->ulpStatus) {
1890 		/* Check for retry */
1891 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1892 			/* ELS command is being retried */
1893 			if (disc) {
1894 				spin_lock_irq(shost->host_lock);
1895 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1896 				spin_unlock_irq(shost->host_lock);
1897 			}
1898 			goto out;
1899 		}
1900 		/* PLOGI failed Don't print the vport to vport rjts */
1901 		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1902 			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1903 			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1904 			(phba)->pport->cfg_log_verbose & LOG_ELS)
1905 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1906 				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1907 				 ndlp->nlp_DID, irsp->ulpStatus,
1908 				 irsp->un.ulpWord[4]);
1909 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1910 		if (lpfc_error_lost_link(irsp))
1911 			rc = NLP_STE_FREED_NODE;
1912 		else
1913 			rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1914 						     NLP_EVT_CMPL_PLOGI);
1915 	} else {
1916 		/* Good status, call state machine */
1917 		prsp = list_entry(((struct lpfc_dmabuf *)
1918 				   cmdiocb->context2)->list.next,
1919 				  struct lpfc_dmabuf, list);
1920 		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1921 		rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1922 					     NLP_EVT_CMPL_PLOGI);
1923 	}
1924 
1925 	if (disc && vport->num_disc_nodes) {
1926 		/* Check to see if there are more PLOGIs to be sent */
1927 		lpfc_more_plogi(vport);
1928 
1929 		if (vport->num_disc_nodes == 0) {
1930 			spin_lock_irq(shost->host_lock);
1931 			vport->fc_flag &= ~FC_NDISC_ACTIVE;
1932 			spin_unlock_irq(shost->host_lock);
1933 
1934 			lpfc_can_disctmo(vport);
1935 			lpfc_end_rscn(vport);
1936 		}
1937 	}
1938 
1939 out:
1940 	lpfc_els_free_iocb(phba, cmdiocb);
1941 	return;
1942 }
1943 
1944 /**
1945  * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport
1946  * @vport: pointer to a host virtual N_Port data structure.
1947  * @did: destination port identifier.
1948  * @retry: number of retries to the command IOCB.
1949  *
1950  * This routine issues a Port Login (PLOGI) command to a remote N_Port
1951  * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1952  * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
1953  * This routine constructs the proper feilds of the PLOGI IOCB and invokes
1954  * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command.
1955  *
1956  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1957  * will be incremented by 1 for holding the ndlp and the reference to ndlp
1958  * will be stored into the context1 field of the IOCB for the completion
1959  * callback function to the PLOGI ELS command.
1960  *
1961  * Return code
1962  *   0 - Successfully issued a plogi for @vport
1963  *   1 - failed to issue a plogi for @vport
1964  **/
1965 int
1966 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1967 {
1968 	struct lpfc_hba  *phba = vport->phba;
1969 	struct serv_parm *sp;
1970 	struct lpfc_nodelist *ndlp;
1971 	struct lpfc_iocbq *elsiocb;
1972 	uint8_t *pcmd;
1973 	uint16_t cmdsize;
1974 	int ret;
1975 
1976 	ndlp = lpfc_findnode_did(vport, did);
1977 	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1978 		ndlp = NULL;
1979 
1980 	/* If ndlp is not NULL, we will bump the reference count on it */
1981 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1982 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1983 				     ELS_CMD_PLOGI);
1984 	if (!elsiocb)
1985 		return 1;
1986 
1987 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1988 
1989 	/* For PLOGI request, remainder of payload is service parameters */
1990 	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1991 	pcmd += sizeof(uint32_t);
1992 	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1993 	sp = (struct serv_parm *) pcmd;
1994 
1995 	/*
1996 	 * If we are a N-port connected to a Fabric, fix-up paramm's so logins
1997 	 * to device on remote loops work.
1998 	 */
1999 	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
2000 		sp->cmn.altBbCredit = 1;
2001 
2002 	if (sp->cmn.fcphLow < FC_PH_4_3)
2003 		sp->cmn.fcphLow = FC_PH_4_3;
2004 
2005 	if (sp->cmn.fcphHigh < FC_PH3)
2006 		sp->cmn.fcphHigh = FC_PH3;
2007 
2008 	sp->cmn.valid_vendor_ver_level = 0;
2009 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
2010 
2011 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2012 		"Issue PLOGI:     did:x%x",
2013 		did, 0, 0);
2014 
2015 	/* If our firmware supports this feature, convey that
2016 	 * information to the target using the vendor specific field.
2017 	 */
2018 	if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
2019 		sp->cmn.valid_vendor_ver_level = 1;
2020 		sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
2021 		sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
2022 	}
2023 
2024 	phba->fc_stat.elsXmitPLOGI++;
2025 	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
2026 	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2027 
2028 	if (ret == IOCB_ERROR) {
2029 		lpfc_els_free_iocb(phba, elsiocb);
2030 		return 1;
2031 	}
2032 	return 0;
2033 }
2034 
2035 /**
2036  * lpfc_cmpl_els_prli - Completion callback function for prli
2037  * @phba: pointer to lpfc hba data structure.
2038  * @cmdiocb: pointer to lpfc command iocb data structure.
2039  * @rspiocb: pointer to lpfc response iocb data structure.
2040  *
2041  * This routine is the completion callback function for a Process Login
2042  * (PRLI) ELS command. The PRLI response IOCB status is checked for error
2043  * status. If there is error status reported, PRLI retry shall be attempted
2044  * by invoking the lpfc_els_retry() routine. Otherwise, the state
2045  * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
2046  * ndlp to mark the PRLI completion.
2047  **/
2048 static void
2049 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2050 		   struct lpfc_iocbq *rspiocb)
2051 {
2052 	struct lpfc_vport *vport = cmdiocb->vport;
2053 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2054 	IOCB_t *irsp;
2055 	struct lpfc_nodelist *ndlp;
2056 
2057 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2058 	cmdiocb->context_un.rsp_iocb = rspiocb;
2059 
2060 	irsp = &(rspiocb->iocb);
2061 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2062 	spin_lock_irq(shost->host_lock);
2063 	ndlp->nlp_flag &= ~NLP_PRLI_SND;
2064 	spin_unlock_irq(shost->host_lock);
2065 
2066 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2067 		"PRLI cmpl:       status:x%x/x%x did:x%x",
2068 		irsp->ulpStatus, irsp->un.ulpWord[4],
2069 		ndlp->nlp_DID);
2070 
2071 	/* Ddriver supports multiple FC4 types.  Counters matter. */
2072 	vport->fc_prli_sent--;
2073 
2074 	/* PRLI completes to NPort <nlp_DID> */
2075 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2076 			 "0103 PRLI completes to NPort x%06x "
2077 			 "Data: x%x x%x x%x x%x\n",
2078 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2079 			 vport->num_disc_nodes, ndlp->fc4_prli_sent);
2080 
2081 	/* Check to see if link went down during discovery */
2082 	if (lpfc_els_chk_latt(vport))
2083 		goto out;
2084 
2085 	if (irsp->ulpStatus) {
2086 		/* Check for retry */
2087 		ndlp->fc4_prli_sent--;
2088 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2089 			/* ELS command is being retried */
2090 			goto out;
2091 		}
2092 
2093 		/* PRLI failed */
2094 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2095 				 "2754 PRLI failure DID:%06X Status:x%x/x%x, "
2096 				 "data: x%x\n",
2097 				 ndlp->nlp_DID, irsp->ulpStatus,
2098 				 irsp->un.ulpWord[4], ndlp->fc4_prli_sent);
2099 
2100 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2101 		if (lpfc_error_lost_link(irsp))
2102 			goto out;
2103 		else
2104 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2105 						NLP_EVT_CMPL_PRLI);
2106 	} else
2107 		/* Good status, call state machine.  However, if another
2108 		 * PRLI is outstanding, don't call the state machine
2109 		 * because final disposition to Mapped or Unmapped is
2110 		 * completed there.
2111 		 */
2112 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2113 					NLP_EVT_CMPL_PRLI);
2114 
2115 out:
2116 	lpfc_els_free_iocb(phba, cmdiocb);
2117 	return;
2118 }
2119 
2120 /**
2121  * lpfc_issue_els_prli - Issue a prli iocb command for a vport
2122  * @vport: pointer to a host virtual N_Port data structure.
2123  * @ndlp: pointer to a node-list data structure.
2124  * @retry: number of retries to the command IOCB.
2125  *
2126  * This routine issues a Process Login (PRLI) ELS command for the
2127  * @vport. The PRLI service parameters are set up in the payload of the
2128  * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
2129  * is put to the IOCB completion callback func field before invoking the
2130  * routine lpfc_sli_issue_iocb() to send out PRLI command.
2131  *
2132  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2133  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2134  * will be stored into the context1 field of the IOCB for the completion
2135  * callback function to the PRLI ELS command.
2136  *
2137  * Return code
2138  *   0 - successfully issued prli iocb command for @vport
2139  *   1 - failed to issue prli iocb command for @vport
2140  **/
2141 int
2142 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2143 		    uint8_t retry)
2144 {
2145 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2146 	struct lpfc_hba *phba = vport->phba;
2147 	PRLI *npr;
2148 	struct lpfc_nvme_prli *npr_nvme;
2149 	struct lpfc_iocbq *elsiocb;
2150 	uint8_t *pcmd;
2151 	uint16_t cmdsize;
2152 	u32 local_nlp_type, elscmd;
2153 
2154 	local_nlp_type = ndlp->nlp_fc4_type;
2155 
2156  send_next_prli:
2157 	if (local_nlp_type & NLP_FC4_FCP) {
2158 		/* Payload is 4 + 16 = 20 x14 bytes. */
2159 		cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2160 		elscmd = ELS_CMD_PRLI;
2161 	} else if (local_nlp_type & NLP_FC4_NVME) {
2162 		/* Payload is 4 + 20 = 24 x18 bytes. */
2163 		cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2164 		elscmd = ELS_CMD_NVMEPRLI;
2165 	} else {
2166 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2167 				 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2168 				 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2169 		return 1;
2170 	}
2171 
2172 	/* SLI3 ports don't support NVME.  If this rport is a strict NVME
2173 	 * FC4 type, implicitly LOGO.
2174 	 */
2175 	if (phba->sli_rev == LPFC_SLI_REV3 &&
2176 	    ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2177 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2178 				 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2179 				 ndlp->nlp_type);
2180 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2181 		return 1;
2182 	}
2183 
2184 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2185 				     ndlp->nlp_DID, elscmd);
2186 	if (!elsiocb)
2187 		return 1;
2188 
2189 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2190 
2191 	/* For PRLI request, remainder of payload is service parameters */
2192 	memset(pcmd, 0, cmdsize);
2193 
2194 	if (local_nlp_type & NLP_FC4_FCP) {
2195 		/* Remainder of payload is FCP PRLI parameter page.
2196 		 * Note: this data structure is defined as
2197 		 * BE/LE in the structure definition so no
2198 		 * byte swap call is made.
2199 		 */
2200 		*((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
2201 		pcmd += sizeof(uint32_t);
2202 		npr = (PRLI *)pcmd;
2203 
2204 		/*
2205 		 * If our firmware version is 3.20 or later,
2206 		 * set the following bits for FC-TAPE support.
2207 		 */
2208 		if (phba->vpd.rev.feaLevelHigh >= 0x02) {
2209 			npr->ConfmComplAllowed = 1;
2210 			npr->Retry = 1;
2211 			npr->TaskRetryIdReq = 1;
2212 		}
2213 		npr->estabImagePair = 1;
2214 		npr->readXferRdyDis = 1;
2215 		if (vport->cfg_first_burst_size)
2216 			npr->writeXferRdyDis = 1;
2217 
2218 		/* For FCP support */
2219 		npr->prliType = PRLI_FCP_TYPE;
2220 		npr->initiatorFunc = 1;
2221 		elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
2222 
2223 		/* Remove FCP type - processed. */
2224 		local_nlp_type &= ~NLP_FC4_FCP;
2225 	} else if (local_nlp_type & NLP_FC4_NVME) {
2226 		/* Remainder of payload is NVME PRLI parameter page.
2227 		 * This data structure is the newer definition that
2228 		 * uses bf macros so a byte swap is required.
2229 		 */
2230 		*((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
2231 		pcmd += sizeof(uint32_t);
2232 		npr_nvme = (struct lpfc_nvme_prli *)pcmd;
2233 		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
2234 		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
2235 
2236 		/* Only initiators request first burst. */
2237 		if ((phba->cfg_nvme_enable_fb) &&
2238 		    !phba->nvmet_support)
2239 			bf_set(prli_fba, npr_nvme, 1);
2240 
2241 		if (phba->nvmet_support) {
2242 			bf_set(prli_tgt, npr_nvme, 1);
2243 			bf_set(prli_disc, npr_nvme, 1);
2244 
2245 		} else {
2246 			bf_set(prli_init, npr_nvme, 1);
2247 		}
2248 		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
2249 		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
2250 		elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
2251 
2252 		/* Remove NVME type - processed. */
2253 		local_nlp_type &= ~NLP_FC4_NVME;
2254 	}
2255 
2256 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2257 		"Issue PRLI:      did:x%x",
2258 		ndlp->nlp_DID, 0, 0);
2259 
2260 	phba->fc_stat.elsXmitPRLI++;
2261 	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
2262 	spin_lock_irq(shost->host_lock);
2263 	ndlp->nlp_flag |= NLP_PRLI_SND;
2264 	spin_unlock_irq(shost->host_lock);
2265 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2266 	    IOCB_ERROR) {
2267 		spin_lock_irq(shost->host_lock);
2268 		ndlp->nlp_flag &= ~NLP_PRLI_SND;
2269 		spin_unlock_irq(shost->host_lock);
2270 		lpfc_els_free_iocb(phba, elsiocb);
2271 		return 1;
2272 	}
2273 
2274 	/* The vport counters are used for lpfc_scan_finished, but
2275 	 * the ndlp is used to track outstanding PRLIs for different
2276 	 * FC4 types.
2277 	 */
2278 	vport->fc_prli_sent++;
2279 	ndlp->fc4_prli_sent++;
2280 
2281 	/* The driver supports 2 FC4 types.  Make sure
2282 	 * a PRLI is issued for all types before exiting.
2283 	 */
2284 	if (phba->sli_rev == LPFC_SLI_REV4 &&
2285 	    local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
2286 		goto send_next_prli;
2287 
2288 	return 0;
2289 }
2290 
2291 /**
2292  * lpfc_rscn_disc - Perform rscn discovery for a vport
2293  * @vport: pointer to a host virtual N_Port data structure.
2294  *
2295  * This routine performs Registration State Change Notification (RSCN)
2296  * discovery for a @vport. If the @vport's node port recovery count is not
2297  * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2298  * the nodes that need recovery. If none of the PLOGI were needed through
2299  * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2300  * invoked to check and handle possible more RSCN came in during the period
2301  * of processing the current ones.
2302  **/
2303 static void
2304 lpfc_rscn_disc(struct lpfc_vport *vport)
2305 {
2306 	lpfc_can_disctmo(vport);
2307 
2308 	/* RSCN discovery */
2309 	/* go thru NPR nodes and issue ELS PLOGIs */
2310 	if (vport->fc_npr_cnt)
2311 		if (lpfc_els_disc_plogi(vport))
2312 			return;
2313 
2314 	lpfc_end_rscn(vport);
2315 }
2316 
2317 /**
2318  * lpfc_adisc_done - Complete the adisc phase of discovery
2319  * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2320  *
2321  * This function is called when the final ADISC is completed during discovery.
2322  * This function handles clearing link attention or issuing reg_vpi depending
2323  * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2324  * discovery.
2325  * This function is called with no locks held.
2326  **/
2327 static void
2328 lpfc_adisc_done(struct lpfc_vport *vport)
2329 {
2330 	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
2331 	struct lpfc_hba   *phba = vport->phba;
2332 
2333 	/*
2334 	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2335 	 * and continue discovery.
2336 	 */
2337 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2338 	    !(vport->fc_flag & FC_RSCN_MODE) &&
2339 	    (phba->sli_rev < LPFC_SLI_REV4)) {
2340 		/* The ADISCs are complete.  Doesn't matter if they
2341 		 * succeeded or failed because the ADISC completion
2342 		 * routine guarantees to call the state machine and
2343 		 * the RPI is either unregistered (failed ADISC response)
2344 		 * or the RPI is still valid and the node is marked
2345 		 * mapped for a target.  The exchanges should be in the
2346 		 * correct state. This code is specific to SLI3.
2347 		 */
2348 		lpfc_issue_clear_la(phba, vport);
2349 		lpfc_issue_reg_vpi(phba, vport);
2350 		return;
2351 	}
2352 	/*
2353 	* For SLI2, we need to set port_state to READY
2354 	* and continue discovery.
2355 	*/
2356 	if (vport->port_state < LPFC_VPORT_READY) {
2357 		/* If we get here, there is nothing to ADISC */
2358 		lpfc_issue_clear_la(phba, vport);
2359 		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2360 			vport->num_disc_nodes = 0;
2361 			/* go thru NPR list, issue ELS PLOGIs */
2362 			if (vport->fc_npr_cnt)
2363 				lpfc_els_disc_plogi(vport);
2364 			if (!vport->num_disc_nodes) {
2365 				spin_lock_irq(shost->host_lock);
2366 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
2367 				spin_unlock_irq(shost->host_lock);
2368 				lpfc_can_disctmo(vport);
2369 				lpfc_end_rscn(vport);
2370 			}
2371 		}
2372 		vport->port_state = LPFC_VPORT_READY;
2373 	} else
2374 		lpfc_rscn_disc(vport);
2375 }
2376 
2377 /**
2378  * lpfc_more_adisc - Issue more adisc as needed
2379  * @vport: pointer to a host virtual N_Port data structure.
2380  *
2381  * This routine determines whether there are more ndlps on a @vport
2382  * node list need to have Address Discover (ADISC) issued. If so, it will
2383  * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
2384  * remaining nodes which need to have ADISC sent.
2385  **/
2386 void
2387 lpfc_more_adisc(struct lpfc_vport *vport)
2388 {
2389 	if (vport->num_disc_nodes)
2390 		vport->num_disc_nodes--;
2391 	/* Continue discovery with <num_disc_nodes> ADISCs to go */
2392 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2393 			 "0210 Continue discovery with %d ADISCs to go "
2394 			 "Data: x%x x%x x%x\n",
2395 			 vport->num_disc_nodes, vport->fc_adisc_cnt,
2396 			 vport->fc_flag, vport->port_state);
2397 	/* Check to see if there are more ADISCs to be sent */
2398 	if (vport->fc_flag & FC_NLP_MORE) {
2399 		lpfc_set_disctmo(vport);
2400 		/* go thru NPR nodes and issue any remaining ELS ADISCs */
2401 		lpfc_els_disc_adisc(vport);
2402 	}
2403 	if (!vport->num_disc_nodes)
2404 		lpfc_adisc_done(vport);
2405 	return;
2406 }
2407 
2408 /**
2409  * lpfc_cmpl_els_adisc - Completion callback function for adisc
2410  * @phba: pointer to lpfc hba data structure.
2411  * @cmdiocb: pointer to lpfc command iocb data structure.
2412  * @rspiocb: pointer to lpfc response iocb data structure.
2413  *
2414  * This routine is the completion function for issuing the Address Discover
2415  * (ADISC) command. It first checks to see whether link went down during
2416  * the discovery process. If so, the node will be marked as node port
2417  * recovery for issuing discover IOCB by the link attention handler and
2418  * exit. Otherwise, the response status is checked. If error was reported
2419  * in the response status, the ADISC command shall be retried by invoking
2420  * the lpfc_els_retry() routine. Otherwise, if no error was reported in
2421  * the response status, the state machine is invoked to set transition
2422  * with respect to NLP_EVT_CMPL_ADISC event.
2423  **/
2424 static void
2425 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2426 		    struct lpfc_iocbq *rspiocb)
2427 {
2428 	struct lpfc_vport *vport = cmdiocb->vport;
2429 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2430 	IOCB_t *irsp;
2431 	struct lpfc_nodelist *ndlp;
2432 	int  disc;
2433 
2434 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2435 	cmdiocb->context_un.rsp_iocb = rspiocb;
2436 
2437 	irsp = &(rspiocb->iocb);
2438 	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2439 
2440 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2441 		"ADISC cmpl:      status:x%x/x%x did:x%x",
2442 		irsp->ulpStatus, irsp->un.ulpWord[4],
2443 		ndlp->nlp_DID);
2444 
2445 	/* Since ndlp can be freed in the disc state machine, note if this node
2446 	 * is being used during discovery.
2447 	 */
2448 	spin_lock_irq(shost->host_lock);
2449 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2450 	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2451 	spin_unlock_irq(shost->host_lock);
2452 	/* ADISC completes to NPort <nlp_DID> */
2453 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2454 			 "0104 ADISC completes to NPort x%x "
2455 			 "Data: x%x x%x x%x x%x x%x\n",
2456 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2457 			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
2458 	/* Check to see if link went down during discovery */
2459 	if (lpfc_els_chk_latt(vport)) {
2460 		spin_lock_irq(shost->host_lock);
2461 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2462 		spin_unlock_irq(shost->host_lock);
2463 		goto out;
2464 	}
2465 
2466 	if (irsp->ulpStatus) {
2467 		/* Check for retry */
2468 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2469 			/* ELS command is being retried */
2470 			if (disc) {
2471 				spin_lock_irq(shost->host_lock);
2472 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2473 				spin_unlock_irq(shost->host_lock);
2474 				lpfc_set_disctmo(vport);
2475 			}
2476 			goto out;
2477 		}
2478 		/* ADISC failed */
2479 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2480 				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2481 				 ndlp->nlp_DID, irsp->ulpStatus,
2482 				 irsp->un.ulpWord[4]);
2483 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2484 		if (!lpfc_error_lost_link(irsp))
2485 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2486 						NLP_EVT_CMPL_ADISC);
2487 	} else
2488 		/* Good status, call state machine */
2489 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2490 					NLP_EVT_CMPL_ADISC);
2491 
2492 	/* Check to see if there are more ADISCs to be sent */
2493 	if (disc && vport->num_disc_nodes)
2494 		lpfc_more_adisc(vport);
2495 out:
2496 	lpfc_els_free_iocb(phba, cmdiocb);
2497 	return;
2498 }
2499 
2500 /**
2501  * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport
2502  * @vport: pointer to a virtual N_Port data structure.
2503  * @ndlp: pointer to a node-list data structure.
2504  * @retry: number of retries to the command IOCB.
2505  *
2506  * This routine issues an Address Discover (ADISC) for an @ndlp on a
2507  * @vport. It prepares the payload of the ADISC ELS command, updates the
2508  * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2509  * to issue the ADISC ELS command.
2510  *
2511  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2512  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2513  * will be stored into the context1 field of the IOCB for the completion
2514  * callback function to the ADISC ELS command.
2515  *
2516  * Return code
2517  *   0 - successfully issued adisc
2518  *   1 - failed to issue adisc
2519  **/
2520 int
2521 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2522 		     uint8_t retry)
2523 {
2524 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2525 	struct lpfc_hba  *phba = vport->phba;
2526 	ADISC *ap;
2527 	struct lpfc_iocbq *elsiocb;
2528 	uint8_t *pcmd;
2529 	uint16_t cmdsize;
2530 
2531 	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2532 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2533 				     ndlp->nlp_DID, ELS_CMD_ADISC);
2534 	if (!elsiocb)
2535 		return 1;
2536 
2537 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2538 
2539 	/* For ADISC request, remainder of payload is service parameters */
2540 	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2541 	pcmd += sizeof(uint32_t);
2542 
2543 	/* Fill in ADISC payload */
2544 	ap = (ADISC *) pcmd;
2545 	ap->hardAL_PA = phba->fc_pref_ALPA;
2546 	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2547 	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2548 	ap->DID = be32_to_cpu(vport->fc_myDID);
2549 
2550 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2551 		"Issue ADISC:     did:x%x",
2552 		ndlp->nlp_DID, 0, 0);
2553 
2554 	phba->fc_stat.elsXmitADISC++;
2555 	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2556 	spin_lock_irq(shost->host_lock);
2557 	ndlp->nlp_flag |= NLP_ADISC_SND;
2558 	spin_unlock_irq(shost->host_lock);
2559 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2560 	    IOCB_ERROR) {
2561 		spin_lock_irq(shost->host_lock);
2562 		ndlp->nlp_flag &= ~NLP_ADISC_SND;
2563 		spin_unlock_irq(shost->host_lock);
2564 		lpfc_els_free_iocb(phba, elsiocb);
2565 		return 1;
2566 	}
2567 	return 0;
2568 }
2569 
2570 /**
2571  * lpfc_cmpl_els_logo - Completion callback function for logo
2572  * @phba: pointer to lpfc hba data structure.
2573  * @cmdiocb: pointer to lpfc command iocb data structure.
2574  * @rspiocb: pointer to lpfc response iocb data structure.
2575  *
2576  * This routine is the completion function for issuing the ELS Logout (LOGO)
2577  * command. If no error status was reported from the LOGO response, the
2578  * state machine of the associated ndlp shall be invoked for transition with
2579  * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2580  * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2581  **/
2582 static void
2583 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2584 		   struct lpfc_iocbq *rspiocb)
2585 {
2586 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2587 	struct lpfc_vport *vport = ndlp->vport;
2588 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2589 	IOCB_t *irsp;
2590 	struct lpfcMboxq *mbox;
2591 	unsigned long flags;
2592 	uint32_t skip_recovery = 0;
2593 
2594 	/* we pass cmdiocb to state machine which needs rspiocb as well */
2595 	cmdiocb->context_un.rsp_iocb = rspiocb;
2596 
2597 	irsp = &(rspiocb->iocb);
2598 	spin_lock_irq(shost->host_lock);
2599 	ndlp->nlp_flag &= ~NLP_LOGO_SND;
2600 	spin_unlock_irq(shost->host_lock);
2601 
2602 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2603 		"LOGO cmpl:       status:x%x/x%x did:x%x",
2604 		irsp->ulpStatus, irsp->un.ulpWord[4],
2605 		ndlp->nlp_DID);
2606 
2607 	/* LOGO completes to NPort <nlp_DID> */
2608 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2609 			 "0105 LOGO completes to NPort x%x "
2610 			 "Data: x%x x%x x%x x%x\n",
2611 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2612 			 irsp->ulpTimeout, vport->num_disc_nodes);
2613 
2614 	if (lpfc_els_chk_latt(vport)) {
2615 		skip_recovery = 1;
2616 		goto out;
2617 	}
2618 
2619 	/* Check to see if link went down during discovery */
2620 	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2621 	        /* NLP_EVT_DEVICE_RM should unregister the RPI
2622 		 * which should abort all outstanding IOs.
2623 		 */
2624 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2625 					NLP_EVT_DEVICE_RM);
2626 		skip_recovery = 1;
2627 		goto out;
2628 	}
2629 
2630 	if (irsp->ulpStatus) {
2631 		/* Check for retry */
2632 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2633 			/* ELS command is being retried */
2634 			skip_recovery = 1;
2635 			goto out;
2636 		}
2637 		/* LOGO failed */
2638 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2639 				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2640 				 ndlp->nlp_DID, irsp->ulpStatus,
2641 				 irsp->un.ulpWord[4]);
2642 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2643 		if (lpfc_error_lost_link(irsp)) {
2644 			skip_recovery = 1;
2645 			goto out;
2646 		}
2647 	}
2648 
2649 	/* Call state machine. This will unregister the rpi if needed. */
2650 	lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO);
2651 
2652 out:
2653 	lpfc_els_free_iocb(phba, cmdiocb);
2654 	/* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
2655 	if ((vport->fc_flag & FC_PT2PT) &&
2656 		!(vport->fc_flag & FC_PT2PT_PLOGI)) {
2657 		phba->pport->fc_myDID = 0;
2658 
2659 		if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
2660 		    (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
2661 			if (phba->nvmet_support)
2662 				lpfc_nvmet_update_targetport(phba);
2663 			else
2664 				lpfc_nvme_update_localport(phba->pport);
2665 		}
2666 
2667 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2668 		if (mbox) {
2669 			lpfc_config_link(phba, mbox);
2670 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2671 			mbox->vport = vport;
2672 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
2673 				MBX_NOT_FINISHED) {
2674 				mempool_free(mbox, phba->mbox_mem_pool);
2675 				skip_recovery = 1;
2676 			}
2677 		}
2678 	}
2679 
2680 	/*
2681 	 * If the node is a target, the handling attempts to recover the port.
2682 	 * For any other port type, the rpi is unregistered as an implicit
2683 	 * LOGO.
2684 	 */
2685 	if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
2686 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2687 		spin_lock_irqsave(shost->host_lock, flags);
2688 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2689 		spin_unlock_irqrestore(shost->host_lock, flags);
2690 
2691 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2692 				 "3187 LOGO completes to NPort x%x: Start "
2693 				 "Recovery Data: x%x x%x x%x x%x\n",
2694 				 ndlp->nlp_DID, irsp->ulpStatus,
2695 				 irsp->un.ulpWord[4], irsp->ulpTimeout,
2696 				 vport->num_disc_nodes);
2697 		lpfc_disc_start(vport);
2698 	}
2699 	return;
2700 }
2701 
2702 /**
2703  * lpfc_issue_els_logo - Issue a logo to an node on a vport
2704  * @vport: pointer to a virtual N_Port data structure.
2705  * @ndlp: pointer to a node-list data structure.
2706  * @retry: number of retries to the command IOCB.
2707  *
2708  * This routine constructs and issues an ELS Logout (LOGO) iocb command
2709  * to a remote node, referred by an @ndlp on a @vport. It constructs the
2710  * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2711  * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2712  *
2713  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2714  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2715  * will be stored into the context1 field of the IOCB for the completion
2716  * callback function to the LOGO ELS command.
2717  *
2718  * Return code
2719  *   0 - successfully issued logo
2720  *   1 - failed to issue logo
2721  **/
2722 int
2723 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2724 		    uint8_t retry)
2725 {
2726 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2727 	struct lpfc_hba  *phba = vport->phba;
2728 	struct lpfc_iocbq *elsiocb;
2729 	uint8_t *pcmd;
2730 	uint16_t cmdsize;
2731 	int rc;
2732 
2733 	spin_lock_irq(shost->host_lock);
2734 	if (ndlp->nlp_flag & NLP_LOGO_SND) {
2735 		spin_unlock_irq(shost->host_lock);
2736 		return 0;
2737 	}
2738 	spin_unlock_irq(shost->host_lock);
2739 
2740 	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2741 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2742 				     ndlp->nlp_DID, ELS_CMD_LOGO);
2743 	if (!elsiocb)
2744 		return 1;
2745 
2746 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2747 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2748 	pcmd += sizeof(uint32_t);
2749 
2750 	/* Fill in LOGO payload */
2751 	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2752 	pcmd += sizeof(uint32_t);
2753 	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2754 
2755 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2756 		"Issue LOGO:      did:x%x",
2757 		ndlp->nlp_DID, 0, 0);
2758 
2759 	/*
2760 	 * If we are issuing a LOGO, we may try to recover the remote NPort
2761 	 * by issuing a PLOGI later. Even though we issue ELS cmds by the
2762 	 * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
2763 	 * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
2764 	 * for that ELS cmd. To avoid this situation, lets get rid of the
2765 	 * RPI right now, before any ELS cmds are sent.
2766 	 */
2767 	spin_lock_irq(shost->host_lock);
2768 	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
2769 	spin_unlock_irq(shost->host_lock);
2770 	if (lpfc_unreg_rpi(vport, ndlp)) {
2771 		lpfc_els_free_iocb(phba, elsiocb);
2772 		return 0;
2773 	}
2774 
2775 	phba->fc_stat.elsXmitLOGO++;
2776 	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2777 	spin_lock_irq(shost->host_lock);
2778 	ndlp->nlp_flag |= NLP_LOGO_SND;
2779 	ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
2780 	spin_unlock_irq(shost->host_lock);
2781 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2782 
2783 	if (rc == IOCB_ERROR) {
2784 		spin_lock_irq(shost->host_lock);
2785 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
2786 		spin_unlock_irq(shost->host_lock);
2787 		lpfc_els_free_iocb(phba, elsiocb);
2788 		return 1;
2789 	}
2790 	return 0;
2791 }
2792 
2793 /**
2794  * lpfc_cmpl_els_cmd - Completion callback function for generic els command
2795  * @phba: pointer to lpfc hba data structure.
2796  * @cmdiocb: pointer to lpfc command iocb data structure.
2797  * @rspiocb: pointer to lpfc response iocb data structure.
2798  *
2799  * This routine is a generic completion callback function for ELS commands.
2800  * Specifically, it is the callback function which does not need to perform
2801  * any command specific operations. It is currently used by the ELS command
2802  * issuing routines for the ELS State Change  Request (SCR),
2803  * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2804  * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2805  * certain debug loggings, this callback function simply invokes the
2806  * lpfc_els_chk_latt() routine to check whether link went down during the
2807  * discovery process.
2808  **/
2809 static void
2810 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2811 		  struct lpfc_iocbq *rspiocb)
2812 {
2813 	struct lpfc_vport *vport = cmdiocb->vport;
2814 	IOCB_t *irsp;
2815 
2816 	irsp = &rspiocb->iocb;
2817 
2818 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2819 		"ELS cmd cmpl:    status:x%x/x%x did:x%x",
2820 		irsp->ulpStatus, irsp->un.ulpWord[4],
2821 		irsp->un.elsreq64.remoteID);
2822 	/* ELS cmd tag <ulpIoTag> completes */
2823 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2824 			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2825 			 irsp->ulpIoTag, irsp->ulpStatus,
2826 			 irsp->un.ulpWord[4], irsp->ulpTimeout);
2827 	/* Check to see if link went down during discovery */
2828 	lpfc_els_chk_latt(vport);
2829 	lpfc_els_free_iocb(phba, cmdiocb);
2830 	return;
2831 }
2832 
2833 /**
2834  * lpfc_issue_els_scr - Issue a scr to an node on a vport
2835  * @vport: pointer to a host virtual N_Port data structure.
2836  * @nportid: N_Port identifier to the remote node.
2837  * @retry: number of retries to the command IOCB.
2838  *
2839  * This routine issues a State Change Request (SCR) to a fabric node
2840  * on a @vport. The remote node @nportid is passed into the function. It
2841  * first search the @vport node list to find the matching ndlp. If no such
2842  * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2843  * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2844  * routine is invoked to send the SCR IOCB.
2845  *
2846  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2847  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2848  * will be stored into the context1 field of the IOCB for the completion
2849  * callback function to the SCR ELS command.
2850  *
2851  * Return code
2852  *   0 - Successfully issued scr command
2853  *   1 - Failed to issue scr command
2854  **/
2855 int
2856 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2857 {
2858 	struct lpfc_hba  *phba = vport->phba;
2859 	struct lpfc_iocbq *elsiocb;
2860 	uint8_t *pcmd;
2861 	uint16_t cmdsize;
2862 	struct lpfc_nodelist *ndlp;
2863 
2864 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2865 
2866 	ndlp = lpfc_findnode_did(vport, nportid);
2867 	if (!ndlp) {
2868 		ndlp = lpfc_nlp_init(vport, nportid);
2869 		if (!ndlp)
2870 			return 1;
2871 		lpfc_enqueue_node(vport, ndlp);
2872 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
2873 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2874 		if (!ndlp)
2875 			return 1;
2876 	}
2877 
2878 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2879 				     ndlp->nlp_DID, ELS_CMD_SCR);
2880 
2881 	if (!elsiocb) {
2882 		/* This will trigger the release of the node just
2883 		 * allocated
2884 		 */
2885 		lpfc_nlp_put(ndlp);
2886 		return 1;
2887 	}
2888 
2889 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2890 
2891 	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
2892 	pcmd += sizeof(uint32_t);
2893 
2894 	/* For SCR, remainder of payload is SCR parameter page */
2895 	memset(pcmd, 0, sizeof(SCR));
2896 	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2897 
2898 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2899 		"Issue SCR:       did:x%x",
2900 		ndlp->nlp_DID, 0, 0);
2901 
2902 	phba->fc_stat.elsXmitSCR++;
2903 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2904 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2905 	    IOCB_ERROR) {
2906 		/* The additional lpfc_nlp_put will cause the following
2907 		 * lpfc_els_free_iocb routine to trigger the rlease of
2908 		 * the node.
2909 		 */
2910 		lpfc_nlp_put(ndlp);
2911 		lpfc_els_free_iocb(phba, elsiocb);
2912 		return 1;
2913 	}
2914 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
2915 	 * trigger the release of node.
2916 	 */
2917 
2918 	lpfc_nlp_put(ndlp);
2919 	return 0;
2920 }
2921 
2922 /**
2923  * lpfc_issue_els_farpr - Issue a farp to an node on a vport
2924  * @vport: pointer to a host virtual N_Port data structure.
2925  * @nportid: N_Port identifier to the remote node.
2926  * @retry: number of retries to the command IOCB.
2927  *
2928  * This routine issues a Fibre Channel Address Resolution Response
2929  * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
2930  * is passed into the function. It first search the @vport node list to find
2931  * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2932  * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2933  * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2934  *
2935  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2936  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2937  * will be stored into the context1 field of the IOCB for the completion
2938  * callback function to the PARPR ELS command.
2939  *
2940  * Return code
2941  *   0 - Successfully issued farpr command
2942  *   1 - Failed to issue farpr command
2943  **/
2944 static int
2945 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2946 {
2947 	struct lpfc_hba  *phba = vport->phba;
2948 	struct lpfc_iocbq *elsiocb;
2949 	FARP *fp;
2950 	uint8_t *pcmd;
2951 	uint32_t *lp;
2952 	uint16_t cmdsize;
2953 	struct lpfc_nodelist *ondlp;
2954 	struct lpfc_nodelist *ndlp;
2955 
2956 	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2957 
2958 	ndlp = lpfc_findnode_did(vport, nportid);
2959 	if (!ndlp) {
2960 		ndlp = lpfc_nlp_init(vport, nportid);
2961 		if (!ndlp)
2962 			return 1;
2963 		lpfc_enqueue_node(vport, ndlp);
2964 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
2965 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2966 		if (!ndlp)
2967 			return 1;
2968 	}
2969 
2970 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2971 				     ndlp->nlp_DID, ELS_CMD_RNID);
2972 	if (!elsiocb) {
2973 		/* This will trigger the release of the node just
2974 		 * allocated
2975 		 */
2976 		lpfc_nlp_put(ndlp);
2977 		return 1;
2978 	}
2979 
2980 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2981 
2982 	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
2983 	pcmd += sizeof(uint32_t);
2984 
2985 	/* Fill in FARPR payload */
2986 	fp = (FARP *) (pcmd);
2987 	memset(fp, 0, sizeof(FARP));
2988 	lp = (uint32_t *) pcmd;
2989 	*lp++ = be32_to_cpu(nportid);
2990 	*lp++ = be32_to_cpu(vport->fc_myDID);
2991 	fp->Rflags = 0;
2992 	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2993 
2994 	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2995 	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2996 	ondlp = lpfc_findnode_did(vport, nportid);
2997 	if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
2998 		memcpy(&fp->OportName, &ondlp->nlp_portname,
2999 		       sizeof(struct lpfc_name));
3000 		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3001 		       sizeof(struct lpfc_name));
3002 	}
3003 
3004 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3005 		"Issue FARPR:     did:x%x",
3006 		ndlp->nlp_DID, 0, 0);
3007 
3008 	phba->fc_stat.elsXmitFARPR++;
3009 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3010 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
3011 	    IOCB_ERROR) {
3012 		/* The additional lpfc_nlp_put will cause the following
3013 		 * lpfc_els_free_iocb routine to trigger the release of
3014 		 * the node.
3015 		 */
3016 		lpfc_nlp_put(ndlp);
3017 		lpfc_els_free_iocb(phba, elsiocb);
3018 		return 1;
3019 	}
3020 	/* This will cause the callback-function lpfc_cmpl_els_cmd to
3021 	 * trigger the release of the node.
3022 	 */
3023 	lpfc_nlp_put(ndlp);
3024 	return 0;
3025 }
3026 
3027 /**
3028  * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
3029  * @vport: pointer to a host virtual N_Port data structure.
3030  * @nlp: pointer to a node-list data structure.
3031  *
3032  * This routine cancels the timer with a delayed IOCB-command retry for
3033  * a @vport's @ndlp. It stops the timer for the delayed function retrial and
3034  * removes the ELS retry event if it presents. In addition, if the
3035  * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
3036  * commands are sent for the @vport's nodes that require issuing discovery
3037  * ADISC.
3038  **/
3039 void
3040 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
3041 {
3042 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3043 	struct lpfc_work_evt *evtp;
3044 
3045 	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
3046 		return;
3047 	spin_lock_irq(shost->host_lock);
3048 	nlp->nlp_flag &= ~NLP_DELAY_TMO;
3049 	spin_unlock_irq(shost->host_lock);
3050 	del_timer_sync(&nlp->nlp_delayfunc);
3051 	nlp->nlp_last_elscmd = 0;
3052 	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
3053 		list_del_init(&nlp->els_retry_evt.evt_listp);
3054 		/* Decrement nlp reference count held for the delayed retry */
3055 		evtp = &nlp->els_retry_evt;
3056 		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
3057 	}
3058 	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
3059 		spin_lock_irq(shost->host_lock);
3060 		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
3061 		spin_unlock_irq(shost->host_lock);
3062 		if (vport->num_disc_nodes) {
3063 			if (vport->port_state < LPFC_VPORT_READY) {
3064 				/* Check if there are more ADISCs to be sent */
3065 				lpfc_more_adisc(vport);
3066 			} else {
3067 				/* Check if there are more PLOGIs to be sent */
3068 				lpfc_more_plogi(vport);
3069 				if (vport->num_disc_nodes == 0) {
3070 					spin_lock_irq(shost->host_lock);
3071 					vport->fc_flag &= ~FC_NDISC_ACTIVE;
3072 					spin_unlock_irq(shost->host_lock);
3073 					lpfc_can_disctmo(vport);
3074 					lpfc_end_rscn(vport);
3075 				}
3076 			}
3077 		}
3078 	}
3079 	return;
3080 }
3081 
3082 /**
3083  * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
3084  * @ptr: holder for the pointer to the timer function associated data (ndlp).
3085  *
3086  * This routine is invoked by the ndlp delayed-function timer to check
3087  * whether there is any pending ELS retry event(s) with the node. If not, it
3088  * simply returns. Otherwise, if there is at least one ELS delayed event, it
3089  * adds the delayed events to the HBA work list and invokes the
3090  * lpfc_worker_wake_up() routine to wake up worker thread to process the
3091  * event. Note that lpfc_nlp_get() is called before posting the event to
3092  * the work list to hold reference count of ndlp so that it guarantees the
3093  * reference to ndlp will still be available when the worker thread gets
3094  * to the event associated with the ndlp.
3095  **/
3096 void
3097 lpfc_els_retry_delay(unsigned long ptr)
3098 {
3099 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
3100 	struct lpfc_vport *vport = ndlp->vport;
3101 	struct lpfc_hba   *phba = vport->phba;
3102 	unsigned long flags;
3103 	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
3104 
3105 	spin_lock_irqsave(&phba->hbalock, flags);
3106 	if (!list_empty(&evtp->evt_listp)) {
3107 		spin_unlock_irqrestore(&phba->hbalock, flags);
3108 		return;
3109 	}
3110 
3111 	/* We need to hold the node by incrementing the reference
3112 	 * count until the queued work is done
3113 	 */
3114 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
3115 	if (evtp->evt_arg1) {
3116 		evtp->evt = LPFC_EVT_ELS_RETRY;
3117 		list_add_tail(&evtp->evt_listp, &phba->work_list);
3118 		lpfc_worker_wake_up(phba);
3119 	}
3120 	spin_unlock_irqrestore(&phba->hbalock, flags);
3121 	return;
3122 }
3123 
3124 /**
3125  * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
3126  * @ndlp: pointer to a node-list data structure.
3127  *
3128  * This routine is the worker-thread handler for processing the @ndlp delayed
3129  * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
3130  * the last ELS command from the associated ndlp and invokes the proper ELS
3131  * function according to the delayed ELS command to retry the command.
3132  **/
3133 void
3134 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3135 {
3136 	struct lpfc_vport *vport = ndlp->vport;
3137 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3138 	uint32_t cmd, retry;
3139 
3140 	spin_lock_irq(shost->host_lock);
3141 	cmd = ndlp->nlp_last_elscmd;
3142 	ndlp->nlp_last_elscmd = 0;
3143 
3144 	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
3145 		spin_unlock_irq(shost->host_lock);
3146 		return;
3147 	}
3148 
3149 	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
3150 	spin_unlock_irq(shost->host_lock);
3151 	/*
3152 	 * If a discovery event readded nlp_delayfunc after timer
3153 	 * firing and before processing the timer, cancel the
3154 	 * nlp_delayfunc.
3155 	 */
3156 	del_timer_sync(&ndlp->nlp_delayfunc);
3157 	retry = ndlp->nlp_retry;
3158 	ndlp->nlp_retry = 0;
3159 
3160 	switch (cmd) {
3161 	case ELS_CMD_FLOGI:
3162 		lpfc_issue_els_flogi(vport, ndlp, retry);
3163 		break;
3164 	case ELS_CMD_PLOGI:
3165 		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
3166 			ndlp->nlp_prev_state = ndlp->nlp_state;
3167 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
3168 		}
3169 		break;
3170 	case ELS_CMD_ADISC:
3171 		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
3172 			ndlp->nlp_prev_state = ndlp->nlp_state;
3173 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3174 		}
3175 		break;
3176 	case ELS_CMD_PRLI:
3177 	case ELS_CMD_NVMEPRLI:
3178 		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
3179 			ndlp->nlp_prev_state = ndlp->nlp_state;
3180 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3181 		}
3182 		break;
3183 	case ELS_CMD_LOGO:
3184 		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
3185 			ndlp->nlp_prev_state = ndlp->nlp_state;
3186 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3187 		}
3188 		break;
3189 	case ELS_CMD_FDISC:
3190 		if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
3191 			lpfc_issue_els_fdisc(vport, ndlp, retry);
3192 		break;
3193 	}
3194 	return;
3195 }
3196 
3197 /**
3198  * lpfc_els_retry - Make retry decision on an els command iocb
3199  * @phba: pointer to lpfc hba data structure.
3200  * @cmdiocb: pointer to lpfc command iocb data structure.
3201  * @rspiocb: pointer to lpfc response iocb data structure.
3202  *
3203  * This routine makes a retry decision on an ELS command IOCB, which has
3204  * failed. The following ELS IOCBs use this function for retrying the command
3205  * when previously issued command responsed with error status: FLOGI, PLOGI,
3206  * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
3207  * returned error status, it makes the decision whether a retry shall be
3208  * issued for the command, and whether a retry shall be made immediately or
3209  * delayed. In the former case, the corresponding ELS command issuing-function
3210  * is called to retry the command. In the later case, the ELS command shall
3211  * be posted to the ndlp delayed event and delayed function timer set to the
3212  * ndlp for the delayed command issusing.
3213  *
3214  * Return code
3215  *   0 - No retry of els command is made
3216  *   1 - Immediate or delayed retry of els command is made
3217  **/
3218 static int
3219 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3220 	       struct lpfc_iocbq *rspiocb)
3221 {
3222 	struct lpfc_vport *vport = cmdiocb->vport;
3223 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
3224 	IOCB_t *irsp = &rspiocb->iocb;
3225 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3226 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3227 	uint32_t *elscmd;
3228 	struct ls_rjt stat;
3229 	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
3230 	int logerr = 0;
3231 	uint32_t cmd = 0;
3232 	uint32_t did;
3233 
3234 
3235 	/* Note: context2 may be 0 for internal driver abort
3236 	 * of delays ELS command.
3237 	 */
3238 
3239 	if (pcmd && pcmd->virt) {
3240 		elscmd = (uint32_t *) (pcmd->virt);
3241 		cmd = *elscmd++;
3242 	}
3243 
3244 	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
3245 		did = ndlp->nlp_DID;
3246 	else {
3247 		/* We should only hit this case for retrying PLOGI */
3248 		did = irsp->un.elsreq64.remoteID;
3249 		ndlp = lpfc_findnode_did(vport, did);
3250 		if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
3251 		    && (cmd != ELS_CMD_PLOGI))
3252 			return 1;
3253 	}
3254 
3255 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3256 		"Retry ELS:       wd7:x%x wd4:x%x did:x%x",
3257 		*(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
3258 
3259 	switch (irsp->ulpStatus) {
3260 	case IOSTAT_FCP_RSP_ERROR:
3261 		break;
3262 	case IOSTAT_REMOTE_STOP:
3263 		if (phba->sli_rev == LPFC_SLI_REV4) {
3264 			/* This IO was aborted by the target, we don't
3265 			 * know the rxid and because we did not send the
3266 			 * ABTS we cannot generate and RRQ.
3267 			 */
3268 			lpfc_set_rrq_active(phba, ndlp,
3269 					 cmdiocb->sli4_lxritag, 0, 0);
3270 		}
3271 		break;
3272 	case IOSTAT_LOCAL_REJECT:
3273 		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
3274 		case IOERR_LOOP_OPEN_FAILURE:
3275 			if (cmd == ELS_CMD_FLOGI) {
3276 				if (PCI_DEVICE_ID_HORNET ==
3277 					phba->pcidev->device) {
3278 					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
3279 					phba->pport->fc_myDID = 0;
3280 					phba->alpa_map[0] = 0;
3281 					phba->alpa_map[1] = 0;
3282 				}
3283 			}
3284 			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
3285 				delay = 1000;
3286 			retry = 1;
3287 			break;
3288 
3289 		case IOERR_ILLEGAL_COMMAND:
3290 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3291 					 "0124 Retry illegal cmd x%x "
3292 					 "retry:x%x delay:x%x\n",
3293 					 cmd, cmdiocb->retry, delay);
3294 			retry = 1;
3295 			/* All command's retry policy */
3296 			maxretry = 8;
3297 			if (cmdiocb->retry > 2)
3298 				delay = 1000;
3299 			break;
3300 
3301 		case IOERR_NO_RESOURCES:
3302 			logerr = 1; /* HBA out of resources */
3303 			retry = 1;
3304 			if (cmdiocb->retry > 100)
3305 				delay = 100;
3306 			maxretry = 250;
3307 			break;
3308 
3309 		case IOERR_ILLEGAL_FRAME:
3310 			delay = 100;
3311 			retry = 1;
3312 			break;
3313 
3314 		case IOERR_SEQUENCE_TIMEOUT:
3315 		case IOERR_INVALID_RPI:
3316 			if (cmd == ELS_CMD_PLOGI &&
3317 			    did == NameServer_DID) {
3318 				/* Continue forever if plogi to */
3319 				/* the nameserver fails */
3320 				maxretry = 0;
3321 				delay = 100;
3322 			}
3323 			retry = 1;
3324 			break;
3325 		}
3326 		break;
3327 
3328 	case IOSTAT_NPORT_RJT:
3329 	case IOSTAT_FABRIC_RJT:
3330 		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
3331 			retry = 1;
3332 			break;
3333 		}
3334 		break;
3335 
3336 	case IOSTAT_NPORT_BSY:
3337 	case IOSTAT_FABRIC_BSY:
3338 		logerr = 1; /* Fabric / Remote NPort out of resources */
3339 		retry = 1;
3340 		break;
3341 
3342 	case IOSTAT_LS_RJT:
3343 		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
3344 		/* Added for Vendor specifc support
3345 		 * Just keep retrying for these Rsn / Exp codes
3346 		 */
3347 		switch (stat.un.b.lsRjtRsnCode) {
3348 		case LSRJT_UNABLE_TPC:
3349 			/* The driver has a VALID PLOGI but the rport has
3350 			 * rejected the PRLI - can't do it now.  Delay
3351 			 * for 1 second and try again - don't care about
3352 			 * the explanation.
3353 			 */
3354 			if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
3355 				delay = 1000;
3356 				maxretry = lpfc_max_els_tries + 1;
3357 				retry = 1;
3358 				break;
3359 			}
3360 
3361 			/* Legacy bug fix code for targets with PLOGI delays. */
3362 			if (stat.un.b.lsRjtRsnCodeExp ==
3363 			    LSEXP_CMD_IN_PROGRESS) {
3364 				if (cmd == ELS_CMD_PLOGI) {
3365 					delay = 1000;
3366 					maxretry = 48;
3367 				}
3368 				retry = 1;
3369 				break;
3370 			}
3371 			if (stat.un.b.lsRjtRsnCodeExp ==
3372 			    LSEXP_CANT_GIVE_DATA) {
3373 				if (cmd == ELS_CMD_PLOGI) {
3374 					delay = 1000;
3375 					maxretry = 48;
3376 				}
3377 				retry = 1;
3378 				break;
3379 			}
3380 			if (cmd == ELS_CMD_PLOGI) {
3381 				delay = 1000;
3382 				maxretry = lpfc_max_els_tries + 1;
3383 				retry = 1;
3384 				break;
3385 			}
3386 			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3387 			  (cmd == ELS_CMD_FDISC) &&
3388 			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
3389 				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3390 						 "0125 FDISC Failed (x%x). "
3391 						 "Fabric out of resources\n",
3392 						 stat.un.lsRjtError);
3393 				lpfc_vport_set_state(vport,
3394 						     FC_VPORT_NO_FABRIC_RSCS);
3395 			}
3396 			break;
3397 
3398 		case LSRJT_LOGICAL_BSY:
3399 			if ((cmd == ELS_CMD_PLOGI) ||
3400 			    (cmd == ELS_CMD_PRLI) ||
3401 			    (cmd == ELS_CMD_NVMEPRLI)) {
3402 				delay = 1000;
3403 				maxretry = 48;
3404 			} else if (cmd == ELS_CMD_FDISC) {
3405 				/* FDISC retry policy */
3406 				maxretry = 48;
3407 				if (cmdiocb->retry >= 32)
3408 					delay = 1000;
3409 			}
3410 			retry = 1;
3411 			break;
3412 
3413 		case LSRJT_LOGICAL_ERR:
3414 			/* There are some cases where switches return this
3415 			 * error when they are not ready and should be returning
3416 			 * Logical Busy. We should delay every time.
3417 			 */
3418 			if (cmd == ELS_CMD_FDISC &&
3419 			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
3420 				maxretry = 3;
3421 				delay = 1000;
3422 				retry = 1;
3423 				break;
3424 			}
3425 		case LSRJT_PROTOCOL_ERR:
3426 			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3427 			  (cmd == ELS_CMD_FDISC) &&
3428 			  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
3429 			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
3430 			  ) {
3431 				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3432 						 "0122 FDISC Failed (x%x). "
3433 						 "Fabric Detected Bad WWN\n",
3434 						 stat.un.lsRjtError);
3435 				lpfc_vport_set_state(vport,
3436 						     FC_VPORT_FABRIC_REJ_WWN);
3437 			}
3438 			break;
3439 		case LSRJT_VENDOR_UNIQUE:
3440 			if ((stat.un.b.vendorUnique == 0x45) &&
3441 			    (cmd == ELS_CMD_FLOGI)) {
3442 				goto out_retry;
3443 			}
3444 			break;
3445 		}
3446 		break;
3447 
3448 	case IOSTAT_INTERMED_RSP:
3449 	case IOSTAT_BA_RJT:
3450 		break;
3451 
3452 	default:
3453 		break;
3454 	}
3455 
3456 	if (did == FDMI_DID)
3457 		retry = 1;
3458 
3459 	if ((cmd == ELS_CMD_FLOGI) &&
3460 	    (phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
3461 	    !lpfc_error_lost_link(irsp)) {
3462 		/* FLOGI retry policy */
3463 		retry = 1;
3464 		/* retry FLOGI forever */
3465 		if (phba->link_flag != LS_LOOPBACK_MODE)
3466 			maxretry = 0;
3467 		else
3468 			maxretry = 2;
3469 
3470 		if (cmdiocb->retry >= 100)
3471 			delay = 5000;
3472 		else if (cmdiocb->retry >= 32)
3473 			delay = 1000;
3474 	} else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
3475 		/* retry FDISCs every second up to devloss */
3476 		retry = 1;
3477 		maxretry = vport->cfg_devloss_tmo;
3478 		delay = 1000;
3479 	}
3480 
3481 	cmdiocb->retry++;
3482 	if (maxretry && (cmdiocb->retry >= maxretry)) {
3483 		phba->fc_stat.elsRetryExceeded++;
3484 		retry = 0;
3485 	}
3486 
3487 	if ((vport->load_flag & FC_UNLOADING) != 0)
3488 		retry = 0;
3489 
3490 out_retry:
3491 	if (retry) {
3492 		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
3493 			/* Stop retrying PLOGI and FDISC if in FCF discovery */
3494 			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
3495 				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3496 						 "2849 Stop retry ELS command "
3497 						 "x%x to remote NPORT x%x, "
3498 						 "Data: x%x x%x\n", cmd, did,
3499 						 cmdiocb->retry, delay);
3500 				return 0;
3501 			}
3502 		}
3503 
3504 		/* Retry ELS command <elsCmd> to remote NPORT <did> */
3505 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3506 				 "0107 Retry ELS command x%x to remote "
3507 				 "NPORT x%x Data: x%x x%x\n",
3508 				 cmd, did, cmdiocb->retry, delay);
3509 
3510 		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
3511 			((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
3512 			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
3513 			IOERR_NO_RESOURCES))) {
3514 			/* Don't reset timer for no resources */
3515 
3516 			/* If discovery / RSCN timer is running, reset it */
3517 			if (timer_pending(&vport->fc_disctmo) ||
3518 			    (vport->fc_flag & FC_RSCN_MODE))
3519 				lpfc_set_disctmo(vport);
3520 		}
3521 
3522 		phba->fc_stat.elsXmitRetry++;
3523 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
3524 			phba->fc_stat.elsDelayRetry++;
3525 			ndlp->nlp_retry = cmdiocb->retry;
3526 
3527 			/* delay is specified in milliseconds */
3528 			mod_timer(&ndlp->nlp_delayfunc,
3529 				jiffies + msecs_to_jiffies(delay));
3530 			spin_lock_irq(shost->host_lock);
3531 			ndlp->nlp_flag |= NLP_DELAY_TMO;
3532 			spin_unlock_irq(shost->host_lock);
3533 
3534 			ndlp->nlp_prev_state = ndlp->nlp_state;
3535 			if ((cmd == ELS_CMD_PRLI) ||
3536 			    (cmd == ELS_CMD_NVMEPRLI))
3537 				lpfc_nlp_set_state(vport, ndlp,
3538 					NLP_STE_PRLI_ISSUE);
3539 			else
3540 				lpfc_nlp_set_state(vport, ndlp,
3541 					NLP_STE_NPR_NODE);
3542 			ndlp->nlp_last_elscmd = cmd;
3543 
3544 			return 1;
3545 		}
3546 		switch (cmd) {
3547 		case ELS_CMD_FLOGI:
3548 			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
3549 			return 1;
3550 		case ELS_CMD_FDISC:
3551 			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
3552 			return 1;
3553 		case ELS_CMD_PLOGI:
3554 			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3555 				ndlp->nlp_prev_state = ndlp->nlp_state;
3556 				lpfc_nlp_set_state(vport, ndlp,
3557 						   NLP_STE_PLOGI_ISSUE);
3558 			}
3559 			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
3560 			return 1;
3561 		case ELS_CMD_ADISC:
3562 			ndlp->nlp_prev_state = ndlp->nlp_state;
3563 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3564 			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
3565 			return 1;
3566 		case ELS_CMD_PRLI:
3567 		case ELS_CMD_NVMEPRLI:
3568 			ndlp->nlp_prev_state = ndlp->nlp_state;
3569 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
3570 			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
3571 			return 1;
3572 		case ELS_CMD_LOGO:
3573 			ndlp->nlp_prev_state = ndlp->nlp_state;
3574 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
3575 			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
3576 			return 1;
3577 		}
3578 	}
3579 	/* No retry ELS command <elsCmd> to remote NPORT <did> */
3580 	if (logerr) {
3581 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3582 			 "0137 No retry ELS command x%x to remote "
3583 			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3584 			 cmd, did, irsp->ulpStatus,
3585 			 irsp->un.ulpWord[4]);
3586 	}
3587 	else {
3588 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3589 			 "0108 No retry ELS command x%x to remote "
3590 			 "NPORT x%x Retried:%d Error:x%x/%x\n",
3591 			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3592 			 irsp->un.ulpWord[4]);
3593 	}
3594 	return 0;
3595 }
3596 
3597 /**
3598  * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
3599  * @phba: pointer to lpfc hba data structure.
3600  * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3601  *
3602  * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3603  * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3604  * checks to see whether there is a lpfc DMA buffer associated with the
3605  * response of the command IOCB. If so, it will be released before releasing
3606  * the lpfc DMA buffer associated with the IOCB itself.
3607  *
3608  * Return code
3609  *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
3610  **/
3611 static int
3612 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3613 {
3614 	struct lpfc_dmabuf *buf_ptr;
3615 
3616 	/* Free the response before processing the command. */
3617 	if (!list_empty(&buf_ptr1->list)) {
3618 		list_remove_head(&buf_ptr1->list, buf_ptr,
3619 				 struct lpfc_dmabuf,
3620 				 list);
3621 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3622 		kfree(buf_ptr);
3623 	}
3624 	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3625 	kfree(buf_ptr1);
3626 	return 0;
3627 }
3628 
3629 /**
3630  * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
3631  * @phba: pointer to lpfc hba data structure.
3632  * @buf_ptr: pointer to the lpfc dma buffer data structure.
3633  *
3634  * This routine releases the lpfc Direct Memory Access (DMA) buffer
3635  * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
3636  * pool.
3637  *
3638  * Return code
3639  *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
3640  **/
3641 static int
3642 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3643 {
3644 	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3645 	kfree(buf_ptr);
3646 	return 0;
3647 }
3648 
3649 /**
3650  * lpfc_els_free_iocb - Free a command iocb and its associated resources
3651  * @phba: pointer to lpfc hba data structure.
3652  * @elsiocb: pointer to lpfc els command iocb data structure.
3653  *
3654  * This routine frees a command IOCB and its associated resources. The
3655  * command IOCB data structure contains the reference to various associated
3656  * resources, these fields must be set to NULL if the associated reference
3657  * not present:
3658  *   context1 - reference to ndlp
3659  *   context2 - reference to cmd
3660  *   context2->next - reference to rsp
3661  *   context3 - reference to bpl
3662  *
3663  * It first properly decrements the reference count held on ndlp for the
3664  * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
3665  * set, it invokes the lpfc_els_free_data() routine to release the Direct
3666  * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
3667  * adds the DMA buffer the @phba data structure for the delayed release.
3668  * If reference to the Buffer Pointer List (BPL) is present, the
3669  * lpfc_els_free_bpl() routine is invoked to release the DMA memory
3670  * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
3671  * invoked to release the IOCB data structure back to @phba IOCBQ list.
3672  *
3673  * Return code
3674  *   0 - Success (currently, always return 0)
3675  **/
3676 int
3677 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3678 {
3679 	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
3680 	struct lpfc_nodelist *ndlp;
3681 
3682 	ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3683 	if (ndlp) {
3684 		if (ndlp->nlp_flag & NLP_DEFER_RM) {
3685 			lpfc_nlp_put(ndlp);
3686 
3687 			/* If the ndlp is not being used by another discovery
3688 			 * thread, free it.
3689 			 */
3690 			if (!lpfc_nlp_not_used(ndlp)) {
3691 				/* If ndlp is being used by another discovery
3692 				 * thread, just clear NLP_DEFER_RM
3693 				 */
3694 				ndlp->nlp_flag &= ~NLP_DEFER_RM;
3695 			}
3696 		}
3697 		else
3698 			lpfc_nlp_put(ndlp);
3699 		elsiocb->context1 = NULL;
3700 	}
3701 	/* context2  = cmd,  context2->next = rsp, context3 = bpl */
3702 	if (elsiocb->context2) {
3703 		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3704 			/* Firmware could still be in progress of DMAing
3705 			 * payload, so don't free data buffer till after
3706 			 * a hbeat.
3707 			 */
3708 			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3709 			buf_ptr = elsiocb->context2;
3710 			elsiocb->context2 = NULL;
3711 			if (buf_ptr) {
3712 				buf_ptr1 = NULL;
3713 				spin_lock_irq(&phba->hbalock);
3714 				if (!list_empty(&buf_ptr->list)) {
3715 					list_remove_head(&buf_ptr->list,
3716 						buf_ptr1, struct lpfc_dmabuf,
3717 						list);
3718 					INIT_LIST_HEAD(&buf_ptr1->list);
3719 					list_add_tail(&buf_ptr1->list,
3720 						&phba->elsbuf);
3721 					phba->elsbuf_cnt++;
3722 				}
3723 				INIT_LIST_HEAD(&buf_ptr->list);
3724 				list_add_tail(&buf_ptr->list, &phba->elsbuf);
3725 				phba->elsbuf_cnt++;
3726 				spin_unlock_irq(&phba->hbalock);
3727 			}
3728 		} else {
3729 			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3730 			lpfc_els_free_data(phba, buf_ptr1);
3731 			elsiocb->context2 = NULL;
3732 		}
3733 	}
3734 
3735 	if (elsiocb->context3) {
3736 		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3737 		lpfc_els_free_bpl(phba, buf_ptr);
3738 		elsiocb->context3 = NULL;
3739 	}
3740 	lpfc_sli_release_iocbq(phba, elsiocb);
3741 	return 0;
3742 }
3743 
3744 /**
3745  * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
3746  * @phba: pointer to lpfc hba data structure.
3747  * @cmdiocb: pointer to lpfc command iocb data structure.
3748  * @rspiocb: pointer to lpfc response iocb data structure.
3749  *
3750  * This routine is the completion callback function to the Logout (LOGO)
3751  * Accept (ACC) Response ELS command. This routine is invoked to indicate
3752  * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to
3753  * release the ndlp if it has the last reference remaining (reference count
3754  * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1
3755  * field to NULL to inform the following lpfc_els_free_iocb() routine no
3756  * ndlp reference count needs to be decremented. Otherwise, the ndlp
3757  * reference use-count shall be decremented by the lpfc_els_free_iocb()
3758  * routine. Finally, the lpfc_els_free_iocb() is invoked to release the
3759  * IOCB data structure.
3760  **/
3761 static void
3762 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3763 		       struct lpfc_iocbq *rspiocb)
3764 {
3765 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3766 	struct lpfc_vport *vport = cmdiocb->vport;
3767 	IOCB_t *irsp;
3768 
3769 	irsp = &rspiocb->iocb;
3770 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3771 		"ACC LOGO cmpl:   status:x%x/x%x did:x%x",
3772 		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
3773 	/* ACC to LOGO completes to NPort <nlp_DID> */
3774 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3775 			 "0109 ACC to LOGO completes to NPort x%x "
3776 			 "Data: x%x x%x x%x\n",
3777 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3778 			 ndlp->nlp_rpi);
3779 
3780 	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3781 		/* NPort Recovery mode or node is just allocated */
3782 		if (!lpfc_nlp_not_used(ndlp)) {
3783 			/* If the ndlp is being used by another discovery
3784 			 * thread, just unregister the RPI.
3785 			 */
3786 			lpfc_unreg_rpi(vport, ndlp);
3787 		} else {
3788 			/* Indicate the node has already released, should
3789 			 * not reference to it from within lpfc_els_free_iocb.
3790 			 */
3791 			cmdiocb->context1 = NULL;
3792 		}
3793 	}
3794 
3795 	/*
3796 	 * The driver received a LOGO from the rport and has ACK'd it.
3797 	 * At this point, the driver is done so release the IOCB
3798 	 */
3799 	lpfc_els_free_iocb(phba, cmdiocb);
3800 }
3801 
3802 /**
3803  * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
3804  * @phba: pointer to lpfc hba data structure.
3805  * @pmb: pointer to the driver internal queue element for mailbox command.
3806  *
3807  * This routine is the completion callback function for unregister default
3808  * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3809  * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3810  * decrements the ndlp reference count held for this completion callback
3811  * function. After that, it invokes the lpfc_nlp_not_used() to check
3812  * whether there is only one reference left on the ndlp. If so, it will
3813  * perform one more decrement and trigger the release of the ndlp.
3814  **/
3815 void
3816 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3817 {
3818 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3819 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3820 
3821 	pmb->context1 = NULL;
3822 	pmb->context2 = NULL;
3823 
3824 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3825 	kfree(mp);
3826 	mempool_free(pmb, phba->mbox_mem_pool);
3827 	if (ndlp) {
3828 		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3829 				 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n",
3830 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
3831 				 kref_read(&ndlp->kref),
3832 				 ndlp->nlp_usg_map, ndlp);
3833 		if (NLP_CHK_NODE_ACT(ndlp)) {
3834 			lpfc_nlp_put(ndlp);
3835 			/* This is the end of the default RPI cleanup logic for
3836 			 * this ndlp. If no other discovery threads are using
3837 			 * this ndlp, free all resources associated with it.
3838 			 */
3839 			lpfc_nlp_not_used(ndlp);
3840 		} else {
3841 			lpfc_drop_node(ndlp->vport, ndlp);
3842 		}
3843 	}
3844 
3845 	return;
3846 }
3847 
3848 /**
3849  * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
3850  * @phba: pointer to lpfc hba data structure.
3851  * @cmdiocb: pointer to lpfc command iocb data structure.
3852  * @rspiocb: pointer to lpfc response iocb data structure.
3853  *
3854  * This routine is the completion callback function for ELS Response IOCB
3855  * command. In normal case, this callback function just properly sets the
3856  * nlp_flag bitmap in the ndlp data structure, if the mbox command reference
3857  * field in the command IOCB is not NULL, the referred mailbox command will
3858  * be send out, and then invokes the lpfc_els_free_iocb() routine to release
3859  * the IOCB. Under error conditions, such as when a LS_RJT is returned or a
3860  * link down event occurred during the discovery, the lpfc_nlp_not_used()
3861  * routine shall be invoked trying to release the ndlp if no other threads
3862  * are currently referring it.
3863  **/
3864 static void
3865 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3866 		  struct lpfc_iocbq *rspiocb)
3867 {
3868 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3869 	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3870 	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
3871 	IOCB_t  *irsp;
3872 	uint8_t *pcmd;
3873 	LPFC_MBOXQ_t *mbox = NULL;
3874 	struct lpfc_dmabuf *mp = NULL;
3875 	uint32_t ls_rjt = 0;
3876 
3877 	irsp = &rspiocb->iocb;
3878 
3879 	if (cmdiocb->context_un.mbox)
3880 		mbox = cmdiocb->context_un.mbox;
3881 
3882 	/* First determine if this is a LS_RJT cmpl. Note, this callback
3883 	 * function can have cmdiocb->contest1 (ndlp) field set to NULL.
3884 	 */
3885 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
3886 	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3887 	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3888 		/* A LS_RJT associated with Default RPI cleanup has its own
3889 		 * separate code path.
3890 		 */
3891 		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3892 			ls_rjt = 1;
3893 	}
3894 
3895 	/* Check to see if link went down during discovery */
3896 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3897 		if (mbox) {
3898 			mp = (struct lpfc_dmabuf *) mbox->context1;
3899 			if (mp) {
3900 				lpfc_mbuf_free(phba, mp->virt, mp->phys);
3901 				kfree(mp);
3902 			}
3903 			mempool_free(mbox, phba->mbox_mem_pool);
3904 		}
3905 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3906 		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3907 			if (lpfc_nlp_not_used(ndlp)) {
3908 				ndlp = NULL;
3909 				/* Indicate the node has already released,
3910 				 * should not reference to it from within
3911 				 * the routine lpfc_els_free_iocb.
3912 				 */
3913 				cmdiocb->context1 = NULL;
3914 			}
3915 		goto out;
3916 	}
3917 
3918 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3919 		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
3920 		irsp->ulpStatus, irsp->un.ulpWord[4],
3921 		cmdiocb->iocb.un.elsreq64.remoteID);
3922 	/* ELS response tag <ulpIoTag> completes */
3923 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3924 			 "0110 ELS response tag x%x completes "
3925 			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3926 			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3927 			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3928 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3929 			 ndlp->nlp_rpi);
3930 	if (mbox) {
3931 		if ((rspiocb->iocb.ulpStatus == 0)
3932 		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
3933 			lpfc_unreg_rpi(vport, ndlp);
3934 			/* Increment reference count to ndlp to hold the
3935 			 * reference to ndlp for the callback function.
3936 			 */
3937 			mbox->context2 = lpfc_nlp_get(ndlp);
3938 			mbox->vport = vport;
3939 			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3940 				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3941 				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3942 			}
3943 			else {
3944 				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3945 				ndlp->nlp_prev_state = ndlp->nlp_state;
3946 				lpfc_nlp_set_state(vport, ndlp,
3947 					   NLP_STE_REG_LOGIN_ISSUE);
3948 			}
3949 
3950 			ndlp->nlp_flag |= NLP_REG_LOGIN_SEND;
3951 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3952 			    != MBX_NOT_FINISHED)
3953 				goto out;
3954 
3955 			/* Decrement the ndlp reference count we
3956 			 * set for this failed mailbox command.
3957 			 */
3958 			lpfc_nlp_put(ndlp);
3959 			ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3960 
3961 			/* ELS rsp: Cannot issue reg_login for <NPortid> */
3962 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3963 				"0138 ELS rsp: Cannot issue reg_login for x%x "
3964 				"Data: x%x x%x x%x\n",
3965 				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3966 				ndlp->nlp_rpi);
3967 
3968 			if (lpfc_nlp_not_used(ndlp)) {
3969 				ndlp = NULL;
3970 				/* Indicate node has already been released,
3971 				 * should not reference to it from within
3972 				 * the routine lpfc_els_free_iocb.
3973 				 */
3974 				cmdiocb->context1 = NULL;
3975 			}
3976 		} else {
3977 			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
3978 			if (!lpfc_error_lost_link(irsp) &&
3979 			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
3980 				if (lpfc_nlp_not_used(ndlp)) {
3981 					ndlp = NULL;
3982 					/* Indicate node has already been
3983 					 * released, should not reference
3984 					 * to it from within the routine
3985 					 * lpfc_els_free_iocb.
3986 					 */
3987 					cmdiocb->context1 = NULL;
3988 				}
3989 			}
3990 		}
3991 		mp = (struct lpfc_dmabuf *) mbox->context1;
3992 		if (mp) {
3993 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
3994 			kfree(mp);
3995 		}
3996 		mempool_free(mbox, phba->mbox_mem_pool);
3997 	}
3998 out:
3999 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
4000 		spin_lock_irq(shost->host_lock);
4001 		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
4002 		spin_unlock_irq(shost->host_lock);
4003 
4004 		/* If the node is not being used by another discovery thread,
4005 		 * and we are sending a reject, we are done with it.
4006 		 * Release driver reference count here and free associated
4007 		 * resources.
4008 		 */
4009 		if (ls_rjt)
4010 			if (lpfc_nlp_not_used(ndlp))
4011 				/* Indicate node has already been released,
4012 				 * should not reference to it from within
4013 				 * the routine lpfc_els_free_iocb.
4014 				 */
4015 				cmdiocb->context1 = NULL;
4016 
4017 	}
4018 
4019 	lpfc_els_free_iocb(phba, cmdiocb);
4020 	return;
4021 }
4022 
4023 /**
4024  * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
4025  * @vport: pointer to a host virtual N_Port data structure.
4026  * @flag: the els command code to be accepted.
4027  * @oldiocb: pointer to the original lpfc command iocb data structure.
4028  * @ndlp: pointer to a node-list data structure.
4029  * @mbox: pointer to the driver internal queue element for mailbox command.
4030  *
4031  * This routine prepares and issues an Accept (ACC) response IOCB
4032  * command. It uses the @flag to properly set up the IOCB field for the
4033  * specific ACC response command to be issued and invokes the
4034  * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
4035  * @mbox pointer is passed in, it will be put into the context_un.mbox
4036  * field of the IOCB for the completion callback function to issue the
4037  * mailbox command to the HBA later when callback is invoked.
4038  *
4039  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4040  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4041  * will be stored into the context1 field of the IOCB for the completion
4042  * callback function to the corresponding response ELS IOCB command.
4043  *
4044  * Return code
4045  *   0 - Successfully issued acc response
4046  *   1 - Failed to issue acc response
4047  **/
4048 int
4049 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
4050 		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4051 		 LPFC_MBOXQ_t *mbox)
4052 {
4053 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4054 	struct lpfc_hba  *phba = vport->phba;
4055 	IOCB_t *icmd;
4056 	IOCB_t *oldcmd;
4057 	struct lpfc_iocbq *elsiocb;
4058 	uint8_t *pcmd;
4059 	struct serv_parm *sp;
4060 	uint16_t cmdsize;
4061 	int rc;
4062 	ELS_PKT *els_pkt_ptr;
4063 
4064 	oldcmd = &oldiocb->iocb;
4065 
4066 	switch (flag) {
4067 	case ELS_CMD_ACC:
4068 		cmdsize = sizeof(uint32_t);
4069 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4070 					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
4071 		if (!elsiocb) {
4072 			spin_lock_irq(shost->host_lock);
4073 			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4074 			spin_unlock_irq(shost->host_lock);
4075 			return 1;
4076 		}
4077 
4078 		icmd = &elsiocb->iocb;
4079 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4080 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4081 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4082 		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4083 		pcmd += sizeof(uint32_t);
4084 
4085 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4086 			"Issue ACC:       did:x%x flg:x%x",
4087 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4088 		break;
4089 	case ELS_CMD_FLOGI:
4090 	case ELS_CMD_PLOGI:
4091 		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
4092 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4093 					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
4094 		if (!elsiocb)
4095 			return 1;
4096 
4097 		icmd = &elsiocb->iocb;
4098 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4099 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4100 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4101 
4102 		if (mbox)
4103 			elsiocb->context_un.mbox = mbox;
4104 
4105 		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4106 		pcmd += sizeof(uint32_t);
4107 		sp = (struct serv_parm *)pcmd;
4108 
4109 		if (flag == ELS_CMD_FLOGI) {
4110 			/* Copy the received service parameters back */
4111 			memcpy(sp, &phba->fc_fabparam,
4112 			       sizeof(struct serv_parm));
4113 
4114 			/* Clear the F_Port bit */
4115 			sp->cmn.fPort = 0;
4116 
4117 			/* Mark all class service parameters as invalid */
4118 			sp->cls1.classValid = 0;
4119 			sp->cls2.classValid = 0;
4120 			sp->cls3.classValid = 0;
4121 			sp->cls4.classValid = 0;
4122 
4123 			/* Copy our worldwide names */
4124 			memcpy(&sp->portName, &vport->fc_sparam.portName,
4125 			       sizeof(struct lpfc_name));
4126 			memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
4127 			       sizeof(struct lpfc_name));
4128 		} else {
4129 			memcpy(pcmd, &vport->fc_sparam,
4130 			       sizeof(struct serv_parm));
4131 
4132 			sp->cmn.valid_vendor_ver_level = 0;
4133 			memset(sp->un.vendorVersion, 0,
4134 			       sizeof(sp->un.vendorVersion));
4135 
4136 			/* If our firmware supports this feature, convey that
4137 			 * info to the target using the vendor specific field.
4138 			 */
4139 			if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
4140 				sp->cmn.valid_vendor_ver_level = 1;
4141 				sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
4142 				sp->un.vv.flags =
4143 					cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
4144 			}
4145 		}
4146 
4147 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4148 			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
4149 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4150 		break;
4151 	case ELS_CMD_PRLO:
4152 		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
4153 		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4154 					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
4155 		if (!elsiocb)
4156 			return 1;
4157 
4158 		icmd = &elsiocb->iocb;
4159 		icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4160 		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4161 		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4162 
4163 		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
4164 		       sizeof(uint32_t) + sizeof(PRLO));
4165 		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4166 		els_pkt_ptr = (ELS_PKT *) pcmd;
4167 		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
4168 
4169 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4170 			"Issue ACC PRLO:  did:x%x flg:x%x",
4171 			ndlp->nlp_DID, ndlp->nlp_flag, 0);
4172 		break;
4173 	default:
4174 		return 1;
4175 	}
4176 	/* Xmit ELS ACC response tag <ulpIoTag> */
4177 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4178 			 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
4179 			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x "
4180 			 "fc_flag x%x\n",
4181 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4182 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4183 			 ndlp->nlp_rpi, vport->fc_flag);
4184 	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
4185 		spin_lock_irq(shost->host_lock);
4186 		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4187 			ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
4188 			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4189 		spin_unlock_irq(shost->host_lock);
4190 		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4191 	} else {
4192 		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4193 	}
4194 
4195 	phba->fc_stat.elsXmitACC++;
4196 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4197 	if (rc == IOCB_ERROR) {
4198 		lpfc_els_free_iocb(phba, elsiocb);
4199 		return 1;
4200 	}
4201 	return 0;
4202 }
4203 
4204 /**
4205  * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command
4206  * @vport: pointer to a virtual N_Port data structure.
4207  * @rejectError:
4208  * @oldiocb: pointer to the original lpfc command iocb data structure.
4209  * @ndlp: pointer to a node-list data structure.
4210  * @mbox: pointer to the driver internal queue element for mailbox command.
4211  *
4212  * This routine prepares and issue an Reject (RJT) response IOCB
4213  * command. If a @mbox pointer is passed in, it will be put into the
4214  * context_un.mbox field of the IOCB for the completion callback function
4215  * to issue to the HBA later.
4216  *
4217  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4218  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4219  * will be stored into the context1 field of the IOCB for the completion
4220  * callback function to the reject response ELS IOCB command.
4221  *
4222  * Return code
4223  *   0 - Successfully issued reject response
4224  *   1 - Failed to issue reject response
4225  **/
4226 int
4227 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
4228 		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
4229 		    LPFC_MBOXQ_t *mbox)
4230 {
4231 	struct lpfc_hba  *phba = vport->phba;
4232 	IOCB_t *icmd;
4233 	IOCB_t *oldcmd;
4234 	struct lpfc_iocbq *elsiocb;
4235 	uint8_t *pcmd;
4236 	uint16_t cmdsize;
4237 	int rc;
4238 
4239 	cmdsize = 2 * sizeof(uint32_t);
4240 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4241 				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
4242 	if (!elsiocb)
4243 		return 1;
4244 
4245 	icmd = &elsiocb->iocb;
4246 	oldcmd = &oldiocb->iocb;
4247 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4248 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4249 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4250 
4251 	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
4252 	pcmd += sizeof(uint32_t);
4253 	*((uint32_t *) (pcmd)) = rejectError;
4254 
4255 	if (mbox)
4256 		elsiocb->context_un.mbox = mbox;
4257 
4258 	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
4259 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4260 			 "0129 Xmit ELS RJT x%x response tag x%x "
4261 			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4262 			 "rpi x%x\n",
4263 			 rejectError, elsiocb->iotag,
4264 			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
4265 			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
4266 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4267 		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
4268 		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
4269 
4270 	phba->fc_stat.elsXmitLSRJT++;
4271 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4272 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4273 
4274 	if (rc == IOCB_ERROR) {
4275 		lpfc_els_free_iocb(phba, elsiocb);
4276 		return 1;
4277 	}
4278 	return 0;
4279 }
4280 
4281 /**
4282  * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
4283  * @vport: pointer to a virtual N_Port data structure.
4284  * @oldiocb: pointer to the original lpfc command iocb data structure.
4285  * @ndlp: pointer to a node-list data structure.
4286  *
4287  * This routine prepares and issues an Accept (ACC) response to Address
4288  * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
4289  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4290  *
4291  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4292  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4293  * will be stored into the context1 field of the IOCB for the completion
4294  * callback function to the ADISC Accept response ELS IOCB command.
4295  *
4296  * Return code
4297  *   0 - Successfully issued acc adisc response
4298  *   1 - Failed to issue adisc acc response
4299  **/
4300 int
4301 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4302 		       struct lpfc_nodelist *ndlp)
4303 {
4304 	struct lpfc_hba  *phba = vport->phba;
4305 	ADISC *ap;
4306 	IOCB_t *icmd, *oldcmd;
4307 	struct lpfc_iocbq *elsiocb;
4308 	uint8_t *pcmd;
4309 	uint16_t cmdsize;
4310 	int rc;
4311 
4312 	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
4313 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4314 				     ndlp->nlp_DID, ELS_CMD_ACC);
4315 	if (!elsiocb)
4316 		return 1;
4317 
4318 	icmd = &elsiocb->iocb;
4319 	oldcmd = &oldiocb->iocb;
4320 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4321 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4322 
4323 	/* Xmit ADISC ACC response tag <ulpIoTag> */
4324 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4325 			 "0130 Xmit ADISC ACC response iotag x%x xri: "
4326 			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
4327 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4328 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4329 			 ndlp->nlp_rpi);
4330 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4331 
4332 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4333 	pcmd += sizeof(uint32_t);
4334 
4335 	ap = (ADISC *) (pcmd);
4336 	ap->hardAL_PA = phba->fc_pref_ALPA;
4337 	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4338 	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4339 	ap->DID = be32_to_cpu(vport->fc_myDID);
4340 
4341 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4342 		"Issue ACC ADISC: did:x%x flg:x%x",
4343 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4344 
4345 	phba->fc_stat.elsXmitACC++;
4346 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4347 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4348 	if (rc == IOCB_ERROR) {
4349 		lpfc_els_free_iocb(phba, elsiocb);
4350 		return 1;
4351 	}
4352 	return 0;
4353 }
4354 
4355 /**
4356  * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
4357  * @vport: pointer to a virtual N_Port data structure.
4358  * @oldiocb: pointer to the original lpfc command iocb data structure.
4359  * @ndlp: pointer to a node-list data structure.
4360  *
4361  * This routine prepares and issues an Accept (ACC) response to Process
4362  * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
4363  * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
4364  *
4365  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4366  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4367  * will be stored into the context1 field of the IOCB for the completion
4368  * callback function to the PRLI Accept response ELS IOCB command.
4369  *
4370  * Return code
4371  *   0 - Successfully issued acc prli response
4372  *   1 - Failed to issue acc prli response
4373  **/
4374 int
4375 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
4376 		      struct lpfc_nodelist *ndlp)
4377 {
4378 	struct lpfc_hba  *phba = vport->phba;
4379 	PRLI *npr;
4380 	struct lpfc_nvme_prli *npr_nvme;
4381 	lpfc_vpd_t *vpd;
4382 	IOCB_t *icmd;
4383 	IOCB_t *oldcmd;
4384 	struct lpfc_iocbq *elsiocb;
4385 	uint8_t *pcmd;
4386 	uint16_t cmdsize;
4387 	uint32_t prli_fc4_req, *req_payload;
4388 	struct lpfc_dmabuf *req_buf;
4389 	int rc;
4390 	u32 elsrspcmd;
4391 
4392 	/* Need the incoming PRLI payload to determine if the ACC is for an
4393 	 * FC4 or NVME PRLI type.  The PRLI type is at word 1.
4394 	 */
4395 	req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
4396 	req_payload = (((uint32_t *)req_buf->virt) + 1);
4397 
4398 	/* PRLI type payload is at byte 3 for FCP or NVME. */
4399 	prli_fc4_req = be32_to_cpu(*req_payload);
4400 	prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
4401 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4402 			 "6127 PRLI_ACC:  Req Type x%x, Word1 x%08x\n",
4403 			 prli_fc4_req, *((uint32_t *)req_payload));
4404 
4405 	if (prli_fc4_req == PRLI_FCP_TYPE) {
4406 		cmdsize = sizeof(uint32_t) + sizeof(PRLI);
4407 		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
4408 	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
4409 		cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
4410 		elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
4411 	} else {
4412 		return 1;
4413 	}
4414 
4415 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4416 		ndlp->nlp_DID, elsrspcmd);
4417 	if (!elsiocb)
4418 		return 1;
4419 
4420 	icmd = &elsiocb->iocb;
4421 	oldcmd = &oldiocb->iocb;
4422 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4423 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4424 
4425 	/* Xmit PRLI ACC response tag <ulpIoTag> */
4426 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4427 			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
4428 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4429 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4430 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4431 			 ndlp->nlp_rpi);
4432 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4433 	memset(pcmd, 0, cmdsize);
4434 
4435 	*((uint32_t *)(pcmd)) = elsrspcmd;
4436 	pcmd += sizeof(uint32_t);
4437 
4438 	/* For PRLI, remainder of payload is PRLI parameter page */
4439 	vpd = &phba->vpd;
4440 
4441 	if (prli_fc4_req == PRLI_FCP_TYPE) {
4442 		/*
4443 		 * If the remote port is a target and our firmware version
4444 		 * is 3.20 or later, set the following bits for FC-TAPE
4445 		 * support.
4446 		 */
4447 		npr = (PRLI *) pcmd;
4448 		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
4449 		    (vpd->rev.feaLevelHigh >= 0x02)) {
4450 			npr->ConfmComplAllowed = 1;
4451 			npr->Retry = 1;
4452 			npr->TaskRetryIdReq = 1;
4453 		}
4454 		npr->acceptRspCode = PRLI_REQ_EXECUTED;
4455 		npr->estabImagePair = 1;
4456 		npr->readXferRdyDis = 1;
4457 		npr->ConfmComplAllowed = 1;
4458 		npr->prliType = PRLI_FCP_TYPE;
4459 		npr->initiatorFunc = 1;
4460 	} else if (prli_fc4_req & PRLI_NVME_TYPE) {
4461 		/* Respond with an NVME PRLI Type */
4462 		npr_nvme = (struct lpfc_nvme_prli *) pcmd;
4463 		bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
4464 		bf_set(prli_estabImagePair, npr_nvme, 0);  /* Should be 0 */
4465 		bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
4466 		if (phba->nvmet_support) {
4467 			bf_set(prli_tgt, npr_nvme, 1);
4468 			bf_set(prli_disc, npr_nvme, 1);
4469 			if (phba->cfg_nvme_enable_fb) {
4470 				bf_set(prli_fba, npr_nvme, 1);
4471 
4472 				/* TBD.  Target mode needs to post buffers
4473 				 * that support the configured first burst
4474 				 * byte size.
4475 				 */
4476 				bf_set(prli_fb_sz, npr_nvme,
4477 				       phba->cfg_nvmet_fb_size);
4478 			}
4479 		} else {
4480 			bf_set(prli_init, npr_nvme, 1);
4481 		}
4482 
4483 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
4484 				 "6015 NVME issue PRLI ACC word1 x%08x "
4485 				 "word4 x%08x word5 x%08x flag x%x, "
4486 				 "fcp_info x%x nlp_type x%x\n",
4487 				 npr_nvme->word1, npr_nvme->word4,
4488 				 npr_nvme->word5, ndlp->nlp_flag,
4489 				 ndlp->nlp_fcp_info, ndlp->nlp_type);
4490 		npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
4491 		npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
4492 		npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
4493 	} else
4494 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4495 				 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
4496 				 prli_fc4_req, ndlp->nlp_fc4_type,
4497 				 ndlp->nlp_DID);
4498 
4499 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4500 		"Issue ACC PRLI:  did:x%x flg:x%x",
4501 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4502 
4503 	phba->fc_stat.elsXmitACC++;
4504 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4505 
4506 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4507 	if (rc == IOCB_ERROR) {
4508 		lpfc_els_free_iocb(phba, elsiocb);
4509 		return 1;
4510 	}
4511 	return 0;
4512 }
4513 
4514 /**
4515  * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
4516  * @vport: pointer to a virtual N_Port data structure.
4517  * @format: rnid command format.
4518  * @oldiocb: pointer to the original lpfc command iocb data structure.
4519  * @ndlp: pointer to a node-list data structure.
4520  *
4521  * This routine issues a Request Node Identification Data (RNID) Accept
4522  * (ACC) response. It constructs the RNID ACC response command according to
4523  * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
4524  * issue the response. Note that this command does not need to hold the ndlp
4525  * reference count for the callback. So, the ndlp reference count taken by
4526  * the lpfc_prep_els_iocb() routine is put back and the context1 field of
4527  * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
4528  * there is no ndlp reference available.
4529  *
4530  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4531  * will be incremented by 1 for holding the ndlp and the reference to ndlp
4532  * will be stored into the context1 field of the IOCB for the completion
4533  * callback function. However, for the RNID Accept Response ELS command,
4534  * this is undone later by this routine after the IOCB is allocated.
4535  *
4536  * Return code
4537  *   0 - Successfully issued acc rnid response
4538  *   1 - Failed to issue acc rnid response
4539  **/
4540 static int
4541 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4542 		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4543 {
4544 	struct lpfc_hba  *phba = vport->phba;
4545 	RNID *rn;
4546 	IOCB_t *icmd, *oldcmd;
4547 	struct lpfc_iocbq *elsiocb;
4548 	uint8_t *pcmd;
4549 	uint16_t cmdsize;
4550 	int rc;
4551 
4552 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
4553 					+ (2 * sizeof(struct lpfc_name));
4554 	if (format)
4555 		cmdsize += sizeof(RNID_TOP_DISC);
4556 
4557 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4558 				     ndlp->nlp_DID, ELS_CMD_ACC);
4559 	if (!elsiocb)
4560 		return 1;
4561 
4562 	icmd = &elsiocb->iocb;
4563 	oldcmd = &oldiocb->iocb;
4564 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
4565 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4566 
4567 	/* Xmit RNID ACC response tag <ulpIoTag> */
4568 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4569 			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
4570 			 elsiocb->iotag, elsiocb->iocb.ulpContext);
4571 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4572 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4573 	pcmd += sizeof(uint32_t);
4574 
4575 	memset(pcmd, 0, sizeof(RNID));
4576 	rn = (RNID *) (pcmd);
4577 	rn->Format = format;
4578 	rn->CommonLen = (2 * sizeof(struct lpfc_name));
4579 	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
4580 	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
4581 	switch (format) {
4582 	case 0:
4583 		rn->SpecificLen = 0;
4584 		break;
4585 	case RNID_TOPOLOGY_DISC:
4586 		rn->SpecificLen = sizeof(RNID_TOP_DISC);
4587 		memcpy(&rn->un.topologyDisc.portName,
4588 		       &vport->fc_portname, sizeof(struct lpfc_name));
4589 		rn->un.topologyDisc.unitType = RNID_HBA;
4590 		rn->un.topologyDisc.physPort = 0;
4591 		rn->un.topologyDisc.attachedNodes = 0;
4592 		break;
4593 	default:
4594 		rn->CommonLen = 0;
4595 		rn->SpecificLen = 0;
4596 		break;
4597 	}
4598 
4599 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4600 		"Issue ACC RNID:  did:x%x flg:x%x",
4601 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4602 
4603 	phba->fc_stat.elsXmitACC++;
4604 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4605 
4606 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4607 	if (rc == IOCB_ERROR) {
4608 		lpfc_els_free_iocb(phba, elsiocb);
4609 		return 1;
4610 	}
4611 	return 0;
4612 }
4613 
4614 /**
4615  * lpfc_els_clear_rrq - Clear the rq that this rrq describes.
4616  * @vport: pointer to a virtual N_Port data structure.
4617  * @iocb: pointer to the lpfc command iocb data structure.
4618  * @ndlp: pointer to a node-list data structure.
4619  *
4620  * Return
4621  **/
4622 static void
4623 lpfc_els_clear_rrq(struct lpfc_vport *vport,
4624 		   struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
4625 {
4626 	struct lpfc_hba  *phba = vport->phba;
4627 	uint8_t *pcmd;
4628 	struct RRQ *rrq;
4629 	uint16_t rxid;
4630 	uint16_t xri;
4631 	struct lpfc_node_rrq *prrq;
4632 
4633 
4634 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
4635 	pcmd += sizeof(uint32_t);
4636 	rrq = (struct RRQ *)pcmd;
4637 	rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
4638 	rxid = bf_get(rrq_rxid, rrq);
4639 
4640 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4641 			"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
4642 			" x%x x%x\n",
4643 			be32_to_cpu(bf_get(rrq_did, rrq)),
4644 			bf_get(rrq_oxid, rrq),
4645 			rxid,
4646 			iocb->iotag, iocb->iocb.ulpContext);
4647 
4648 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4649 		"Clear RRQ:  did:x%x flg:x%x exchg:x%.08x",
4650 		ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
4651 	if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
4652 		xri = bf_get(rrq_oxid, rrq);
4653 	else
4654 		xri = rxid;
4655 	prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
4656 	if (prrq)
4657 		lpfc_clr_rrq_active(phba, xri, prrq);
4658 	return;
4659 }
4660 
4661 /**
4662  * lpfc_els_rsp_echo_acc - Issue echo acc response
4663  * @vport: pointer to a virtual N_Port data structure.
4664  * @data: pointer to echo data to return in the accept.
4665  * @oldiocb: pointer to the original lpfc command iocb data structure.
4666  * @ndlp: pointer to a node-list data structure.
4667  *
4668  * Return code
4669  *   0 - Successfully issued acc echo response
4670  *   1 - Failed to issue acc echo response
4671  **/
4672 static int
4673 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4674 		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4675 {
4676 	struct lpfc_hba  *phba = vport->phba;
4677 	struct lpfc_iocbq *elsiocb;
4678 	uint8_t *pcmd;
4679 	uint16_t cmdsize;
4680 	int rc;
4681 
4682 	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4683 
4684 	/* The accumulated length can exceed the BPL_SIZE.  For
4685 	 * now, use this as the limit
4686 	 */
4687 	if (cmdsize > LPFC_BPL_SIZE)
4688 		cmdsize = LPFC_BPL_SIZE;
4689 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4690 				     ndlp->nlp_DID, ELS_CMD_ACC);
4691 	if (!elsiocb)
4692 		return 1;
4693 
4694 	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;  /* Xri / rx_id */
4695 	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
4696 
4697 	/* Xmit ECHO ACC response tag <ulpIoTag> */
4698 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4699 			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
4700 			 elsiocb->iotag, elsiocb->iocb.ulpContext);
4701 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4702 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4703 	pcmd += sizeof(uint32_t);
4704 	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
4705 
4706 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4707 		"Issue ACC ECHO:  did:x%x flg:x%x",
4708 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
4709 
4710 	phba->fc_stat.elsXmitACC++;
4711 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4712 
4713 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4714 	if (rc == IOCB_ERROR) {
4715 		lpfc_els_free_iocb(phba, elsiocb);
4716 		return 1;
4717 	}
4718 	return 0;
4719 }
4720 
4721 /**
4722  * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
4723  * @vport: pointer to a host virtual N_Port data structure.
4724  *
4725  * This routine issues Address Discover (ADISC) ELS commands to those
4726  * N_Ports which are in node port recovery state and ADISC has not been issued
4727  * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the
4728  * lpfc_issue_els_adisc() routine, the per @vport number of discover count
4729  * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a
4730  * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
4731  * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC
4732  * IOCBs quit for later pick up. On the other hand, after walking through
4733  * all the ndlps with the @vport and there is none ADISC IOCB issued, the
4734  * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is
4735  * no more ADISC need to be sent.
4736  *
4737  * Return code
4738  *    The number of N_Ports with adisc issued.
4739  **/
4740 int
4741 lpfc_els_disc_adisc(struct lpfc_vport *vport)
4742 {
4743 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4744 	struct lpfc_nodelist *ndlp, *next_ndlp;
4745 	int sentadisc = 0;
4746 
4747 	/* go thru NPR nodes and issue any remaining ELS ADISCs */
4748 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4749 		if (!NLP_CHK_NODE_ACT(ndlp))
4750 			continue;
4751 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4752 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4753 		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
4754 			spin_lock_irq(shost->host_lock);
4755 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
4756 			spin_unlock_irq(shost->host_lock);
4757 			ndlp->nlp_prev_state = ndlp->nlp_state;
4758 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
4759 			lpfc_issue_els_adisc(vport, ndlp, 0);
4760 			sentadisc++;
4761 			vport->num_disc_nodes++;
4762 			if (vport->num_disc_nodes >=
4763 			    vport->cfg_discovery_threads) {
4764 				spin_lock_irq(shost->host_lock);
4765 				vport->fc_flag |= FC_NLP_MORE;
4766 				spin_unlock_irq(shost->host_lock);
4767 				break;
4768 			}
4769 		}
4770 	}
4771 	if (sentadisc == 0) {
4772 		spin_lock_irq(shost->host_lock);
4773 		vport->fc_flag &= ~FC_NLP_MORE;
4774 		spin_unlock_irq(shost->host_lock);
4775 	}
4776 	return sentadisc;
4777 }
4778 
4779 /**
4780  * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
4781  * @vport: pointer to a host virtual N_Port data structure.
4782  *
4783  * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
4784  * which are in node port recovery state, with a @vport. Each time an ELS
4785  * ADISC PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
4786  * the per @vport number of discover count (num_disc_nodes) shall be
4787  * incremented. If the num_disc_nodes reaches a pre-configured threshold
4788  * (cfg_discovery_threads), the @vport fc_flag will be marked with FC_NLP_MORE
4789  * bit set and quit the process of issuing remaining ADISC PLOGIN IOCBs for
4790  * later pick up. On the other hand, after walking through all the ndlps with
4791  * the @vport and there is none ADISC PLOGI IOCB issued, the @vport fc_flag
4792  * shall be cleared with the FC_NLP_MORE bit indicating there is no more ADISC
4793  * PLOGI need to be sent.
4794  *
4795  * Return code
4796  *   The number of N_Ports with plogi issued.
4797  **/
4798 int
4799 lpfc_els_disc_plogi(struct lpfc_vport *vport)
4800 {
4801 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4802 	struct lpfc_nodelist *ndlp, *next_ndlp;
4803 	int sentplogi = 0;
4804 
4805 	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
4806 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4807 		if (!NLP_CHK_NODE_ACT(ndlp))
4808 			continue;
4809 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4810 				(ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4811 				(ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4812 				(ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
4813 			ndlp->nlp_prev_state = ndlp->nlp_state;
4814 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4815 			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4816 			sentplogi++;
4817 			vport->num_disc_nodes++;
4818 			if (vport->num_disc_nodes >=
4819 					vport->cfg_discovery_threads) {
4820 				spin_lock_irq(shost->host_lock);
4821 				vport->fc_flag |= FC_NLP_MORE;
4822 				spin_unlock_irq(shost->host_lock);
4823 				break;
4824 			}
4825 		}
4826 	}
4827 	if (sentplogi) {
4828 		lpfc_set_disctmo(vport);
4829 	}
4830 	else {
4831 		spin_lock_irq(shost->host_lock);
4832 		vport->fc_flag &= ~FC_NLP_MORE;
4833 		spin_unlock_irq(shost->host_lock);
4834 	}
4835 	return sentplogi;
4836 }
4837 
4838 static uint32_t
4839 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
4840 		uint32_t word0)
4841 {
4842 
4843 	desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG);
4844 	desc->payload.els_req = word0;
4845 	desc->length = cpu_to_be32(sizeof(desc->payload));
4846 
4847 	return sizeof(struct fc_rdp_link_service_desc);
4848 }
4849 
4850 static uint32_t
4851 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
4852 		uint8_t *page_a0, uint8_t *page_a2)
4853 {
4854 	uint16_t wavelength;
4855 	uint16_t temperature;
4856 	uint16_t rx_power;
4857 	uint16_t tx_bias;
4858 	uint16_t tx_power;
4859 	uint16_t vcc;
4860 	uint16_t flag = 0;
4861 	struct sff_trasnceiver_codes_byte4 *trasn_code_byte4;
4862 	struct sff_trasnceiver_codes_byte5 *trasn_code_byte5;
4863 
4864 	desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG);
4865 
4866 	trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *)
4867 			&page_a0[SSF_TRANSCEIVER_CODE_B4];
4868 	trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *)
4869 			&page_a0[SSF_TRANSCEIVER_CODE_B5];
4870 
4871 	if ((trasn_code_byte4->fc_sw_laser) ||
4872 	    (trasn_code_byte5->fc_sw_laser_sl) ||
4873 	    (trasn_code_byte5->fc_sw_laser_sn)) {  /* check if its short WL */
4874 		flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT);
4875 	} else if (trasn_code_byte4->fc_lw_laser) {
4876 		wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) |
4877 			page_a0[SSF_WAVELENGTH_B0];
4878 		if (wavelength == SFP_WAVELENGTH_LC1310)
4879 			flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT;
4880 		if (wavelength == SFP_WAVELENGTH_LL1550)
4881 			flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT;
4882 	}
4883 	/* check if its SFP+ */
4884 	flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ?
4885 			SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN)
4886 					<< SFP_FLAG_CT_SHIFT;
4887 
4888 	/* check if its OPTICAL */
4889 	flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ?
4890 			SFP_FLAG_IS_OPTICAL_PORT : 0)
4891 					<< SFP_FLAG_IS_OPTICAL_SHIFT;
4892 
4893 	temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 |
4894 		page_a2[SFF_TEMPERATURE_B0]);
4895 	vcc = (page_a2[SFF_VCC_B1] << 8 |
4896 		page_a2[SFF_VCC_B0]);
4897 	tx_power = (page_a2[SFF_TXPOWER_B1] << 8 |
4898 		page_a2[SFF_TXPOWER_B0]);
4899 	tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 |
4900 		page_a2[SFF_TX_BIAS_CURRENT_B0]);
4901 	rx_power = (page_a2[SFF_RXPOWER_B1] << 8 |
4902 		page_a2[SFF_RXPOWER_B0]);
4903 	desc->sfp_info.temperature = cpu_to_be16(temperature);
4904 	desc->sfp_info.rx_power = cpu_to_be16(rx_power);
4905 	desc->sfp_info.tx_bias = cpu_to_be16(tx_bias);
4906 	desc->sfp_info.tx_power = cpu_to_be16(tx_power);
4907 	desc->sfp_info.vcc = cpu_to_be16(vcc);
4908 
4909 	desc->sfp_info.flags = cpu_to_be16(flag);
4910 	desc->length = cpu_to_be32(sizeof(desc->sfp_info));
4911 
4912 	return sizeof(struct fc_rdp_sfp_desc);
4913 }
4914 
4915 static uint32_t
4916 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
4917 		READ_LNK_VAR *stat)
4918 {
4919 	uint32_t type;
4920 
4921 	desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG);
4922 
4923 	type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT;
4924 
4925 	desc->info.port_type = cpu_to_be32(type);
4926 
4927 	desc->info.link_status.link_failure_cnt =
4928 		cpu_to_be32(stat->linkFailureCnt);
4929 	desc->info.link_status.loss_of_synch_cnt =
4930 		cpu_to_be32(stat->lossSyncCnt);
4931 	desc->info.link_status.loss_of_signal_cnt =
4932 		cpu_to_be32(stat->lossSignalCnt);
4933 	desc->info.link_status.primitive_seq_proto_err =
4934 		cpu_to_be32(stat->primSeqErrCnt);
4935 	desc->info.link_status.invalid_trans_word =
4936 		cpu_to_be32(stat->invalidXmitWord);
4937 	desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt);
4938 
4939 	desc->length = cpu_to_be32(sizeof(desc->info));
4940 
4941 	return sizeof(struct fc_rdp_link_error_status_desc);
4942 }
4943 
4944 static uint32_t
4945 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
4946 		      struct lpfc_vport *vport)
4947 {
4948 	uint32_t bbCredit;
4949 
4950 	desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
4951 
4952 	bbCredit = vport->fc_sparam.cmn.bbCreditLsb |
4953 			(vport->fc_sparam.cmn.bbCreditMsb << 8);
4954 	desc->bbc_info.port_bbc = cpu_to_be32(bbCredit);
4955 	if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
4956 		bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb |
4957 			(vport->phba->fc_fabparam.cmn.bbCreditMsb << 8);
4958 		desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit);
4959 	} else {
4960 		desc->bbc_info.attached_port_bbc = 0;
4961 	}
4962 
4963 	desc->bbc_info.rtt = 0;
4964 	desc->length = cpu_to_be32(sizeof(desc->bbc_info));
4965 
4966 	return sizeof(struct fc_rdp_bbc_desc);
4967 }
4968 
4969 static uint32_t
4970 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
4971 			   struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
4972 {
4973 	uint32_t flags = 0;
4974 
4975 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
4976 
4977 	desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM];
4978 	desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM];
4979 	desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING];
4980 	desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING];
4981 
4982 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
4983 		flags |= RDP_OET_HIGH_ALARM;
4984 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
4985 		flags |= RDP_OET_LOW_ALARM;
4986 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE)
4987 		flags |= RDP_OET_HIGH_WARNING;
4988 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE)
4989 		flags |= RDP_OET_LOW_WARNING;
4990 
4991 	flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
4992 	desc->oed_info.function_flags = cpu_to_be32(flags);
4993 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
4994 	return sizeof(struct fc_rdp_oed_sfp_desc);
4995 }
4996 
4997 static uint32_t
4998 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
4999 			      struct fc_rdp_oed_sfp_desc *desc,
5000 			      uint8_t *page_a2)
5001 {
5002 	uint32_t flags = 0;
5003 
5004 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5005 
5006 	desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM];
5007 	desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM];
5008 	desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING];
5009 	desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING];
5010 
5011 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
5012 		flags |= RDP_OET_HIGH_ALARM;
5013 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE)
5014 		flags |= RDP_OET_LOW_ALARM;
5015 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE)
5016 		flags |= RDP_OET_HIGH_WARNING;
5017 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE)
5018 		flags |= RDP_OET_LOW_WARNING;
5019 
5020 	flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
5021 	desc->oed_info.function_flags = cpu_to_be32(flags);
5022 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5023 	return sizeof(struct fc_rdp_oed_sfp_desc);
5024 }
5025 
5026 static uint32_t
5027 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
5028 			     struct fc_rdp_oed_sfp_desc *desc,
5029 			     uint8_t *page_a2)
5030 {
5031 	uint32_t flags = 0;
5032 
5033 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5034 
5035 	desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM];
5036 	desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM];
5037 	desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING];
5038 	desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING];
5039 
5040 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS)
5041 		flags |= RDP_OET_HIGH_ALARM;
5042 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS)
5043 		flags |= RDP_OET_LOW_ALARM;
5044 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS)
5045 		flags |= RDP_OET_HIGH_WARNING;
5046 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS)
5047 		flags |= RDP_OET_LOW_WARNING;
5048 
5049 	flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
5050 	desc->oed_info.function_flags = cpu_to_be32(flags);
5051 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5052 	return sizeof(struct fc_rdp_oed_sfp_desc);
5053 }
5054 
5055 static uint32_t
5056 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
5057 			      struct fc_rdp_oed_sfp_desc *desc,
5058 			      uint8_t *page_a2)
5059 {
5060 	uint32_t flags = 0;
5061 
5062 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5063 
5064 	desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM];
5065 	desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM];
5066 	desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING];
5067 	desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING];
5068 
5069 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER)
5070 		flags |= RDP_OET_HIGH_ALARM;
5071 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER)
5072 		flags |= RDP_OET_LOW_ALARM;
5073 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER)
5074 		flags |= RDP_OET_HIGH_WARNING;
5075 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER)
5076 		flags |= RDP_OET_LOW_WARNING;
5077 
5078 	flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
5079 	desc->oed_info.function_flags = cpu_to_be32(flags);
5080 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5081 	return sizeof(struct fc_rdp_oed_sfp_desc);
5082 }
5083 
5084 
5085 static uint32_t
5086 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
5087 			      struct fc_rdp_oed_sfp_desc *desc,
5088 			      uint8_t *page_a2)
5089 {
5090 	uint32_t flags = 0;
5091 
5092 	desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
5093 
5094 	desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM];
5095 	desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM];
5096 	desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING];
5097 	desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING];
5098 
5099 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER)
5100 		flags |= RDP_OET_HIGH_ALARM;
5101 	if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER)
5102 		flags |= RDP_OET_LOW_ALARM;
5103 	if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER)
5104 		flags |= RDP_OET_HIGH_WARNING;
5105 	if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER)
5106 		flags |= RDP_OET_LOW_WARNING;
5107 
5108 	flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
5109 	desc->oed_info.function_flags = cpu_to_be32(flags);
5110 	desc->length = cpu_to_be32(sizeof(desc->oed_info));
5111 	return sizeof(struct fc_rdp_oed_sfp_desc);
5112 }
5113 
5114 static uint32_t
5115 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
5116 		      uint8_t *page_a0, struct lpfc_vport *vport)
5117 {
5118 	desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
5119 	memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
5120 	memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
5121 	memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
5122 	memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
5123 	memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
5124 	desc->length = cpu_to_be32(sizeof(desc->opd_info));
5125 	return sizeof(struct fc_rdp_opd_sfp_desc);
5126 }
5127 
5128 static uint32_t
5129 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
5130 {
5131 	if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
5132 		return 0;
5133 	desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG);
5134 
5135 	desc->info.CorrectedBlocks =
5136 		cpu_to_be32(stat->fecCorrBlkCount);
5137 	desc->info.UncorrectableBlocks =
5138 		cpu_to_be32(stat->fecUncorrBlkCount);
5139 
5140 	desc->length = cpu_to_be32(sizeof(desc->info));
5141 
5142 	return sizeof(struct fc_fec_rdp_desc);
5143 }
5144 
5145 static uint32_t
5146 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
5147 {
5148 	uint16_t rdp_cap = 0;
5149 	uint16_t rdp_speed;
5150 
5151 	desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG);
5152 
5153 	switch (phba->fc_linkspeed) {
5154 	case LPFC_LINK_SPEED_1GHZ:
5155 		rdp_speed = RDP_PS_1GB;
5156 		break;
5157 	case LPFC_LINK_SPEED_2GHZ:
5158 		rdp_speed = RDP_PS_2GB;
5159 		break;
5160 	case LPFC_LINK_SPEED_4GHZ:
5161 		rdp_speed = RDP_PS_4GB;
5162 		break;
5163 	case LPFC_LINK_SPEED_8GHZ:
5164 		rdp_speed = RDP_PS_8GB;
5165 		break;
5166 	case LPFC_LINK_SPEED_10GHZ:
5167 		rdp_speed = RDP_PS_10GB;
5168 		break;
5169 	case LPFC_LINK_SPEED_16GHZ:
5170 		rdp_speed = RDP_PS_16GB;
5171 		break;
5172 	case LPFC_LINK_SPEED_32GHZ:
5173 		rdp_speed = RDP_PS_32GB;
5174 		break;
5175 	default:
5176 		rdp_speed = RDP_PS_UNKNOWN;
5177 		break;
5178 	}
5179 
5180 	desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
5181 
5182 	if (phba->lmt & LMT_32Gb)
5183 		rdp_cap |= RDP_PS_32GB;
5184 	if (phba->lmt & LMT_16Gb)
5185 		rdp_cap |= RDP_PS_16GB;
5186 	if (phba->lmt & LMT_10Gb)
5187 		rdp_cap |= RDP_PS_10GB;
5188 	if (phba->lmt & LMT_8Gb)
5189 		rdp_cap |= RDP_PS_8GB;
5190 	if (phba->lmt & LMT_4Gb)
5191 		rdp_cap |= RDP_PS_4GB;
5192 	if (phba->lmt & LMT_2Gb)
5193 		rdp_cap |= RDP_PS_2GB;
5194 	if (phba->lmt & LMT_1Gb)
5195 		rdp_cap |= RDP_PS_1GB;
5196 
5197 	if (rdp_cap == 0)
5198 		rdp_cap = RDP_CAP_UNKNOWN;
5199 	if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
5200 		rdp_cap |= RDP_CAP_USER_CONFIGURED;
5201 
5202 	desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
5203 	desc->length = cpu_to_be32(sizeof(desc->info));
5204 	return sizeof(struct fc_rdp_port_speed_desc);
5205 }
5206 
5207 static uint32_t
5208 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
5209 		struct lpfc_vport *vport)
5210 {
5211 
5212 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5213 
5214 	memcpy(desc->port_names.wwnn, &vport->fc_nodename,
5215 			sizeof(desc->port_names.wwnn));
5216 
5217 	memcpy(desc->port_names.wwpn, &vport->fc_portname,
5218 			sizeof(desc->port_names.wwpn));
5219 
5220 	desc->length = cpu_to_be32(sizeof(desc->port_names));
5221 	return sizeof(struct fc_rdp_port_name_desc);
5222 }
5223 
5224 static uint32_t
5225 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
5226 		struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5227 {
5228 
5229 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
5230 	if (vport->fc_flag & FC_FABRIC) {
5231 		memcpy(desc->port_names.wwnn, &vport->fabric_nodename,
5232 				sizeof(desc->port_names.wwnn));
5233 
5234 		memcpy(desc->port_names.wwpn, &vport->fabric_portname,
5235 				sizeof(desc->port_names.wwpn));
5236 	} else {  /* Point to Point */
5237 		memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename,
5238 				sizeof(desc->port_names.wwnn));
5239 
5240 		memcpy(desc->port_names.wwnn, &ndlp->nlp_portname,
5241 				sizeof(desc->port_names.wwpn));
5242 	}
5243 
5244 	desc->length = cpu_to_be32(sizeof(desc->port_names));
5245 	return sizeof(struct fc_rdp_port_name_desc);
5246 }
5247 
5248 static void
5249 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5250 		int status)
5251 {
5252 	struct lpfc_nodelist *ndlp = rdp_context->ndlp;
5253 	struct lpfc_vport *vport = ndlp->vport;
5254 	struct lpfc_iocbq *elsiocb;
5255 	struct ulp_bde64 *bpl;
5256 	IOCB_t *icmd;
5257 	uint8_t *pcmd;
5258 	struct ls_rjt *stat;
5259 	struct fc_rdp_res_frame *rdp_res;
5260 	uint32_t cmdsize, len;
5261 	uint16_t *flag_ptr;
5262 	int rc;
5263 
5264 	if (status != SUCCESS)
5265 		goto error;
5266 
5267 	/* This will change once we know the true size of the RDP payload */
5268 	cmdsize = sizeof(struct fc_rdp_res_frame);
5269 
5270 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize,
5271 			lpfc_max_els_tries, rdp_context->ndlp,
5272 			rdp_context->ndlp->nlp_DID, ELS_CMD_ACC);
5273 	lpfc_nlp_put(ndlp);
5274 	if (!elsiocb)
5275 		goto free_rdp_context;
5276 
5277 	icmd = &elsiocb->iocb;
5278 	icmd->ulpContext = rdp_context->rx_id;
5279 	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5280 
5281 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5282 			"2171 Xmit RDP response tag x%x xri x%x, "
5283 			"did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x",
5284 			elsiocb->iotag, elsiocb->iocb.ulpContext,
5285 			ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
5286 			ndlp->nlp_rpi);
5287 	rdp_res = (struct fc_rdp_res_frame *)
5288 		(((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5289 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5290 	memset(pcmd, 0, sizeof(struct fc_rdp_res_frame));
5291 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5292 
5293 	/* Update Alarm and Warning */
5294 	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS);
5295 	phba->sfp_alarm |= *flag_ptr;
5296 	flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS);
5297 	phba->sfp_warning |= *flag_ptr;
5298 
5299 	/* For RDP payload */
5300 	len = 8;
5301 	len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *)
5302 					 (len + pcmd), ELS_CMD_RDP);
5303 
5304 	len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd),
5305 			rdp_context->page_a0, rdp_context->page_a2);
5306 	len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd),
5307 				  phba);
5308 	len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
5309 				       (len + pcmd), &rdp_context->link_stat);
5310 	len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
5311 					     (len + pcmd), vport);
5312 	len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
5313 					(len + pcmd), vport, ndlp);
5314 	len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
5315 			&rdp_context->link_stat);
5316 	/* Check if nport is logged, BZ190632 */
5317 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
5318 		goto lpfc_skip_descriptor;
5319 
5320 	len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd),
5321 				     &rdp_context->link_stat, vport);
5322 	len += lpfc_rdp_res_oed_temp_desc(phba,
5323 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5324 				rdp_context->page_a2);
5325 	len += lpfc_rdp_res_oed_voltage_desc(phba,
5326 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5327 				rdp_context->page_a2);
5328 	len += lpfc_rdp_res_oed_txbias_desc(phba,
5329 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5330 				rdp_context->page_a2);
5331 	len += lpfc_rdp_res_oed_txpower_desc(phba,
5332 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5333 				rdp_context->page_a2);
5334 	len += lpfc_rdp_res_oed_rxpower_desc(phba,
5335 				(struct fc_rdp_oed_sfp_desc *)(len + pcmd),
5336 				rdp_context->page_a2);
5337 	len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd),
5338 				     rdp_context->page_a0, vport);
5339 
5340 lpfc_skip_descriptor:
5341 	rdp_res->length = cpu_to_be32(len - 8);
5342 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5343 
5344 	/* Now that we know the true size of the payload, update the BPL */
5345 	bpl = (struct ulp_bde64 *)
5346 		(((struct lpfc_dmabuf *)(elsiocb->context3))->virt);
5347 	bpl->tus.f.bdeSize = len;
5348 	bpl->tus.f.bdeFlags = 0;
5349 	bpl->tus.w = le32_to_cpu(bpl->tus.w);
5350 
5351 	phba->fc_stat.elsXmitACC++;
5352 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5353 	if (rc == IOCB_ERROR)
5354 		lpfc_els_free_iocb(phba, elsiocb);
5355 
5356 	kfree(rdp_context);
5357 
5358 	return;
5359 error:
5360 	cmdsize = 2 * sizeof(uint32_t);
5361 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries,
5362 			ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT);
5363 	lpfc_nlp_put(ndlp);
5364 	if (!elsiocb)
5365 		goto free_rdp_context;
5366 
5367 	icmd = &elsiocb->iocb;
5368 	icmd->ulpContext = rdp_context->rx_id;
5369 	icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id;
5370 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5371 
5372 	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
5373 	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5374 	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5375 
5376 	phba->fc_stat.elsXmitLSRJT++;
5377 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5378 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5379 
5380 	if (rc == IOCB_ERROR)
5381 		lpfc_els_free_iocb(phba, elsiocb);
5382 free_rdp_context:
5383 	kfree(rdp_context);
5384 }
5385 
5386 static int
5387 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
5388 {
5389 	LPFC_MBOXQ_t *mbox = NULL;
5390 	int rc;
5391 
5392 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5393 	if (!mbox) {
5394 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS,
5395 				"7105 failed to allocate mailbox memory");
5396 		return 1;
5397 	}
5398 
5399 	if (lpfc_sli4_dump_page_a0(phba, mbox))
5400 		goto prep_mbox_fail;
5401 	mbox->vport = rdp_context->ndlp->vport;
5402 	mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
5403 	mbox->context2 = (struct lpfc_rdp_context *) rdp_context;
5404 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5405 	if (rc == MBX_NOT_FINISHED)
5406 		goto issue_mbox_fail;
5407 
5408 	return 0;
5409 
5410 prep_mbox_fail:
5411 issue_mbox_fail:
5412 	mempool_free(mbox, phba->mbox_mem_pool);
5413 	return 1;
5414 }
5415 
5416 /*
5417  * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS.
5418  * @vport: pointer to a host virtual N_Port data structure.
5419  * @cmdiocb: pointer to lpfc command iocb data structure.
5420  * @ndlp: pointer to a node-list data structure.
5421  *
5422  * This routine processes an unsolicited RDP(Read Diagnostic Parameters)
5423  * IOCB. First, the payload of the unsolicited RDP is checked.
5424  * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3
5425  * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2,
5426  * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl
5427  * gather all data and send RDP response.
5428  *
5429  * Return code
5430  *   0 - Sent the acc response
5431  *   1 - Sent the reject response.
5432  */
5433 static int
5434 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5435 		struct lpfc_nodelist *ndlp)
5436 {
5437 	struct lpfc_hba *phba = vport->phba;
5438 	struct lpfc_dmabuf *pcmd;
5439 	uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE;
5440 	struct fc_rdp_req_frame *rdp_req;
5441 	struct lpfc_rdp_context *rdp_context;
5442 	IOCB_t *cmd = NULL;
5443 	struct ls_rjt stat;
5444 
5445 	if (phba->sli_rev < LPFC_SLI_REV4 ||
5446 	    bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5447 						LPFC_SLI_INTF_IF_TYPE_2) {
5448 		rjt_err = LSRJT_UNABLE_TPC;
5449 		rjt_expl = LSEXP_REQ_UNSUPPORTED;
5450 		goto error;
5451 	}
5452 
5453 	if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) {
5454 		rjt_err = LSRJT_UNABLE_TPC;
5455 		rjt_expl = LSEXP_REQ_UNSUPPORTED;
5456 		goto error;
5457 	}
5458 
5459 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5460 	rdp_req = (struct fc_rdp_req_frame *) pcmd->virt;
5461 
5462 
5463 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5464 			 "2422 ELS RDP Request "
5465 			 "dec len %d tag x%x port_id %d len %d\n",
5466 			 be32_to_cpu(rdp_req->rdp_des_length),
5467 			 be32_to_cpu(rdp_req->nport_id_desc.tag),
5468 			 be32_to_cpu(rdp_req->nport_id_desc.nport_id),
5469 			 be32_to_cpu(rdp_req->nport_id_desc.length));
5470 
5471 	if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
5472 	    !phba->cfg_enable_SmartSAN) {
5473 		rjt_err = LSRJT_UNABLE_TPC;
5474 		rjt_expl = LSEXP_PORT_LOGIN_REQ;
5475 		goto error;
5476 	}
5477 	if (sizeof(struct fc_rdp_nport_desc) !=
5478 			be32_to_cpu(rdp_req->rdp_des_length))
5479 		goto rjt_logerr;
5480 	if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag))
5481 		goto rjt_logerr;
5482 	if (RDP_NPORT_ID_SIZE !=
5483 			be32_to_cpu(rdp_req->nport_id_desc.length))
5484 		goto rjt_logerr;
5485 	rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL);
5486 	if (!rdp_context) {
5487 		rjt_err = LSRJT_UNABLE_TPC;
5488 		goto error;
5489 	}
5490 
5491 	cmd = &cmdiocb->iocb;
5492 	rdp_context->ndlp = lpfc_nlp_get(ndlp);
5493 	rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id;
5494 	rdp_context->rx_id = cmd->ulpContext;
5495 	rdp_context->cmpl = lpfc_els_rdp_cmpl;
5496 	if (lpfc_get_rdp_info(phba, rdp_context)) {
5497 		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS,
5498 				 "2423 Unable to send mailbox");
5499 		kfree(rdp_context);
5500 		rjt_err = LSRJT_UNABLE_TPC;
5501 		lpfc_nlp_put(ndlp);
5502 		goto error;
5503 	}
5504 
5505 	return 0;
5506 
5507 rjt_logerr:
5508 	rjt_err = LSRJT_LOGICAL_ERR;
5509 
5510 error:
5511 	memset(&stat, 0, sizeof(stat));
5512 	stat.un.b.lsRjtRsnCode = rjt_err;
5513 	stat.un.b.lsRjtRsnCodeExp = rjt_expl;
5514 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5515 	return 1;
5516 }
5517 
5518 
5519 static void
5520 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5521 {
5522 	MAILBOX_t *mb;
5523 	IOCB_t *icmd;
5524 	uint8_t *pcmd;
5525 	struct lpfc_iocbq *elsiocb;
5526 	struct lpfc_nodelist *ndlp;
5527 	struct ls_rjt *stat;
5528 	union lpfc_sli4_cfg_shdr *shdr;
5529 	struct lpfc_lcb_context *lcb_context;
5530 	struct fc_lcb_res_frame *lcb_res;
5531 	uint32_t cmdsize, shdr_status, shdr_add_status;
5532 	int rc;
5533 
5534 	mb = &pmb->u.mb;
5535 	lcb_context = (struct lpfc_lcb_context *)pmb->context1;
5536 	ndlp = lcb_context->ndlp;
5537 	pmb->context1 = NULL;
5538 	pmb->context2 = NULL;
5539 
5540 	shdr = (union lpfc_sli4_cfg_shdr *)
5541 			&pmb->u.mqe.un.beacon_config.header.cfg_shdr;
5542 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5543 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5544 
5545 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX,
5546 				"0194 SET_BEACON_CONFIG mailbox "
5547 				"completed with status x%x add_status x%x,"
5548 				" mbx status x%x\n",
5549 				shdr_status, shdr_add_status, mb->mbxStatus);
5550 
5551 	if (mb->mbxStatus && !(shdr_status &&
5552 		shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
5553 		mempool_free(pmb, phba->mbox_mem_pool);
5554 		goto error;
5555 	}
5556 
5557 	mempool_free(pmb, phba->mbox_mem_pool);
5558 	cmdsize = sizeof(struct fc_lcb_res_frame);
5559 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5560 			lpfc_max_els_tries, ndlp,
5561 			ndlp->nlp_DID, ELS_CMD_ACC);
5562 
5563 	/* Decrement the ndlp reference count from previous mbox command */
5564 	lpfc_nlp_put(ndlp);
5565 
5566 	if (!elsiocb)
5567 		goto free_lcb_context;
5568 
5569 	lcb_res = (struct fc_lcb_res_frame *)
5570 		(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5571 
5572 	icmd = &elsiocb->iocb;
5573 	icmd->ulpContext = lcb_context->rx_id;
5574 	icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5575 
5576 	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5577 	*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
5578 	lcb_res->lcb_sub_command = lcb_context->sub_command;
5579 	lcb_res->lcb_type = lcb_context->type;
5580 	lcb_res->lcb_frequency = lcb_context->frequency;
5581 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5582 	phba->fc_stat.elsXmitACC++;
5583 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5584 	if (rc == IOCB_ERROR)
5585 		lpfc_els_free_iocb(phba, elsiocb);
5586 
5587 	kfree(lcb_context);
5588 	return;
5589 
5590 error:
5591 	cmdsize = sizeof(struct fc_lcb_res_frame);
5592 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
5593 			lpfc_max_els_tries, ndlp,
5594 			ndlp->nlp_DID, ELS_CMD_LS_RJT);
5595 	lpfc_nlp_put(ndlp);
5596 	if (!elsiocb)
5597 		goto free_lcb_context;
5598 
5599 	icmd = &elsiocb->iocb;
5600 	icmd->ulpContext = lcb_context->rx_id;
5601 	icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
5602 	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
5603 
5604 	*((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT;
5605 	stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
5606 	stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5607 
5608 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5609 	phba->fc_stat.elsXmitLSRJT++;
5610 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5611 	if (rc == IOCB_ERROR)
5612 		lpfc_els_free_iocb(phba, elsiocb);
5613 free_lcb_context:
5614 	kfree(lcb_context);
5615 }
5616 
5617 static int
5618 lpfc_sli4_set_beacon(struct lpfc_vport *vport,
5619 		     struct lpfc_lcb_context *lcb_context,
5620 		     uint32_t beacon_state)
5621 {
5622 	struct lpfc_hba *phba = vport->phba;
5623 	LPFC_MBOXQ_t *mbox = NULL;
5624 	uint32_t len;
5625 	int rc;
5626 
5627 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5628 	if (!mbox)
5629 		return 1;
5630 
5631 	len = sizeof(struct lpfc_mbx_set_beacon_config) -
5632 		sizeof(struct lpfc_sli4_cfg_mhdr);
5633 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5634 			 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
5635 			 LPFC_SLI4_MBX_EMBED);
5636 	mbox->context1 = (void *)lcb_context;
5637 	mbox->vport = phba->pport;
5638 	mbox->mbox_cmpl = lpfc_els_lcb_rsp;
5639 	bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
5640 	       phba->sli4_hba.physical_port);
5641 	bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
5642 	       beacon_state);
5643 	bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
5644 	bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
5645 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5646 	if (rc == MBX_NOT_FINISHED) {
5647 		mempool_free(mbox, phba->mbox_mem_pool);
5648 		return 1;
5649 	}
5650 
5651 	return 0;
5652 }
5653 
5654 
5655 /**
5656  * lpfc_els_rcv_lcb - Process an unsolicited LCB
5657  * @vport: pointer to a host virtual N_Port data structure.
5658  * @cmdiocb: pointer to lpfc command iocb data structure.
5659  * @ndlp: pointer to a node-list data structure.
5660  *
5661  * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB.
5662  * First, the payload of the unsolicited LCB is checked.
5663  * Then based on Subcommand beacon will either turn on or off.
5664  *
5665  * Return code
5666  * 0 - Sent the acc response
5667  * 1 - Sent the reject response.
5668  **/
5669 static int
5670 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5671 		 struct lpfc_nodelist *ndlp)
5672 {
5673 	struct lpfc_hba *phba = vport->phba;
5674 	struct lpfc_dmabuf *pcmd;
5675 	uint8_t *lp;
5676 	struct fc_lcb_request_frame *beacon;
5677 	struct lpfc_lcb_context *lcb_context;
5678 	uint8_t state, rjt_err;
5679 	struct ls_rjt stat;
5680 
5681 	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
5682 	lp = (uint8_t *)pcmd->virt;
5683 	beacon = (struct fc_lcb_request_frame *)pcmd->virt;
5684 
5685 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5686 			"0192 ELS LCB Data x%x x%x x%x x%x sub x%x "
5687 			"type x%x frequency %x duration x%x\n",
5688 			lp[0], lp[1], lp[2],
5689 			beacon->lcb_command,
5690 			beacon->lcb_sub_command,
5691 			beacon->lcb_type,
5692 			beacon->lcb_frequency,
5693 			be16_to_cpu(beacon->lcb_duration));
5694 
5695 	if (phba->sli_rev < LPFC_SLI_REV4 ||
5696 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
5697 	    LPFC_SLI_INTF_IF_TYPE_2)) {
5698 		rjt_err = LSRJT_CMD_UNSUPPORTED;
5699 		goto rjt;
5700 	}
5701 
5702 	if (phba->hba_flag & HBA_FCOE_MODE) {
5703 		rjt_err = LSRJT_CMD_UNSUPPORTED;
5704 		goto rjt;
5705 	}
5706 	if (beacon->lcb_sub_command != LPFC_LCB_ON &&
5707 	    beacon->lcb_sub_command != LPFC_LCB_OFF) {
5708 		rjt_err = LSRJT_CMD_UNSUPPORTED;
5709 		goto rjt;
5710 	}
5711 	if (beacon->lcb_sub_command == LPFC_LCB_ON &&
5712 	    be16_to_cpu(beacon->lcb_duration) != 0) {
5713 		rjt_err = LSRJT_CMD_UNSUPPORTED;
5714 		goto rjt;
5715 	}
5716 
5717 	lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL);
5718 	if (!lcb_context) {
5719 		rjt_err = LSRJT_UNABLE_TPC;
5720 		goto rjt;
5721 	}
5722 
5723 	state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
5724 	lcb_context->sub_command = beacon->lcb_sub_command;
5725 	lcb_context->type = beacon->lcb_type;
5726 	lcb_context->frequency = beacon->lcb_frequency;
5727 	lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
5728 	lcb_context->rx_id = cmdiocb->iocb.ulpContext;
5729 	lcb_context->ndlp = lpfc_nlp_get(ndlp);
5730 	if (lpfc_sli4_set_beacon(vport, lcb_context, state)) {
5731 		lpfc_printf_vlog(ndlp->vport, KERN_ERR,
5732 				 LOG_ELS, "0193 failed to send mail box");
5733 		kfree(lcb_context);
5734 		lpfc_nlp_put(ndlp);
5735 		rjt_err = LSRJT_UNABLE_TPC;
5736 		goto rjt;
5737 	}
5738 	return 0;
5739 rjt:
5740 	memset(&stat, 0, sizeof(stat));
5741 	stat.un.b.lsRjtRsnCode = rjt_err;
5742 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
5743 	return 1;
5744 }
5745 
5746 
5747 /**
5748  * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
5749  * @vport: pointer to a host virtual N_Port data structure.
5750  *
5751  * This routine cleans up any Registration State Change Notification
5752  * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
5753  * @vport together with the host_lock is used to prevent multiple thread
5754  * trying to access the RSCN array on a same @vport at the same time.
5755  **/
5756 void
5757 lpfc_els_flush_rscn(struct lpfc_vport *vport)
5758 {
5759 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5760 	struct lpfc_hba  *phba = vport->phba;
5761 	int i;
5762 
5763 	spin_lock_irq(shost->host_lock);
5764 	if (vport->fc_rscn_flush) {
5765 		/* Another thread is walking fc_rscn_id_list on this vport */
5766 		spin_unlock_irq(shost->host_lock);
5767 		return;
5768 	}
5769 	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
5770 	vport->fc_rscn_flush = 1;
5771 	spin_unlock_irq(shost->host_lock);
5772 
5773 	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
5774 		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
5775 		vport->fc_rscn_id_list[i] = NULL;
5776 	}
5777 	spin_lock_irq(shost->host_lock);
5778 	vport->fc_rscn_id_cnt = 0;
5779 	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
5780 	spin_unlock_irq(shost->host_lock);
5781 	lpfc_can_disctmo(vport);
5782 	/* Indicate we are done walking this fc_rscn_id_list */
5783 	vport->fc_rscn_flush = 0;
5784 }
5785 
5786 /**
5787  * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
5788  * @vport: pointer to a host virtual N_Port data structure.
5789  * @did: remote destination port identifier.
5790  *
5791  * This routine checks whether there is any pending Registration State
5792  * Configuration Notification (RSCN) to a @did on @vport.
5793  *
5794  * Return code
5795  *   None zero - The @did matched with a pending rscn
5796  *   0 - not able to match @did with a pending rscn
5797  **/
5798 int
5799 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
5800 {
5801 	D_ID ns_did;
5802 	D_ID rscn_did;
5803 	uint32_t *lp;
5804 	uint32_t payload_len, i;
5805 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5806 
5807 	ns_did.un.word = did;
5808 
5809 	/* Never match fabric nodes for RSCNs */
5810 	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
5811 		return 0;
5812 
5813 	/* If we are doing a FULL RSCN rediscovery, match everything */
5814 	if (vport->fc_flag & FC_RSCN_DISCOVERY)
5815 		return did;
5816 
5817 	spin_lock_irq(shost->host_lock);
5818 	if (vport->fc_rscn_flush) {
5819 		/* Another thread is walking fc_rscn_id_list on this vport */
5820 		spin_unlock_irq(shost->host_lock);
5821 		return 0;
5822 	}
5823 	/* Indicate we are walking fc_rscn_id_list on this vport */
5824 	vport->fc_rscn_flush = 1;
5825 	spin_unlock_irq(shost->host_lock);
5826 	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
5827 		lp = vport->fc_rscn_id_list[i]->virt;
5828 		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
5829 		payload_len -= sizeof(uint32_t);	/* take off word 0 */
5830 		while (payload_len) {
5831 			rscn_did.un.word = be32_to_cpu(*lp++);
5832 			payload_len -= sizeof(uint32_t);
5833 			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
5834 			case RSCN_ADDRESS_FORMAT_PORT:
5835 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
5836 				    && (ns_did.un.b.area == rscn_did.un.b.area)
5837 				    && (ns_did.un.b.id == rscn_did.un.b.id))
5838 					goto return_did_out;
5839 				break;
5840 			case RSCN_ADDRESS_FORMAT_AREA:
5841 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
5842 				    && (ns_did.un.b.area == rscn_did.un.b.area))
5843 					goto return_did_out;
5844 				break;
5845 			case RSCN_ADDRESS_FORMAT_DOMAIN:
5846 				if (ns_did.un.b.domain == rscn_did.un.b.domain)
5847 					goto return_did_out;
5848 				break;
5849 			case RSCN_ADDRESS_FORMAT_FABRIC:
5850 				goto return_did_out;
5851 			}
5852 		}
5853 	}
5854 	/* Indicate we are done with walking fc_rscn_id_list on this vport */
5855 	vport->fc_rscn_flush = 0;
5856 	return 0;
5857 return_did_out:
5858 	/* Indicate we are done with walking fc_rscn_id_list on this vport */
5859 	vport->fc_rscn_flush = 0;
5860 	return did;
5861 }
5862 
5863 /**
5864  * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
5865  * @vport: pointer to a host virtual N_Port data structure.
5866  *
5867  * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the
5868  * state machine for a @vport's nodes that are with pending RSCN (Registration
5869  * State Change Notification).
5870  *
5871  * Return code
5872  *   0 - Successful (currently alway return 0)
5873  **/
5874 static int
5875 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
5876 {
5877 	struct lpfc_nodelist *ndlp = NULL;
5878 
5879 	/* Move all affected nodes by pending RSCNs to NPR state. */
5880 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5881 		if (!NLP_CHK_NODE_ACT(ndlp) ||
5882 		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
5883 		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
5884 			continue;
5885 
5886 		/* NVME Target mode does not do RSCN Recovery. */
5887 		if (vport->phba->nvmet_support)
5888 			continue;
5889 
5890 		lpfc_disc_state_machine(vport, ndlp, NULL,
5891 					NLP_EVT_DEVICE_RECOVERY);
5892 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
5893 	}
5894 	return 0;
5895 }
5896 
5897 /**
5898  * lpfc_send_rscn_event - Send an RSCN event to management application
5899  * @vport: pointer to a host virtual N_Port data structure.
5900  * @cmdiocb: pointer to lpfc command iocb data structure.
5901  *
5902  * lpfc_send_rscn_event sends an RSCN netlink event to management
5903  * applications.
5904  */
5905 static void
5906 lpfc_send_rscn_event(struct lpfc_vport *vport,
5907 		struct lpfc_iocbq *cmdiocb)
5908 {
5909 	struct lpfc_dmabuf *pcmd;
5910 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5911 	uint32_t *payload_ptr;
5912 	uint32_t payload_len;
5913 	struct lpfc_rscn_event_header *rscn_event_data;
5914 
5915 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5916 	payload_ptr = (uint32_t *) pcmd->virt;
5917 	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
5918 
5919 	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
5920 		payload_len, GFP_KERNEL);
5921 	if (!rscn_event_data) {
5922 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5923 			"0147 Failed to allocate memory for RSCN event\n");
5924 		return;
5925 	}
5926 	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
5927 	rscn_event_data->payload_length = payload_len;
5928 	memcpy(rscn_event_data->rscn_payload, payload_ptr,
5929 		payload_len);
5930 
5931 	fc_host_post_vendor_event(shost,
5932 		fc_get_event_number(),
5933 		sizeof(struct lpfc_rscn_event_header) + payload_len,
5934 		(char *)rscn_event_data,
5935 		LPFC_NL_VENDOR_ID);
5936 
5937 	kfree(rscn_event_data);
5938 }
5939 
5940 /**
5941  * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
5942  * @vport: pointer to a host virtual N_Port data structure.
5943  * @cmdiocb: pointer to lpfc command iocb data structure.
5944  * @ndlp: pointer to a node-list data structure.
5945  *
5946  * This routine processes an unsolicited RSCN (Registration State Change
5947  * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
5948  * to invoke fc_host_post_event() routine to the FC transport layer. If the
5949  * discover state machine is about to begin discovery, it just accepts the
5950  * RSCN and the discovery process will satisfy the RSCN. If this RSCN only
5951  * contains N_Port IDs for other vports on this HBA, it just accepts the
5952  * RSCN and ignore processing it. If the state machine is in the recovery
5953  * state, the fc_rscn_id_list of this @vport is walked and the
5954  * lpfc_rscn_recovery_check() routine is invoked to send recovery event for
5955  * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn()
5956  * routine is invoked to handle the RSCN event.
5957  *
5958  * Return code
5959  *   0 - Just sent the acc response
5960  *   1 - Sent the acc response and waited for name server completion
5961  **/
5962 static int
5963 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5964 		  struct lpfc_nodelist *ndlp)
5965 {
5966 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5967 	struct lpfc_hba  *phba = vport->phba;
5968 	struct lpfc_dmabuf *pcmd;
5969 	uint32_t *lp, *datap;
5970 	uint32_t payload_len, length, nportid, *cmd;
5971 	int rscn_cnt;
5972 	int rscn_id = 0, hba_id = 0;
5973 	int i;
5974 
5975 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5976 	lp = (uint32_t *) pcmd->virt;
5977 
5978 	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
5979 	payload_len -= sizeof(uint32_t);	/* take off word 0 */
5980 	/* RSCN received */
5981 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
5982 			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
5983 			 vport->fc_flag, payload_len, *lp,
5984 			 vport->fc_rscn_id_cnt);
5985 
5986 	/* Send an RSCN event to the management application */
5987 	lpfc_send_rscn_event(vport, cmdiocb);
5988 
5989 	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
5990 		fc_host_post_event(shost, fc_get_event_number(),
5991 			FCH_EVT_RSCN, lp[i]);
5992 
5993 	/* If we are about to begin discovery, just ACC the RSCN.
5994 	 * Discovery processing will satisfy it.
5995 	 */
5996 	if (vport->port_state <= LPFC_NS_QRY) {
5997 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5998 			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
5999 			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6000 
6001 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6002 		return 0;
6003 	}
6004 
6005 	/* If this RSCN just contains NPortIDs for other vports on this HBA,
6006 	 * just ACC and ignore it.
6007 	 */
6008 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
6009 		!(vport->cfg_peer_port_login)) {
6010 		i = payload_len;
6011 		datap = lp;
6012 		while (i > 0) {
6013 			nportid = *datap++;
6014 			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
6015 			i -= sizeof(uint32_t);
6016 			rscn_id++;
6017 			if (lpfc_find_vport_by_did(phba, nportid))
6018 				hba_id++;
6019 		}
6020 		if (rscn_id == hba_id) {
6021 			/* ALL NPortIDs in RSCN are on HBA */
6022 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6023 					 "0219 Ignore RSCN "
6024 					 "Data: x%x x%x x%x x%x\n",
6025 					 vport->fc_flag, payload_len,
6026 					 *lp, vport->fc_rscn_id_cnt);
6027 			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6028 				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
6029 				ndlp->nlp_DID, vport->port_state,
6030 				ndlp->nlp_flag);
6031 
6032 			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
6033 				ndlp, NULL);
6034 			return 0;
6035 		}
6036 	}
6037 
6038 	spin_lock_irq(shost->host_lock);
6039 	if (vport->fc_rscn_flush) {
6040 		/* Another thread is walking fc_rscn_id_list on this vport */
6041 		vport->fc_flag |= FC_RSCN_DISCOVERY;
6042 		spin_unlock_irq(shost->host_lock);
6043 		/* Send back ACC */
6044 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6045 		return 0;
6046 	}
6047 	/* Indicate we are walking fc_rscn_id_list on this vport */
6048 	vport->fc_rscn_flush = 1;
6049 	spin_unlock_irq(shost->host_lock);
6050 	/* Get the array count after successfully have the token */
6051 	rscn_cnt = vport->fc_rscn_id_cnt;
6052 	/* If we are already processing an RSCN, save the received
6053 	 * RSCN payload buffer, cmdiocb->context2 to process later.
6054 	 */
6055 	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
6056 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6057 			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
6058 			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6059 
6060 		spin_lock_irq(shost->host_lock);
6061 		vport->fc_flag |= FC_RSCN_DEFERRED;
6062 		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
6063 		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
6064 			vport->fc_flag |= FC_RSCN_MODE;
6065 			spin_unlock_irq(shost->host_lock);
6066 			if (rscn_cnt) {
6067 				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
6068 				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
6069 			}
6070 			if ((rscn_cnt) &&
6071 			    (payload_len + length <= LPFC_BPL_SIZE)) {
6072 				*cmd &= ELS_CMD_MASK;
6073 				*cmd |= cpu_to_be32(payload_len + length);
6074 				memcpy(((uint8_t *)cmd) + length, lp,
6075 				       payload_len);
6076 			} else {
6077 				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
6078 				vport->fc_rscn_id_cnt++;
6079 				/* If we zero, cmdiocb->context2, the calling
6080 				 * routine will not try to free it.
6081 				 */
6082 				cmdiocb->context2 = NULL;
6083 			}
6084 			/* Deferred RSCN */
6085 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6086 					 "0235 Deferred RSCN "
6087 					 "Data: x%x x%x x%x\n",
6088 					 vport->fc_rscn_id_cnt, vport->fc_flag,
6089 					 vport->port_state);
6090 		} else {
6091 			vport->fc_flag |= FC_RSCN_DISCOVERY;
6092 			spin_unlock_irq(shost->host_lock);
6093 			/* ReDiscovery RSCN */
6094 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6095 					 "0234 ReDiscovery RSCN "
6096 					 "Data: x%x x%x x%x\n",
6097 					 vport->fc_rscn_id_cnt, vport->fc_flag,
6098 					 vport->port_state);
6099 		}
6100 		/* Indicate we are done walking fc_rscn_id_list on this vport */
6101 		vport->fc_rscn_flush = 0;
6102 		/* Send back ACC */
6103 		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6104 		/* send RECOVERY event for ALL nodes that match RSCN payload */
6105 		lpfc_rscn_recovery_check(vport);
6106 		spin_lock_irq(shost->host_lock);
6107 		vport->fc_flag &= ~FC_RSCN_DEFERRED;
6108 		spin_unlock_irq(shost->host_lock);
6109 		return 0;
6110 	}
6111 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
6112 		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
6113 		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
6114 
6115 	spin_lock_irq(shost->host_lock);
6116 	vport->fc_flag |= FC_RSCN_MODE;
6117 	spin_unlock_irq(shost->host_lock);
6118 	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
6119 	/* Indicate we are done walking fc_rscn_id_list on this vport */
6120 	vport->fc_rscn_flush = 0;
6121 	/*
6122 	 * If we zero, cmdiocb->context2, the calling routine will
6123 	 * not try to free it.
6124 	 */
6125 	cmdiocb->context2 = NULL;
6126 	lpfc_set_disctmo(vport);
6127 	/* Send back ACC */
6128 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6129 	/* send RECOVERY event for ALL nodes that match RSCN payload */
6130 	lpfc_rscn_recovery_check(vport);
6131 	return lpfc_els_handle_rscn(vport);
6132 }
6133 
6134 /**
6135  * lpfc_els_handle_rscn - Handle rscn for a vport
6136  * @vport: pointer to a host virtual N_Port data structure.
6137  *
6138  * This routine handles the Registration State Configuration Notification
6139  * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
6140  * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
6141  * if the ndlp to NameServer exists, a Common Transport (CT) command to the
6142  * NameServer shall be issued. If CT command to the NameServer fails to be
6143  * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
6144  * RSCN activities with the @vport.
6145  *
6146  * Return code
6147  *   0 - Cleaned up rscn on the @vport
6148  *   1 - Wait for plogi to name server before proceed
6149  **/
6150 int
6151 lpfc_els_handle_rscn(struct lpfc_vport *vport)
6152 {
6153 	struct lpfc_nodelist *ndlp;
6154 
6155 	/* Ignore RSCN if the port is being torn down. */
6156 	if (vport->load_flag & FC_UNLOADING) {
6157 		lpfc_els_flush_rscn(vport);
6158 		return 0;
6159 	}
6160 
6161 	/* Start timer for RSCN processing */
6162 	lpfc_set_disctmo(vport);
6163 
6164 	/* RSCN processed */
6165 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
6166 			 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
6167 			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
6168 			 vport->port_state);
6169 
6170 	/* To process RSCN, first compare RSCN data with NameServer */
6171 	vport->fc_ns_retry = 0;
6172 	vport->num_disc_nodes = 0;
6173 
6174 	ndlp = lpfc_findnode_did(vport, NameServer_DID);
6175 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
6176 	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
6177 		/* Good ndlp, issue CT Request to NameServer.  Need to
6178 		 * know how many gidfts were issued.  If none, then just
6179 		 * flush the RSCN.  Otherwise, the outstanding requests
6180 		 * need to complete.
6181 		 */
6182 		vport->gidft_inp = 0;
6183 		if (lpfc_issue_gidft(vport) > 0)
6184 			return 1;
6185 	} else {
6186 		/* Nameserver login in question.  Revalidate. */
6187 		if (ndlp) {
6188 			ndlp = lpfc_enable_node(vport, ndlp,
6189 						NLP_STE_PLOGI_ISSUE);
6190 			if (!ndlp) {
6191 				lpfc_els_flush_rscn(vport);
6192 				return 0;
6193 			}
6194 			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
6195 		} else {
6196 			ndlp = lpfc_nlp_init(vport, NameServer_DID);
6197 			if (!ndlp) {
6198 				lpfc_els_flush_rscn(vport);
6199 				return 0;
6200 			}
6201 			ndlp->nlp_prev_state = ndlp->nlp_state;
6202 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6203 		}
6204 		ndlp->nlp_type |= NLP_FABRIC;
6205 		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
6206 		/* Wait for NameServer login cmpl before we can
6207 		 * continue
6208 		 */
6209 		return 1;
6210 	}
6211 
6212 	lpfc_els_flush_rscn(vport);
6213 	return 0;
6214 }
6215 
6216 /**
6217  * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
6218  * @vport: pointer to a host virtual N_Port data structure.
6219  * @cmdiocb: pointer to lpfc command iocb data structure.
6220  * @ndlp: pointer to a node-list data structure.
6221  *
6222  * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
6223  * unsolicited event. An unsolicited FLOGI can be received in a point-to-
6224  * point topology. As an unsolicited FLOGI should not be received in a loop
6225  * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
6226  * lpfc_check_sparm() routine is invoked to check the parameters in the
6227  * unsolicited FLOGI. If parameters validation failed, the routine
6228  * lpfc_els_rsp_reject() shall be called with reject reason code set to
6229  * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
6230  * FLOGI shall be compared with the Port WWN of the @vport to determine who
6231  * will initiate PLOGI. The higher lexicographical value party shall has
6232  * higher priority (as the winning port) and will initiate PLOGI and
6233  * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
6234  * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
6235  * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
6236  *
6237  * Return code
6238  *   0 - Successfully processed the unsolicited flogi
6239  *   1 - Failed to process the unsolicited flogi
6240  **/
6241 static int
6242 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6243 		   struct lpfc_nodelist *ndlp)
6244 {
6245 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6246 	struct lpfc_hba  *phba = vport->phba;
6247 	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6248 	uint32_t *lp = (uint32_t *) pcmd->virt;
6249 	IOCB_t *icmd = &cmdiocb->iocb;
6250 	struct serv_parm *sp;
6251 	LPFC_MBOXQ_t *mbox;
6252 	uint32_t cmd, did;
6253 	int rc;
6254 	uint32_t fc_flag = 0;
6255 	uint32_t port_state = 0;
6256 
6257 	cmd = *lp++;
6258 	sp = (struct serv_parm *) lp;
6259 
6260 	/* FLOGI received */
6261 
6262 	lpfc_set_disctmo(vport);
6263 
6264 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
6265 		/* We should never receive a FLOGI in loop mode, ignore it */
6266 		did = icmd->un.elsreq64.remoteID;
6267 
6268 		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
6269 		   Loop Mode */
6270 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6271 				 "0113 An FLOGI ELS command x%x was "
6272 				 "received from DID x%x in Loop Mode\n",
6273 				 cmd, did);
6274 		return 1;
6275 	}
6276 
6277 	(void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
6278 
6279 	/*
6280 	 * If our portname is greater than the remote portname,
6281 	 * then we initiate Nport login.
6282 	 */
6283 
6284 	rc = memcmp(&vport->fc_portname, &sp->portName,
6285 		    sizeof(struct lpfc_name));
6286 
6287 	if (!rc) {
6288 		if (phba->sli_rev < LPFC_SLI_REV4) {
6289 			mbox = mempool_alloc(phba->mbox_mem_pool,
6290 					     GFP_KERNEL);
6291 			if (!mbox)
6292 				return 1;
6293 			lpfc_linkdown(phba);
6294 			lpfc_init_link(phba, mbox,
6295 				       phba->cfg_topology,
6296 				       phba->cfg_link_speed);
6297 			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
6298 			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
6299 			mbox->vport = vport;
6300 			rc = lpfc_sli_issue_mbox(phba, mbox,
6301 						 MBX_NOWAIT);
6302 			lpfc_set_loopback_flag(phba);
6303 			if (rc == MBX_NOT_FINISHED)
6304 				mempool_free(mbox, phba->mbox_mem_pool);
6305 			return 1;
6306 		}
6307 
6308 		/* abort the flogi coming back to ourselves
6309 		 * due to external loopback on the port.
6310 		 */
6311 		lpfc_els_abort_flogi(phba);
6312 		return 0;
6313 
6314 	} else if (rc > 0) {	/* greater than */
6315 		spin_lock_irq(shost->host_lock);
6316 		vport->fc_flag |= FC_PT2PT_PLOGI;
6317 		spin_unlock_irq(shost->host_lock);
6318 
6319 		/* If we have the high WWPN we can assign our own
6320 		 * myDID; otherwise, we have to WAIT for a PLOGI
6321 		 * from the remote NPort to find out what it
6322 		 * will be.
6323 		 */
6324 		vport->fc_myDID = PT2PT_LocalID;
6325 	} else {
6326 		vport->fc_myDID = PT2PT_RemoteID;
6327 	}
6328 
6329 	/*
6330 	 * The vport state should go to LPFC_FLOGI only
6331 	 * AFTER we issue a FLOGI, not receive one.
6332 	 */
6333 	spin_lock_irq(shost->host_lock);
6334 	fc_flag = vport->fc_flag;
6335 	port_state = vport->port_state;
6336 	vport->fc_flag |= FC_PT2PT;
6337 	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6338 	spin_unlock_irq(shost->host_lock);
6339 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6340 			 "3311 Rcv Flogi PS x%x new PS x%x "
6341 			 "fc_flag x%x new fc_flag x%x\n",
6342 			 port_state, vport->port_state,
6343 			 fc_flag, vport->fc_flag);
6344 
6345 	/*
6346 	 * We temporarily set fc_myDID to make it look like we are
6347 	 * a Fabric. This is done just so we end up with the right
6348 	 * did / sid on the FLOGI ACC rsp.
6349 	 */
6350 	did = vport->fc_myDID;
6351 	vport->fc_myDID = Fabric_DID;
6352 
6353 	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
6354 
6355 	/* Send back ACC */
6356 	lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL);
6357 
6358 	/* Now lets put fc_myDID back to what its supposed to be */
6359 	vport->fc_myDID = did;
6360 
6361 	return 0;
6362 }
6363 
6364 /**
6365  * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
6366  * @vport: pointer to a host virtual N_Port data structure.
6367  * @cmdiocb: pointer to lpfc command iocb data structure.
6368  * @ndlp: pointer to a node-list data structure.
6369  *
6370  * This routine processes Request Node Identification Data (RNID) IOCB
6371  * received as an ELS unsolicited event. Only when the RNID specified format
6372  * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data)
6373  * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to
6374  * Accept (ACC) the RNID ELS command. All the other RNID formats are
6375  * rejected by invoking the lpfc_els_rsp_reject() routine.
6376  *
6377  * Return code
6378  *   0 - Successfully processed rnid iocb (currently always return 0)
6379  **/
6380 static int
6381 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6382 		  struct lpfc_nodelist *ndlp)
6383 {
6384 	struct lpfc_dmabuf *pcmd;
6385 	uint32_t *lp;
6386 	RNID *rn;
6387 	struct ls_rjt stat;
6388 	uint32_t cmd;
6389 
6390 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6391 	lp = (uint32_t *) pcmd->virt;
6392 
6393 	cmd = *lp++;
6394 	rn = (RNID *) lp;
6395 
6396 	/* RNID received */
6397 
6398 	switch (rn->Format) {
6399 	case 0:
6400 	case RNID_TOPOLOGY_DISC:
6401 		/* Send back ACC */
6402 		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
6403 		break;
6404 	default:
6405 		/* Reject this request because format not supported */
6406 		stat.un.b.lsRjtRsvd0 = 0;
6407 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6408 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6409 		stat.un.b.vendorUnique = 0;
6410 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
6411 			NULL);
6412 	}
6413 	return 0;
6414 }
6415 
6416 /**
6417  * lpfc_els_rcv_echo - Process an unsolicited echo iocb
6418  * @vport: pointer to a host virtual N_Port data structure.
6419  * @cmdiocb: pointer to lpfc command iocb data structure.
6420  * @ndlp: pointer to a node-list data structure.
6421  *
6422  * Return code
6423  *   0 - Successfully processed echo iocb (currently always return 0)
6424  **/
6425 static int
6426 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6427 		  struct lpfc_nodelist *ndlp)
6428 {
6429 	uint8_t *pcmd;
6430 
6431 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
6432 
6433 	/* skip over first word of echo command to find echo data */
6434 	pcmd += sizeof(uint32_t);
6435 
6436 	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
6437 	return 0;
6438 }
6439 
6440 /**
6441  * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
6442  * @vport: pointer to a host virtual N_Port data structure.
6443  * @cmdiocb: pointer to lpfc command iocb data structure.
6444  * @ndlp: pointer to a node-list data structure.
6445  *
6446  * This routine processes a Link Incident Report Registration(LIRR) IOCB
6447  * received as an ELS unsolicited event. Currently, this function just invokes
6448  * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
6449  *
6450  * Return code
6451  *   0 - Successfully processed lirr iocb (currently always return 0)
6452  **/
6453 static int
6454 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6455 		  struct lpfc_nodelist *ndlp)
6456 {
6457 	struct ls_rjt stat;
6458 
6459 	/* For now, unconditionally reject this command */
6460 	stat.un.b.lsRjtRsvd0 = 0;
6461 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6462 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6463 	stat.un.b.vendorUnique = 0;
6464 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6465 	return 0;
6466 }
6467 
6468 /**
6469  * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
6470  * @vport: pointer to a host virtual N_Port data structure.
6471  * @cmdiocb: pointer to lpfc command iocb data structure.
6472  * @ndlp: pointer to a node-list data structure.
6473  *
6474  * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
6475  * received as an ELS unsolicited event. A request to RRQ shall only
6476  * be accepted if the Originator Nx_Port N_Port_ID or the Responder
6477  * Nx_Port N_Port_ID of the target Exchange is the same as the
6478  * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
6479  * not accepted, an LS_RJT with reason code "Unable to perform
6480  * command request" and reason code explanation "Invalid Originator
6481  * S_ID" shall be returned. For now, we just unconditionally accept
6482  * RRQ from the target.
6483  **/
6484 static void
6485 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6486 		 struct lpfc_nodelist *ndlp)
6487 {
6488 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
6489 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
6490 		lpfc_els_clear_rrq(vport, cmdiocb, ndlp);
6491 }
6492 
6493 /**
6494  * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
6495  * @phba: pointer to lpfc hba data structure.
6496  * @pmb: pointer to the driver internal queue element for mailbox command.
6497  *
6498  * This routine is the completion callback function for the MBX_READ_LNK_STAT
6499  * mailbox command. This callback function is to actually send the Accept
6500  * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
6501  * collects the link statistics from the completion of the MBX_READ_LNK_STAT
6502  * mailbox command, constructs the RPS response with the link statistics
6503  * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
6504  * response to the RPS.
6505  *
6506  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6507  * will be incremented by 1 for holding the ndlp and the reference to ndlp
6508  * will be stored into the context1 field of the IOCB for the completion
6509  * callback function to the RPS Accept Response ELS IOCB command.
6510  *
6511  **/
6512 static void
6513 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6514 {
6515 	MAILBOX_t *mb;
6516 	IOCB_t *icmd;
6517 	struct RLS_RSP *rls_rsp;
6518 	uint8_t *pcmd;
6519 	struct lpfc_iocbq *elsiocb;
6520 	struct lpfc_nodelist *ndlp;
6521 	uint16_t oxid;
6522 	uint16_t rxid;
6523 	uint32_t cmdsize;
6524 
6525 	mb = &pmb->u.mb;
6526 
6527 	ndlp = (struct lpfc_nodelist *) pmb->context2;
6528 	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
6529 	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
6530 	pmb->context1 = NULL;
6531 	pmb->context2 = NULL;
6532 
6533 	if (mb->mbxStatus) {
6534 		mempool_free(pmb, phba->mbox_mem_pool);
6535 		return;
6536 	}
6537 
6538 	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
6539 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6540 				     lpfc_max_els_tries, ndlp,
6541 				     ndlp->nlp_DID, ELS_CMD_ACC);
6542 
6543 	/* Decrement the ndlp reference count from previous mbox command */
6544 	lpfc_nlp_put(ndlp);
6545 
6546 	if (!elsiocb) {
6547 		mempool_free(pmb, phba->mbox_mem_pool);
6548 		return;
6549 	}
6550 
6551 	icmd = &elsiocb->iocb;
6552 	icmd->ulpContext = rxid;
6553 	icmd->unsli3.rcvsli3.ox_id = oxid;
6554 
6555 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6556 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6557 	pcmd += sizeof(uint32_t); /* Skip past command */
6558 	rls_rsp = (struct RLS_RSP *)pcmd;
6559 
6560 	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
6561 	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
6562 	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
6563 	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
6564 	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
6565 	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
6566 	mempool_free(pmb, phba->mbox_mem_pool);
6567 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
6568 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6569 			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
6570 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6571 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
6572 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6573 			 ndlp->nlp_rpi);
6574 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6575 	phba->fc_stat.elsXmitACC++;
6576 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
6577 		lpfc_els_free_iocb(phba, elsiocb);
6578 }
6579 
6580 /**
6581  * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
6582  * @phba: pointer to lpfc hba data structure.
6583  * @pmb: pointer to the driver internal queue element for mailbox command.
6584  *
6585  * This routine is the completion callback function for the MBX_READ_LNK_STAT
6586  * mailbox command. This callback function is to actually send the Accept
6587  * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
6588  * collects the link statistics from the completion of the MBX_READ_LNK_STAT
6589  * mailbox command, constructs the RPS response with the link statistics
6590  * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
6591  * response to the RPS.
6592  *
6593  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6594  * will be incremented by 1 for holding the ndlp and the reference to ndlp
6595  * will be stored into the context1 field of the IOCB for the completion
6596  * callback function to the RPS Accept Response ELS IOCB command.
6597  *
6598  **/
6599 static void
6600 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6601 {
6602 	MAILBOX_t *mb;
6603 	IOCB_t *icmd;
6604 	RPS_RSP *rps_rsp;
6605 	uint8_t *pcmd;
6606 	struct lpfc_iocbq *elsiocb;
6607 	struct lpfc_nodelist *ndlp;
6608 	uint16_t status;
6609 	uint16_t oxid;
6610 	uint16_t rxid;
6611 	uint32_t cmdsize;
6612 
6613 	mb = &pmb->u.mb;
6614 
6615 	ndlp = (struct lpfc_nodelist *) pmb->context2;
6616 	rxid = (uint16_t) ((unsigned long)(pmb->context1) & 0xffff);
6617 	oxid = (uint16_t) (((unsigned long)(pmb->context1) >> 16) & 0xffff);
6618 	pmb->context1 = NULL;
6619 	pmb->context2 = NULL;
6620 
6621 	if (mb->mbxStatus) {
6622 		mempool_free(pmb, phba->mbox_mem_pool);
6623 		return;
6624 	}
6625 
6626 	cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
6627 	mempool_free(pmb, phba->mbox_mem_pool);
6628 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6629 				     lpfc_max_els_tries, ndlp,
6630 				     ndlp->nlp_DID, ELS_CMD_ACC);
6631 
6632 	/* Decrement the ndlp reference count from previous mbox command */
6633 	lpfc_nlp_put(ndlp);
6634 
6635 	if (!elsiocb)
6636 		return;
6637 
6638 	icmd = &elsiocb->iocb;
6639 	icmd->ulpContext = rxid;
6640 	icmd->unsli3.rcvsli3.ox_id = oxid;
6641 
6642 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6643 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6644 	pcmd += sizeof(uint32_t); /* Skip past command */
6645 	rps_rsp = (RPS_RSP *)pcmd;
6646 
6647 	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
6648 		status = 0x10;
6649 	else
6650 		status = 0x8;
6651 	if (phba->pport->fc_flag & FC_FABRIC)
6652 		status |= 0x4;
6653 
6654 	rps_rsp->rsvd1 = 0;
6655 	rps_rsp->portStatus = cpu_to_be16(status);
6656 	rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
6657 	rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
6658 	rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
6659 	rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
6660 	rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
6661 	rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
6662 	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
6663 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6664 			 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
6665 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
6666 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
6667 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6668 			 ndlp->nlp_rpi);
6669 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6670 	phba->fc_stat.elsXmitACC++;
6671 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
6672 		lpfc_els_free_iocb(phba, elsiocb);
6673 	return;
6674 }
6675 
6676 /**
6677  * lpfc_els_rcv_rls - Process an unsolicited rls iocb
6678  * @vport: pointer to a host virtual N_Port data structure.
6679  * @cmdiocb: pointer to lpfc command iocb data structure.
6680  * @ndlp: pointer to a node-list data structure.
6681  *
6682  * This routine processes Read Port Status (RPL) IOCB received as an
6683  * ELS unsolicited event. It first checks the remote port state. If the
6684  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6685  * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
6686  * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
6687  * for reading the HBA link statistics. It is for the callback function,
6688  * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command
6689  * to actually sending out RPL Accept (ACC) response.
6690  *
6691  * Return codes
6692  *   0 - Successfully processed rls iocb (currently always return 0)
6693  **/
6694 static int
6695 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6696 		 struct lpfc_nodelist *ndlp)
6697 {
6698 	struct lpfc_hba *phba = vport->phba;
6699 	LPFC_MBOXQ_t *mbox;
6700 	struct ls_rjt stat;
6701 
6702 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6703 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
6704 		/* reject the unsolicited RPS request and done with it */
6705 		goto reject_out;
6706 
6707 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6708 	if (mbox) {
6709 		lpfc_read_lnk_stat(phba, mbox);
6710 		mbox->context1 = (void *)((unsigned long)
6711 			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6712 			cmdiocb->iocb.ulpContext)); /* rx_id */
6713 		mbox->context2 = lpfc_nlp_get(ndlp);
6714 		mbox->vport = vport;
6715 		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
6716 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6717 			!= MBX_NOT_FINISHED)
6718 			/* Mbox completion will send ELS Response */
6719 			return 0;
6720 		/* Decrement reference count used for the failed mbox
6721 		 * command.
6722 		 */
6723 		lpfc_nlp_put(ndlp);
6724 		mempool_free(mbox, phba->mbox_mem_pool);
6725 	}
6726 reject_out:
6727 	/* issue rejection response */
6728 	stat.un.b.lsRjtRsvd0 = 0;
6729 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6730 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6731 	stat.un.b.vendorUnique = 0;
6732 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6733 	return 0;
6734 }
6735 
6736 /**
6737  * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
6738  * @vport: pointer to a host virtual N_Port data structure.
6739  * @cmdiocb: pointer to lpfc command iocb data structure.
6740  * @ndlp: pointer to a node-list data structure.
6741  *
6742  * This routine processes Read Timout Value (RTV) IOCB received as an
6743  * ELS unsolicited event. It first checks the remote port state. If the
6744  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6745  * state, it invokes the lpfc_els_rsl_reject() routine to send the reject
6746  * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout
6747  * Value (RTV) unsolicited IOCB event.
6748  *
6749  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6750  * will be incremented by 1 for holding the ndlp and the reference to ndlp
6751  * will be stored into the context1 field of the IOCB for the completion
6752  * callback function to the RPS Accept Response ELS IOCB command.
6753  *
6754  * Return codes
6755  *   0 - Successfully processed rtv iocb (currently always return 0)
6756  **/
6757 static int
6758 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6759 		 struct lpfc_nodelist *ndlp)
6760 {
6761 	struct lpfc_hba *phba = vport->phba;
6762 	struct ls_rjt stat;
6763 	struct RTV_RSP *rtv_rsp;
6764 	uint8_t *pcmd;
6765 	struct lpfc_iocbq *elsiocb;
6766 	uint32_t cmdsize;
6767 
6768 
6769 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6770 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
6771 		/* reject the unsolicited RPS request and done with it */
6772 		goto reject_out;
6773 
6774 	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
6775 	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
6776 				     lpfc_max_els_tries, ndlp,
6777 				     ndlp->nlp_DID, ELS_CMD_ACC);
6778 
6779 	if (!elsiocb)
6780 		return 1;
6781 
6782 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6783 		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
6784 	pcmd += sizeof(uint32_t); /* Skip past command */
6785 
6786 	/* use the command's xri in the response */
6787 	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
6788 	elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
6789 
6790 	rtv_rsp = (struct RTV_RSP *)pcmd;
6791 
6792 	/* populate RTV payload */
6793 	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
6794 	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
6795 	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
6796 	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
6797 	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
6798 
6799 	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
6800 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
6801 			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
6802 			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
6803 			 "Data: x%x x%x x%x\n",
6804 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
6805 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
6806 			 ndlp->nlp_rpi,
6807 			rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
6808 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
6809 	phba->fc_stat.elsXmitACC++;
6810 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
6811 		lpfc_els_free_iocb(phba, elsiocb);
6812 	return 0;
6813 
6814 reject_out:
6815 	/* issue rejection response */
6816 	stat.un.b.lsRjtRsvd0 = 0;
6817 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6818 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6819 	stat.un.b.vendorUnique = 0;
6820 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6821 	return 0;
6822 }
6823 
6824 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb
6825  * @vport: pointer to a host virtual N_Port data structure.
6826  * @cmdiocb: pointer to lpfc command iocb data structure.
6827  * @ndlp: pointer to a node-list data structure.
6828  *
6829  * This routine processes Read Port Status (RPS) IOCB received as an
6830  * ELS unsolicited event. It first checks the remote port state. If the
6831  * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
6832  * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
6833  * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command
6834  * for reading the HBA link statistics. It is for the callback function,
6835  * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command
6836  * to actually sending out RPS Accept (ACC) response.
6837  *
6838  * Return codes
6839  *   0 - Successfully processed rps iocb (currently always return 0)
6840  **/
6841 static int
6842 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
6843 		 struct lpfc_nodelist *ndlp)
6844 {
6845 	struct lpfc_hba *phba = vport->phba;
6846 	uint32_t *lp;
6847 	uint8_t flag;
6848 	LPFC_MBOXQ_t *mbox;
6849 	struct lpfc_dmabuf *pcmd;
6850 	RPS *rps;
6851 	struct ls_rjt stat;
6852 
6853 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
6854 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
6855 		/* reject the unsolicited RPS request and done with it */
6856 		goto reject_out;
6857 
6858 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6859 	lp = (uint32_t *) pcmd->virt;
6860 	flag = (be32_to_cpu(*lp++) & 0xf);
6861 	rps = (RPS *) lp;
6862 
6863 	if ((flag == 0) ||
6864 	    ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
6865 	    ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
6866 				    sizeof(struct lpfc_name)) == 0))) {
6867 
6868 		printk("Fix me....\n");
6869 		dump_stack();
6870 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
6871 		if (mbox) {
6872 			lpfc_read_lnk_stat(phba, mbox);
6873 			mbox->context1 = (void *)((unsigned long)
6874 				((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
6875 				cmdiocb->iocb.ulpContext)); /* rx_id */
6876 			mbox->context2 = lpfc_nlp_get(ndlp);
6877 			mbox->vport = vport;
6878 			mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
6879 			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6880 				!= MBX_NOT_FINISHED)
6881 				/* Mbox completion will send ELS Response */
6882 				return 0;
6883 			/* Decrement reference count used for the failed mbox
6884 			 * command.
6885 			 */
6886 			lpfc_nlp_put(ndlp);
6887 			mempool_free(mbox, phba->mbox_mem_pool);
6888 		}
6889 	}
6890 
6891 reject_out:
6892 	/* issue rejection response */
6893 	stat.un.b.lsRjtRsvd0 = 0;
6894 	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
6895 	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
6896 	stat.un.b.vendorUnique = 0;
6897 	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
6898 	return 0;
6899 }
6900 
6901 /* lpfc_issue_els_rrq - Process an unsolicited rps iocb
6902  * @vport: pointer to a host virtual N_Port data structure.
6903  * @ndlp: pointer to a node-list data structure.
6904  * @did: DID of the target.
6905  * @rrq: Pointer to the rrq struct.
6906  *
6907  * Build a ELS RRQ command and send it to the target. If the issue_iocb is
6908  * Successful the the completion handler will clear the RRQ.
6909  *
6910  * Return codes
6911  *   0 - Successfully sent rrq els iocb.
6912  *   1 - Failed to send rrq els iocb.
6913  **/
6914 static int
6915 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6916 			uint32_t did, struct lpfc_node_rrq *rrq)
6917 {
6918 	struct lpfc_hba  *phba = vport->phba;
6919 	struct RRQ *els_rrq;
6920 	struct lpfc_iocbq *elsiocb;
6921 	uint8_t *pcmd;
6922 	uint16_t cmdsize;
6923 	int ret;
6924 
6925 
6926 	if (ndlp != rrq->ndlp)
6927 		ndlp = rrq->ndlp;
6928 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
6929 		return 1;
6930 
6931 	/* If ndlp is not NULL, we will bump the reference count on it */
6932 	cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ));
6933 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did,
6934 				     ELS_CMD_RRQ);
6935 	if (!elsiocb)
6936 		return 1;
6937 
6938 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6939 
6940 	/* For RRQ request, remainder of payload is Exchange IDs */
6941 	*((uint32_t *) (pcmd)) = ELS_CMD_RRQ;
6942 	pcmd += sizeof(uint32_t);
6943 	els_rrq = (struct RRQ *) pcmd;
6944 
6945 	bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
6946 	bf_set(rrq_rxid, els_rrq, rrq->rxid);
6947 	bf_set(rrq_did, els_rrq, vport->fc_myDID);
6948 	els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
6949 	els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);
6950 
6951 
6952 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6953 		"Issue RRQ:     did:x%x",
6954 		did, rrq->xritag, rrq->rxid);
6955 	elsiocb->context_un.rrq = rrq;
6956 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq;
6957 	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
6958 
6959 	if (ret == IOCB_ERROR) {
6960 		lpfc_els_free_iocb(phba, elsiocb);
6961 		return 1;
6962 	}
6963 	return 0;
6964 }
6965 
6966 /**
6967  * lpfc_send_rrq - Sends ELS RRQ if needed.
6968  * @phba: pointer to lpfc hba data structure.
6969  * @rrq: pointer to the active rrq.
6970  *
6971  * This routine will call the lpfc_issue_els_rrq if the rrq is
6972  * still active for the xri. If this function returns a failure then
6973  * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq.
6974  *
6975  * Returns 0 Success.
6976  *         1 Failure.
6977  **/
6978 int
6979 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq)
6980 {
6981 	struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport,
6982 							rrq->nlp_DID);
6983 	if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag))
6984 		return lpfc_issue_els_rrq(rrq->vport, ndlp,
6985 					 rrq->nlp_DID, rrq);
6986 	else
6987 		return 1;
6988 }
6989 
6990 /**
6991  * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
6992  * @vport: pointer to a host virtual N_Port data structure.
6993  * @cmdsize: size of the ELS command.
6994  * @oldiocb: pointer to the original lpfc command iocb data structure.
6995  * @ndlp: pointer to a node-list data structure.
6996  *
6997  * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command.
6998  * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
6999  *
7000  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7001  * will be incremented by 1 for holding the ndlp and the reference to ndlp
7002  * will be stored into the context1 field of the IOCB for the completion
7003  * callback function to the RPL Accept Response ELS command.
7004  *
7005  * Return code
7006  *   0 - Successfully issued ACC RPL ELS command
7007  *   1 - Failed to issue ACC RPL ELS command
7008  **/
7009 static int
7010 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
7011 		     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
7012 {
7013 	struct lpfc_hba *phba = vport->phba;
7014 	IOCB_t *icmd, *oldcmd;
7015 	RPL_RSP rpl_rsp;
7016 	struct lpfc_iocbq *elsiocb;
7017 	uint8_t *pcmd;
7018 
7019 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
7020 				     ndlp->nlp_DID, ELS_CMD_ACC);
7021 
7022 	if (!elsiocb)
7023 		return 1;
7024 
7025 	icmd = &elsiocb->iocb;
7026 	oldcmd = &oldiocb->iocb;
7027 	icmd->ulpContext = oldcmd->ulpContext;	/* Xri / rx_id */
7028 	icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
7029 
7030 	pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
7031 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
7032 	pcmd += sizeof(uint16_t);
7033 	*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
7034 	pcmd += sizeof(uint16_t);
7035 
7036 	/* Setup the RPL ACC payload */
7037 	rpl_rsp.listLen = be32_to_cpu(1);
7038 	rpl_rsp.index = 0;
7039 	rpl_rsp.port_num_blk.portNum = 0;
7040 	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
7041 	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
7042 	    sizeof(struct lpfc_name));
7043 	memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
7044 	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
7045 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7046 			 "0120 Xmit ELS RPL ACC response tag x%x "
7047 			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
7048 			 "rpi x%x\n",
7049 			 elsiocb->iotag, elsiocb->iocb.ulpContext,
7050 			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
7051 			 ndlp->nlp_rpi);
7052 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
7053 	phba->fc_stat.elsXmitACC++;
7054 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
7055 	    IOCB_ERROR) {
7056 		lpfc_els_free_iocb(phba, elsiocb);
7057 		return 1;
7058 	}
7059 	return 0;
7060 }
7061 
7062 /**
7063  * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
7064  * @vport: pointer to a host virtual N_Port data structure.
7065  * @cmdiocb: pointer to lpfc command iocb data structure.
7066  * @ndlp: pointer to a node-list data structure.
7067  *
7068  * This routine processes Read Port List (RPL) IOCB received as an ELS
7069  * unsolicited event. It first checks the remote port state. If the remote
7070  * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it
7071  * invokes the lpfc_els_rsp_reject() routine to send reject response.
7072  * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
7073  * to accept the RPL.
7074  *
7075  * Return code
7076  *   0 - Successfully processed rpl iocb (currently always return 0)
7077  **/
7078 static int
7079 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7080 		 struct lpfc_nodelist *ndlp)
7081 {
7082 	struct lpfc_dmabuf *pcmd;
7083 	uint32_t *lp;
7084 	uint32_t maxsize;
7085 	uint16_t cmdsize;
7086 	RPL *rpl;
7087 	struct ls_rjt stat;
7088 
7089 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7090 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
7091 		/* issue rejection response */
7092 		stat.un.b.lsRjtRsvd0 = 0;
7093 		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7094 		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7095 		stat.un.b.vendorUnique = 0;
7096 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
7097 			NULL);
7098 		/* rejected the unsolicited RPL request and done with it */
7099 		return 0;
7100 	}
7101 
7102 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7103 	lp = (uint32_t *) pcmd->virt;
7104 	rpl = (RPL *) (lp + 1);
7105 	maxsize = be32_to_cpu(rpl->maxsize);
7106 
7107 	/* We support only one port */
7108 	if ((rpl->index == 0) &&
7109 	    ((maxsize == 0) ||
7110 	     ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
7111 		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
7112 	} else {
7113 		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
7114 	}
7115 	lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
7116 
7117 	return 0;
7118 }
7119 
7120 /**
7121  * lpfc_els_rcv_farp - Process an unsolicited farp request els command
7122  * @vport: pointer to a virtual N_Port data structure.
7123  * @cmdiocb: pointer to lpfc command iocb data structure.
7124  * @ndlp: pointer to a node-list data structure.
7125  *
7126  * This routine processes Fibre Channel Address Resolution Protocol
7127  * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
7128  * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
7129  * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
7130  * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
7131  * remote PortName is compared against the FC PortName stored in the @vport
7132  * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
7133  * compared against the FC NodeName stored in the @vport data structure.
7134  * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
7135  * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
7136  * invoked to send out FARP Response to the remote node. Before sending the
7137  * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP
7138  * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
7139  * routine is invoked to log into the remote port first.
7140  *
7141  * Return code
7142  *   0 - Either the FARP Match Mode not supported or successfully processed
7143  **/
7144 static int
7145 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7146 		  struct lpfc_nodelist *ndlp)
7147 {
7148 	struct lpfc_dmabuf *pcmd;
7149 	uint32_t *lp;
7150 	IOCB_t *icmd;
7151 	FARP *fp;
7152 	uint32_t cmd, cnt, did;
7153 
7154 	icmd = &cmdiocb->iocb;
7155 	did = icmd->un.elsreq64.remoteID;
7156 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7157 	lp = (uint32_t *) pcmd->virt;
7158 
7159 	cmd = *lp++;
7160 	fp = (FARP *) lp;
7161 	/* FARP-REQ received from DID <did> */
7162 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7163 			 "0601 FARP-REQ received from DID x%x\n", did);
7164 	/* We will only support match on WWPN or WWNN */
7165 	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
7166 		return 0;
7167 	}
7168 
7169 	cnt = 0;
7170 	/* If this FARP command is searching for my portname */
7171 	if (fp->Mflags & FARP_MATCH_PORT) {
7172 		if (memcmp(&fp->RportName, &vport->fc_portname,
7173 			   sizeof(struct lpfc_name)) == 0)
7174 			cnt = 1;
7175 	}
7176 
7177 	/* If this FARP command is searching for my nodename */
7178 	if (fp->Mflags & FARP_MATCH_NODE) {
7179 		if (memcmp(&fp->RnodeName, &vport->fc_nodename,
7180 			   sizeof(struct lpfc_name)) == 0)
7181 			cnt = 1;
7182 	}
7183 
7184 	if (cnt) {
7185 		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
7186 		   (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
7187 			/* Log back into the node before sending the FARP. */
7188 			if (fp->Rflags & FARP_REQUEST_PLOGI) {
7189 				ndlp->nlp_prev_state = ndlp->nlp_state;
7190 				lpfc_nlp_set_state(vport, ndlp,
7191 						   NLP_STE_PLOGI_ISSUE);
7192 				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
7193 			}
7194 
7195 			/* Send a FARP response to that node */
7196 			if (fp->Rflags & FARP_REQUEST_FARPR)
7197 				lpfc_issue_els_farpr(vport, did, 0);
7198 		}
7199 	}
7200 	return 0;
7201 }
7202 
7203 /**
7204  * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
7205  * @vport: pointer to a host virtual N_Port data structure.
7206  * @cmdiocb: pointer to lpfc command iocb data structure.
7207  * @ndlp: pointer to a node-list data structure.
7208  *
7209  * This routine processes Fibre Channel Address Resolution Protocol
7210  * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
7211  * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
7212  * the FARP response request.
7213  *
7214  * Return code
7215  *   0 - Successfully processed FARPR IOCB (currently always return 0)
7216  **/
7217 static int
7218 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7219 		   struct lpfc_nodelist  *ndlp)
7220 {
7221 	struct lpfc_dmabuf *pcmd;
7222 	uint32_t *lp;
7223 	IOCB_t *icmd;
7224 	uint32_t cmd, did;
7225 
7226 	icmd = &cmdiocb->iocb;
7227 	did = icmd->un.elsreq64.remoteID;
7228 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7229 	lp = (uint32_t *) pcmd->virt;
7230 
7231 	cmd = *lp++;
7232 	/* FARP-RSP received from DID <did> */
7233 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7234 			 "0600 FARP-RSP received from DID x%x\n", did);
7235 	/* ACCEPT the Farp resp request */
7236 	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
7237 
7238 	return 0;
7239 }
7240 
7241 /**
7242  * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
7243  * @vport: pointer to a host virtual N_Port data structure.
7244  * @cmdiocb: pointer to lpfc command iocb data structure.
7245  * @fan_ndlp: pointer to a node-list data structure.
7246  *
7247  * This routine processes a Fabric Address Notification (FAN) IOCB
7248  * command received as an ELS unsolicited event. The FAN ELS command will
7249  * only be processed on a physical port (i.e., the @vport represents the
7250  * physical port). The fabric NodeName and PortName from the FAN IOCB are
7251  * compared against those in the phba data structure. If any of those is
7252  * different, the lpfc_initial_flogi() routine is invoked to initialize
7253  * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise,
7254  * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
7255  * is invoked to register login to the fabric.
7256  *
7257  * Return code
7258  *   0 - Successfully processed fan iocb (currently always return 0).
7259  **/
7260 static int
7261 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7262 		 struct lpfc_nodelist *fan_ndlp)
7263 {
7264 	struct lpfc_hba *phba = vport->phba;
7265 	uint32_t *lp;
7266 	FAN *fp;
7267 
7268 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
7269 	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
7270 	fp = (FAN *) ++lp;
7271 	/* FAN received; Fan does not have a reply sequence */
7272 	if ((vport == phba->pport) &&
7273 	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
7274 		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
7275 			    sizeof(struct lpfc_name))) ||
7276 		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
7277 			    sizeof(struct lpfc_name)))) {
7278 			/* This port has switched fabrics. FLOGI is required */
7279 			lpfc_issue_init_vfi(vport);
7280 		} else {
7281 			/* FAN verified - skip FLOGI */
7282 			vport->fc_myDID = vport->fc_prevDID;
7283 			if (phba->sli_rev < LPFC_SLI_REV4)
7284 				lpfc_issue_fabric_reglogin(vport);
7285 			else {
7286 				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7287 					"3138 Need register VFI: (x%x/%x)\n",
7288 					vport->fc_prevDID, vport->fc_myDID);
7289 				lpfc_issue_reg_vfi(vport);
7290 			}
7291 		}
7292 	}
7293 	return 0;
7294 }
7295 
7296 /**
7297  * lpfc_els_timeout - Handler funciton to the els timer
7298  * @ptr: holder for the timer function associated data.
7299  *
7300  * This routine is invoked by the ELS timer after timeout. It posts the ELS
7301  * timer timeout event by setting the WORKER_ELS_TMO bit to the work port
7302  * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
7303  * up the worker thread. It is for the worker thread to invoke the routine
7304  * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
7305  **/
7306 void
7307 lpfc_els_timeout(unsigned long ptr)
7308 {
7309 	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
7310 	struct lpfc_hba   *phba = vport->phba;
7311 	uint32_t tmo_posted;
7312 	unsigned long iflag;
7313 
7314 	spin_lock_irqsave(&vport->work_port_lock, iflag);
7315 	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
7316 	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
7317 		vport->work_port_events |= WORKER_ELS_TMO;
7318 	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
7319 
7320 	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
7321 		lpfc_worker_wake_up(phba);
7322 	return;
7323 }
7324 
7325 
7326 /**
7327  * lpfc_els_timeout_handler - Process an els timeout event
7328  * @vport: pointer to a virtual N_Port data structure.
7329  *
7330  * This routine is the actual handler function that processes an ELS timeout
7331  * event. It walks the ELS ring to get and abort all the IOCBs (except the
7332  * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by
7333  * invoking the lpfc_sli_issue_abort_iotag() routine.
7334  **/
7335 void
7336 lpfc_els_timeout_handler(struct lpfc_vport *vport)
7337 {
7338 	struct lpfc_hba  *phba = vport->phba;
7339 	struct lpfc_sli_ring *pring;
7340 	struct lpfc_iocbq *tmp_iocb, *piocb;
7341 	IOCB_t *cmd = NULL;
7342 	struct lpfc_dmabuf *pcmd;
7343 	uint32_t els_command = 0;
7344 	uint32_t timeout;
7345 	uint32_t remote_ID = 0xffffffff;
7346 	LIST_HEAD(abort_list);
7347 
7348 
7349 	timeout = (uint32_t)(phba->fc_ratov << 1);
7350 
7351 	pring = lpfc_phba_elsring(phba);
7352 
7353 	if ((phba->pport->load_flag & FC_UNLOADING))
7354 		return;
7355 	spin_lock_irq(&phba->hbalock);
7356 	if (phba->sli_rev == LPFC_SLI_REV4)
7357 		spin_lock(&pring->ring_lock);
7358 
7359 	if ((phba->pport->load_flag & FC_UNLOADING)) {
7360 		if (phba->sli_rev == LPFC_SLI_REV4)
7361 			spin_unlock(&pring->ring_lock);
7362 		spin_unlock_irq(&phba->hbalock);
7363 		return;
7364 	}
7365 
7366 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7367 		cmd = &piocb->iocb;
7368 
7369 		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
7370 		    piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
7371 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
7372 			continue;
7373 
7374 		if (piocb->vport != vport)
7375 			continue;
7376 
7377 		pcmd = (struct lpfc_dmabuf *) piocb->context2;
7378 		if (pcmd)
7379 			els_command = *(uint32_t *) (pcmd->virt);
7380 
7381 		if (els_command == ELS_CMD_FARP ||
7382 		    els_command == ELS_CMD_FARPR ||
7383 		    els_command == ELS_CMD_FDISC)
7384 			continue;
7385 
7386 		if (piocb->drvrTimeout > 0) {
7387 			if (piocb->drvrTimeout >= timeout)
7388 				piocb->drvrTimeout -= timeout;
7389 			else
7390 				piocb->drvrTimeout = 0;
7391 			continue;
7392 		}
7393 
7394 		remote_ID = 0xffffffff;
7395 		if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
7396 			remote_ID = cmd->un.elsreq64.remoteID;
7397 		else {
7398 			struct lpfc_nodelist *ndlp;
7399 			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
7400 			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
7401 				remote_ID = ndlp->nlp_DID;
7402 		}
7403 		list_add_tail(&piocb->dlist, &abort_list);
7404 	}
7405 	if (phba->sli_rev == LPFC_SLI_REV4)
7406 		spin_unlock(&pring->ring_lock);
7407 	spin_unlock_irq(&phba->hbalock);
7408 
7409 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
7410 		cmd = &piocb->iocb;
7411 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7412 			 "0127 ELS timeout Data: x%x x%x x%x "
7413 			 "x%x\n", els_command,
7414 			 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
7415 		spin_lock_irq(&phba->hbalock);
7416 		list_del_init(&piocb->dlist);
7417 		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
7418 		spin_unlock_irq(&phba->hbalock);
7419 	}
7420 
7421 	if (!list_empty(&pring->txcmplq))
7422 		if (!(phba->pport->load_flag & FC_UNLOADING))
7423 			mod_timer(&vport->els_tmofunc,
7424 				  jiffies + msecs_to_jiffies(1000 * timeout));
7425 }
7426 
7427 /**
7428  * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
7429  * @vport: pointer to a host virtual N_Port data structure.
7430  *
7431  * This routine is used to clean up all the outstanding ELS commands on a
7432  * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
7433  * routine. After that, it walks the ELS transmit queue to remove all the
7434  * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
7435  * the IOCBs with a non-NULL completion callback function, the callback
7436  * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7437  * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
7438  * callback function, the IOCB will simply be released. Finally, it walks
7439  * the ELS transmit completion queue to issue an abort IOCB to any transmit
7440  * completion queue IOCB that is associated with the @vport and is not
7441  * an IOCB from libdfc (i.e., the management plane IOCBs that are not
7442  * part of the discovery state machine) out to HBA by invoking the
7443  * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
7444  * abort IOCB to any transmit completion queueed IOCB, it does not guarantee
7445  * the IOCBs are aborted when this function returns.
7446  **/
7447 void
7448 lpfc_els_flush_cmd(struct lpfc_vport *vport)
7449 {
7450 	LIST_HEAD(abort_list);
7451 	struct lpfc_hba  *phba = vport->phba;
7452 	struct lpfc_sli_ring *pring;
7453 	struct lpfc_iocbq *tmp_iocb, *piocb;
7454 	IOCB_t *cmd = NULL;
7455 
7456 	lpfc_fabric_abort_vport(vport);
7457 	/*
7458 	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate
7459 	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag
7460 	 * ultimately grabs the ring_lock, the driver must splice the list into
7461 	 * a working list and release the locks before calling the abort.
7462 	 */
7463 	spin_lock_irq(&phba->hbalock);
7464 	pring = lpfc_phba_elsring(phba);
7465 
7466 	/* Bail out if we've no ELS wq, like in PCI error recovery case. */
7467 	if (unlikely(!pring)) {
7468 		spin_unlock_irq(&phba->hbalock);
7469 		return;
7470 	}
7471 
7472 	if (phba->sli_rev == LPFC_SLI_REV4)
7473 		spin_lock(&pring->ring_lock);
7474 
7475 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7476 		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
7477 			continue;
7478 
7479 		if (piocb->vport != vport)
7480 			continue;
7481 		list_add_tail(&piocb->dlist, &abort_list);
7482 	}
7483 	if (phba->sli_rev == LPFC_SLI_REV4)
7484 		spin_unlock(&pring->ring_lock);
7485 	spin_unlock_irq(&phba->hbalock);
7486 	/* Abort each iocb on the aborted list and remove the dlist links. */
7487 	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
7488 		spin_lock_irq(&phba->hbalock);
7489 		list_del_init(&piocb->dlist);
7490 		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
7491 		spin_unlock_irq(&phba->hbalock);
7492 	}
7493 	if (!list_empty(&abort_list))
7494 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7495 				 "3387 abort list for txq not empty\n");
7496 	INIT_LIST_HEAD(&abort_list);
7497 
7498 	spin_lock_irq(&phba->hbalock);
7499 	if (phba->sli_rev == LPFC_SLI_REV4)
7500 		spin_lock(&pring->ring_lock);
7501 
7502 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
7503 		cmd = &piocb->iocb;
7504 
7505 		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
7506 			continue;
7507 		}
7508 
7509 		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
7510 		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
7511 		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
7512 		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
7513 		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
7514 			continue;
7515 
7516 		if (piocb->vport != vport)
7517 			continue;
7518 
7519 		list_del_init(&piocb->list);
7520 		list_add_tail(&piocb->list, &abort_list);
7521 	}
7522 	if (phba->sli_rev == LPFC_SLI_REV4)
7523 		spin_unlock(&pring->ring_lock);
7524 	spin_unlock_irq(&phba->hbalock);
7525 
7526 	/* Cancell all the IOCBs from the completions list */
7527 	lpfc_sli_cancel_iocbs(phba, &abort_list,
7528 			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
7529 
7530 	return;
7531 }
7532 
7533 /**
7534  * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
7535  * @phba: pointer to lpfc hba data structure.
7536  *
7537  * This routine is used to clean up all the outstanding ELS commands on a
7538  * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
7539  * routine. After that, it walks the ELS transmit queue to remove all the
7540  * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
7541  * the IOCBs with the completion callback function associated, the callback
7542  * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7543  * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
7544  * callback function associated, the IOCB will simply be released. Finally,
7545  * it walks the ELS transmit completion queue to issue an abort IOCB to any
7546  * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
7547  * management plane IOCBs that are not part of the discovery state machine)
7548  * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
7549  **/
7550 void
7551 lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
7552 {
7553 	struct lpfc_vport *vport;
7554 	list_for_each_entry(vport, &phba->port_list, listentry)
7555 		lpfc_els_flush_cmd(vport);
7556 
7557 	return;
7558 }
7559 
7560 /**
7561  * lpfc_send_els_failure_event - Posts an ELS command failure event
7562  * @phba: Pointer to hba context object.
7563  * @cmdiocbp: Pointer to command iocb which reported error.
7564  * @rspiocbp: Pointer to response iocb which reported error.
7565  *
7566  * This function sends an event when there is an ELS command
7567  * failure.
7568  **/
7569 void
7570 lpfc_send_els_failure_event(struct lpfc_hba *phba,
7571 			struct lpfc_iocbq *cmdiocbp,
7572 			struct lpfc_iocbq *rspiocbp)
7573 {
7574 	struct lpfc_vport *vport = cmdiocbp->vport;
7575 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7576 	struct lpfc_lsrjt_event lsrjt_event;
7577 	struct lpfc_fabric_event_header fabric_event;
7578 	struct ls_rjt stat;
7579 	struct lpfc_nodelist *ndlp;
7580 	uint32_t *pcmd;
7581 
7582 	ndlp = cmdiocbp->context1;
7583 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
7584 		return;
7585 
7586 	if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
7587 		lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
7588 		lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
7589 		memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
7590 			sizeof(struct lpfc_name));
7591 		memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
7592 			sizeof(struct lpfc_name));
7593 		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
7594 			cmdiocbp->context2)->virt);
7595 		lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
7596 		stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
7597 		lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
7598 		lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
7599 		fc_host_post_vendor_event(shost,
7600 			fc_get_event_number(),
7601 			sizeof(lsrjt_event),
7602 			(char *)&lsrjt_event,
7603 			LPFC_NL_VENDOR_ID);
7604 		return;
7605 	}
7606 	if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
7607 		(rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
7608 		fabric_event.event_type = FC_REG_FABRIC_EVENT;
7609 		if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
7610 			fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
7611 		else
7612 			fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
7613 		memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
7614 			sizeof(struct lpfc_name));
7615 		memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
7616 			sizeof(struct lpfc_name));
7617 		fc_host_post_vendor_event(shost,
7618 			fc_get_event_number(),
7619 			sizeof(fabric_event),
7620 			(char *)&fabric_event,
7621 			LPFC_NL_VENDOR_ID);
7622 		return;
7623 	}
7624 
7625 }
7626 
7627 /**
7628  * lpfc_send_els_event - Posts unsolicited els event
7629  * @vport: Pointer to vport object.
7630  * @ndlp: Pointer FC node object.
7631  * @cmd: ELS command code.
7632  *
7633  * This function posts an event when there is an incoming
7634  * unsolicited ELS command.
7635  **/
7636 static void
7637 lpfc_send_els_event(struct lpfc_vport *vport,
7638 		    struct lpfc_nodelist *ndlp,
7639 		    uint32_t *payload)
7640 {
7641 	struct lpfc_els_event_header *els_data = NULL;
7642 	struct lpfc_logo_event *logo_data = NULL;
7643 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
7644 
7645 	if (*payload == ELS_CMD_LOGO) {
7646 		logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
7647 		if (!logo_data) {
7648 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7649 				"0148 Failed to allocate memory "
7650 				"for LOGO event\n");
7651 			return;
7652 		}
7653 		els_data = &logo_data->header;
7654 	} else {
7655 		els_data = kmalloc(sizeof(struct lpfc_els_event_header),
7656 			GFP_KERNEL);
7657 		if (!els_data) {
7658 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7659 				"0149 Failed to allocate memory "
7660 				"for ELS event\n");
7661 			return;
7662 		}
7663 	}
7664 	els_data->event_type = FC_REG_ELS_EVENT;
7665 	switch (*payload) {
7666 	case ELS_CMD_PLOGI:
7667 		els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
7668 		break;
7669 	case ELS_CMD_PRLO:
7670 		els_data->subcategory = LPFC_EVENT_PRLO_RCV;
7671 		break;
7672 	case ELS_CMD_ADISC:
7673 		els_data->subcategory = LPFC_EVENT_ADISC_RCV;
7674 		break;
7675 	case ELS_CMD_LOGO:
7676 		els_data->subcategory = LPFC_EVENT_LOGO_RCV;
7677 		/* Copy the WWPN in the LOGO payload */
7678 		memcpy(logo_data->logo_wwpn, &payload[2],
7679 			sizeof(struct lpfc_name));
7680 		break;
7681 	default:
7682 		kfree(els_data);
7683 		return;
7684 	}
7685 	memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
7686 	memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
7687 	if (*payload == ELS_CMD_LOGO) {
7688 		fc_host_post_vendor_event(shost,
7689 			fc_get_event_number(),
7690 			sizeof(struct lpfc_logo_event),
7691 			(char *)logo_data,
7692 			LPFC_NL_VENDOR_ID);
7693 		kfree(logo_data);
7694 	} else {
7695 		fc_host_post_vendor_event(shost,
7696 			fc_get_event_number(),
7697 			sizeof(struct lpfc_els_event_header),
7698 			(char *)els_data,
7699 			LPFC_NL_VENDOR_ID);
7700 		kfree(els_data);
7701 	}
7702 
7703 	return;
7704 }
7705 
7706 
7707 /**
7708  * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
7709  * @phba: pointer to lpfc hba data structure.
7710  * @pring: pointer to a SLI ring.
7711  * @vport: pointer to a host virtual N_Port data structure.
7712  * @elsiocb: pointer to lpfc els command iocb data structure.
7713  *
7714  * This routine is used for processing the IOCB associated with a unsolicited
7715  * event. It first determines whether there is an existing ndlp that matches
7716  * the DID from the unsolicited IOCB. If not, it will create a new one with
7717  * the DID from the unsolicited IOCB. The ELS command from the unsolicited
7718  * IOCB is then used to invoke the proper routine and to set up proper state
7719  * of the discovery state machine.
7720  **/
7721 static void
7722 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7723 		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
7724 {
7725 	struct Scsi_Host  *shost;
7726 	struct lpfc_nodelist *ndlp;
7727 	struct ls_rjt stat;
7728 	uint32_t *payload;
7729 	uint32_t cmd, did, newnode;
7730 	uint8_t rjt_exp, rjt_err = 0;
7731 	IOCB_t *icmd = &elsiocb->iocb;
7732 
7733 	if (!vport || !(elsiocb->context2))
7734 		goto dropit;
7735 
7736 	newnode = 0;
7737 	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
7738 	cmd = *payload;
7739 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
7740 		lpfc_post_buffer(phba, pring, 1);
7741 
7742 	did = icmd->un.rcvels.remoteID;
7743 	if (icmd->ulpStatus) {
7744 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7745 			"RCV Unsol ELS:  status:x%x/x%x did:x%x",
7746 			icmd->ulpStatus, icmd->un.ulpWord[4], did);
7747 		goto dropit;
7748 	}
7749 
7750 	/* Check to see if link went down during discovery */
7751 	if (lpfc_els_chk_latt(vport))
7752 		goto dropit;
7753 
7754 	/* Ignore traffic received during vport shutdown. */
7755 	if (vport->load_flag & FC_UNLOADING)
7756 		goto dropit;
7757 
7758 	/* If NPort discovery is delayed drop incoming ELS */
7759 	if ((vport->fc_flag & FC_DISC_DELAYED) &&
7760 			(cmd != ELS_CMD_PLOGI))
7761 		goto dropit;
7762 
7763 	ndlp = lpfc_findnode_did(vport, did);
7764 	if (!ndlp) {
7765 		/* Cannot find existing Fabric ndlp, so allocate a new one */
7766 		ndlp = lpfc_nlp_init(vport, did);
7767 		if (!ndlp)
7768 			goto dropit;
7769 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
7770 		newnode = 1;
7771 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
7772 			ndlp->nlp_type |= NLP_FABRIC;
7773 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
7774 		ndlp = lpfc_enable_node(vport, ndlp,
7775 					NLP_STE_UNUSED_NODE);
7776 		if (!ndlp)
7777 			goto dropit;
7778 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
7779 		newnode = 1;
7780 		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
7781 			ndlp->nlp_type |= NLP_FABRIC;
7782 	} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
7783 		/* This is similar to the new node path */
7784 		ndlp = lpfc_nlp_get(ndlp);
7785 		if (!ndlp)
7786 			goto dropit;
7787 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
7788 		newnode = 1;
7789 	}
7790 
7791 	phba->fc_stat.elsRcvFrame++;
7792 
7793 	/*
7794 	 * Do not process any unsolicited ELS commands
7795 	 * if the ndlp is in DEV_LOSS
7796 	 */
7797 	shost = lpfc_shost_from_vport(vport);
7798 	spin_lock_irq(shost->host_lock);
7799 	if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) {
7800 		spin_unlock_irq(shost->host_lock);
7801 		goto dropit;
7802 	}
7803 	spin_unlock_irq(shost->host_lock);
7804 
7805 	elsiocb->context1 = lpfc_nlp_get(ndlp);
7806 	elsiocb->vport = vport;
7807 
7808 	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
7809 		cmd &= ELS_CMD_MASK;
7810 	}
7811 	/* ELS command <elsCmd> received from NPORT <did> */
7812 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7813 			 "0112 ELS command x%x received from NPORT x%x "
7814 			 "Data: x%x x%x x%x x%x\n",
7815 			cmd, did, vport->port_state, vport->fc_flag,
7816 			vport->fc_myDID, vport->fc_prevDID);
7817 
7818 	/* reject till our FLOGI completes */
7819 	if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
7820 	    (cmd != ELS_CMD_FLOGI)) {
7821 		rjt_err = LSRJT_LOGICAL_BSY;
7822 		rjt_exp = LSEXP_NOTHING_MORE;
7823 		goto lsrjt;
7824 	}
7825 
7826 	switch (cmd) {
7827 	case ELS_CMD_PLOGI:
7828 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7829 			"RCV PLOGI:       did:x%x/ste:x%x flg:x%x",
7830 			did, vport->port_state, ndlp->nlp_flag);
7831 
7832 		phba->fc_stat.elsRcvPLOGI++;
7833 		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
7834 		if (phba->sli_rev == LPFC_SLI_REV4 &&
7835 		    (phba->pport->fc_flag & FC_PT2PT)) {
7836 			vport->fc_prevDID = vport->fc_myDID;
7837 			/* Our DID needs to be updated before registering
7838 			 * the vfi. This is done in lpfc_rcv_plogi but
7839 			 * that is called after the reg_vfi.
7840 			 */
7841 			vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
7842 			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7843 					 "3312 Remote port assigned DID x%x "
7844 					 "%x\n", vport->fc_myDID,
7845 					 vport->fc_prevDID);
7846 		}
7847 
7848 		lpfc_send_els_event(vport, ndlp, payload);
7849 
7850 		/* If Nport discovery is delayed, reject PLOGIs */
7851 		if (vport->fc_flag & FC_DISC_DELAYED) {
7852 			rjt_err = LSRJT_UNABLE_TPC;
7853 			rjt_exp = LSEXP_NOTHING_MORE;
7854 			break;
7855 		}
7856 
7857 		if (vport->port_state < LPFC_DISC_AUTH) {
7858 			if (!(phba->pport->fc_flag & FC_PT2PT) ||
7859 				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
7860 				rjt_err = LSRJT_UNABLE_TPC;
7861 				rjt_exp = LSEXP_NOTHING_MORE;
7862 				break;
7863 			}
7864 		}
7865 
7866 		spin_lock_irq(shost->host_lock);
7867 		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
7868 		spin_unlock_irq(shost->host_lock);
7869 
7870 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
7871 					NLP_EVT_RCV_PLOGI);
7872 
7873 		break;
7874 	case ELS_CMD_FLOGI:
7875 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7876 			"RCV FLOGI:       did:x%x/ste:x%x flg:x%x",
7877 			did, vport->port_state, ndlp->nlp_flag);
7878 
7879 		phba->fc_stat.elsRcvFLOGI++;
7880 		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
7881 		if (newnode)
7882 			lpfc_nlp_put(ndlp);
7883 		break;
7884 	case ELS_CMD_LOGO:
7885 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7886 			"RCV LOGO:        did:x%x/ste:x%x flg:x%x",
7887 			did, vport->port_state, ndlp->nlp_flag);
7888 
7889 		phba->fc_stat.elsRcvLOGO++;
7890 		lpfc_send_els_event(vport, ndlp, payload);
7891 		if (vport->port_state < LPFC_DISC_AUTH) {
7892 			rjt_err = LSRJT_UNABLE_TPC;
7893 			rjt_exp = LSEXP_NOTHING_MORE;
7894 			break;
7895 		}
7896 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
7897 		break;
7898 	case ELS_CMD_PRLO:
7899 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7900 			"RCV PRLO:        did:x%x/ste:x%x flg:x%x",
7901 			did, vport->port_state, ndlp->nlp_flag);
7902 
7903 		phba->fc_stat.elsRcvPRLO++;
7904 		lpfc_send_els_event(vport, ndlp, payload);
7905 		if (vport->port_state < LPFC_DISC_AUTH) {
7906 			rjt_err = LSRJT_UNABLE_TPC;
7907 			rjt_exp = LSEXP_NOTHING_MORE;
7908 			break;
7909 		}
7910 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
7911 		break;
7912 	case ELS_CMD_LCB:
7913 		phba->fc_stat.elsRcvLCB++;
7914 		lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
7915 		break;
7916 	case ELS_CMD_RDP:
7917 		phba->fc_stat.elsRcvRDP++;
7918 		lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
7919 		break;
7920 	case ELS_CMD_RSCN:
7921 		phba->fc_stat.elsRcvRSCN++;
7922 		lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
7923 		if (newnode)
7924 			lpfc_nlp_put(ndlp);
7925 		break;
7926 	case ELS_CMD_ADISC:
7927 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7928 			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
7929 			did, vport->port_state, ndlp->nlp_flag);
7930 
7931 		lpfc_send_els_event(vport, ndlp, payload);
7932 		phba->fc_stat.elsRcvADISC++;
7933 		if (vport->port_state < LPFC_DISC_AUTH) {
7934 			rjt_err = LSRJT_UNABLE_TPC;
7935 			rjt_exp = LSEXP_NOTHING_MORE;
7936 			break;
7937 		}
7938 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
7939 					NLP_EVT_RCV_ADISC);
7940 		break;
7941 	case ELS_CMD_PDISC:
7942 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7943 			"RCV PDISC:       did:x%x/ste:x%x flg:x%x",
7944 			did, vport->port_state, ndlp->nlp_flag);
7945 
7946 		phba->fc_stat.elsRcvPDISC++;
7947 		if (vport->port_state < LPFC_DISC_AUTH) {
7948 			rjt_err = LSRJT_UNABLE_TPC;
7949 			rjt_exp = LSEXP_NOTHING_MORE;
7950 			break;
7951 		}
7952 		lpfc_disc_state_machine(vport, ndlp, elsiocb,
7953 					NLP_EVT_RCV_PDISC);
7954 		break;
7955 	case ELS_CMD_FARPR:
7956 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7957 			"RCV FARPR:       did:x%x/ste:x%x flg:x%x",
7958 			did, vport->port_state, ndlp->nlp_flag);
7959 
7960 		phba->fc_stat.elsRcvFARPR++;
7961 		lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
7962 		break;
7963 	case ELS_CMD_FARP:
7964 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7965 			"RCV FARP:        did:x%x/ste:x%x flg:x%x",
7966 			did, vport->port_state, ndlp->nlp_flag);
7967 
7968 		phba->fc_stat.elsRcvFARP++;
7969 		lpfc_els_rcv_farp(vport, elsiocb, ndlp);
7970 		break;
7971 	case ELS_CMD_FAN:
7972 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7973 			"RCV FAN:         did:x%x/ste:x%x flg:x%x",
7974 			did, vport->port_state, ndlp->nlp_flag);
7975 
7976 		phba->fc_stat.elsRcvFAN++;
7977 		lpfc_els_rcv_fan(vport, elsiocb, ndlp);
7978 		break;
7979 	case ELS_CMD_PRLI:
7980 	case ELS_CMD_NVMEPRLI:
7981 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7982 			"RCV PRLI:        did:x%x/ste:x%x flg:x%x",
7983 			did, vport->port_state, ndlp->nlp_flag);
7984 
7985 		phba->fc_stat.elsRcvPRLI++;
7986 		if ((vport->port_state < LPFC_DISC_AUTH) &&
7987 		    (vport->fc_flag & FC_FABRIC)) {
7988 			rjt_err = LSRJT_UNABLE_TPC;
7989 			rjt_exp = LSEXP_NOTHING_MORE;
7990 			break;
7991 		}
7992 		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
7993 		break;
7994 	case ELS_CMD_LIRR:
7995 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
7996 			"RCV LIRR:        did:x%x/ste:x%x flg:x%x",
7997 			did, vport->port_state, ndlp->nlp_flag);
7998 
7999 		phba->fc_stat.elsRcvLIRR++;
8000 		lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
8001 		if (newnode)
8002 			lpfc_nlp_put(ndlp);
8003 		break;
8004 	case ELS_CMD_RLS:
8005 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8006 			"RCV RLS:         did:x%x/ste:x%x flg:x%x",
8007 			did, vport->port_state, ndlp->nlp_flag);
8008 
8009 		phba->fc_stat.elsRcvRLS++;
8010 		lpfc_els_rcv_rls(vport, elsiocb, ndlp);
8011 		if (newnode)
8012 			lpfc_nlp_put(ndlp);
8013 		break;
8014 	case ELS_CMD_RPS:
8015 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8016 			"RCV RPS:         did:x%x/ste:x%x flg:x%x",
8017 			did, vport->port_state, ndlp->nlp_flag);
8018 
8019 		phba->fc_stat.elsRcvRPS++;
8020 		lpfc_els_rcv_rps(vport, elsiocb, ndlp);
8021 		if (newnode)
8022 			lpfc_nlp_put(ndlp);
8023 		break;
8024 	case ELS_CMD_RPL:
8025 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8026 			"RCV RPL:         did:x%x/ste:x%x flg:x%x",
8027 			did, vport->port_state, ndlp->nlp_flag);
8028 
8029 		phba->fc_stat.elsRcvRPL++;
8030 		lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
8031 		if (newnode)
8032 			lpfc_nlp_put(ndlp);
8033 		break;
8034 	case ELS_CMD_RNID:
8035 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8036 			"RCV RNID:        did:x%x/ste:x%x flg:x%x",
8037 			did, vport->port_state, ndlp->nlp_flag);
8038 
8039 		phba->fc_stat.elsRcvRNID++;
8040 		lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
8041 		if (newnode)
8042 			lpfc_nlp_put(ndlp);
8043 		break;
8044 	case ELS_CMD_RTV:
8045 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8046 			"RCV RTV:        did:x%x/ste:x%x flg:x%x",
8047 			did, vport->port_state, ndlp->nlp_flag);
8048 		phba->fc_stat.elsRcvRTV++;
8049 		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
8050 		if (newnode)
8051 			lpfc_nlp_put(ndlp);
8052 		break;
8053 	case ELS_CMD_RRQ:
8054 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8055 			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
8056 			did, vport->port_state, ndlp->nlp_flag);
8057 
8058 		phba->fc_stat.elsRcvRRQ++;
8059 		lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
8060 		if (newnode)
8061 			lpfc_nlp_put(ndlp);
8062 		break;
8063 	case ELS_CMD_ECHO:
8064 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8065 			"RCV ECHO:        did:x%x/ste:x%x flg:x%x",
8066 			did, vport->port_state, ndlp->nlp_flag);
8067 
8068 		phba->fc_stat.elsRcvECHO++;
8069 		lpfc_els_rcv_echo(vport, elsiocb, ndlp);
8070 		if (newnode)
8071 			lpfc_nlp_put(ndlp);
8072 		break;
8073 	case ELS_CMD_REC:
8074 			/* receive this due to exchange closed */
8075 			rjt_err = LSRJT_UNABLE_TPC;
8076 			rjt_exp = LSEXP_INVALID_OX_RX;
8077 		break;
8078 	default:
8079 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8080 			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
8081 			cmd, did, vport->port_state);
8082 
8083 		/* Unsupported ELS command, reject */
8084 		rjt_err = LSRJT_CMD_UNSUPPORTED;
8085 		rjt_exp = LSEXP_NOTHING_MORE;
8086 
8087 		/* Unknown ELS command <elsCmd> received from NPORT <did> */
8088 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8089 				 "0115 Unknown ELS command x%x "
8090 				 "received from NPORT x%x\n", cmd, did);
8091 		if (newnode)
8092 			lpfc_nlp_put(ndlp);
8093 		break;
8094 	}
8095 
8096 lsrjt:
8097 	/* check if need to LS_RJT received ELS cmd */
8098 	if (rjt_err) {
8099 		memset(&stat, 0, sizeof(stat));
8100 		stat.un.b.lsRjtRsnCode = rjt_err;
8101 		stat.un.b.lsRjtRsnCodeExp = rjt_exp;
8102 		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
8103 			NULL);
8104 	}
8105 
8106 	lpfc_nlp_put(elsiocb->context1);
8107 	elsiocb->context1 = NULL;
8108 	return;
8109 
8110 dropit:
8111 	if (vport && !(vport->load_flag & FC_UNLOADING))
8112 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8113 			"0111 Dropping received ELS cmd "
8114 			"Data: x%x x%x x%x\n",
8115 			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
8116 	phba->fc_stat.elsRcvDrop++;
8117 }
8118 
8119 /**
8120  * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
8121  * @phba: pointer to lpfc hba data structure.
8122  * @pring: pointer to a SLI ring.
8123  * @elsiocb: pointer to lpfc els iocb data structure.
8124  *
8125  * This routine is used to process an unsolicited event received from a SLI
8126  * (Service Level Interface) ring. The actual processing of the data buffer
8127  * associated with the unsolicited event is done by invoking the routine
8128  * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the
8129  * SLI ring on which the unsolicited event was received.
8130  **/
8131 void
8132 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8133 		     struct lpfc_iocbq *elsiocb)
8134 {
8135 	struct lpfc_vport *vport = phba->pport;
8136 	IOCB_t *icmd = &elsiocb->iocb;
8137 	dma_addr_t paddr;
8138 	struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
8139 	struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
8140 
8141 	elsiocb->context1 = NULL;
8142 	elsiocb->context2 = NULL;
8143 	elsiocb->context3 = NULL;
8144 
8145 	if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
8146 		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
8147 	} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
8148 		   (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) ==
8149 		   IOERR_RCV_BUFFER_WAITING) {
8150 		phba->fc_stat.NoRcvBuf++;
8151 		/* Not enough posted buffers; Try posting more buffers */
8152 		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
8153 			lpfc_post_buffer(phba, pring, 0);
8154 		return;
8155 	}
8156 
8157 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
8158 	    (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
8159 	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
8160 		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
8161 			vport = phba->pport;
8162 		else
8163 			vport = lpfc_find_vport_by_vpid(phba,
8164 						icmd->unsli3.rcvsli3.vpi);
8165 	}
8166 
8167 	/* If there are no BDEs associated
8168 	 * with this IOCB, there is nothing to do.
8169 	 */
8170 	if (icmd->ulpBdeCount == 0)
8171 		return;
8172 
8173 	/* type of ELS cmd is first 32bit word
8174 	 * in packet
8175 	 */
8176 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
8177 		elsiocb->context2 = bdeBuf1;
8178 	} else {
8179 		paddr = getPaddr(icmd->un.cont64[0].addrHigh,
8180 				 icmd->un.cont64[0].addrLow);
8181 		elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
8182 							     paddr);
8183 	}
8184 
8185 	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
8186 	/*
8187 	 * The different unsolicited event handlers would tell us
8188 	 * if they are done with "mp" by setting context2 to NULL.
8189 	 */
8190 	if (elsiocb->context2) {
8191 		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
8192 		elsiocb->context2 = NULL;
8193 	}
8194 
8195 	/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
8196 	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
8197 	    icmd->ulpBdeCount == 2) {
8198 		elsiocb->context2 = bdeBuf2;
8199 		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
8200 		/* free mp if we are done with it */
8201 		if (elsiocb->context2) {
8202 			lpfc_in_buf_free(phba, elsiocb->context2);
8203 			elsiocb->context2 = NULL;
8204 		}
8205 	}
8206 }
8207 
8208 static void
8209 lpfc_start_fdmi(struct lpfc_vport *vport)
8210 {
8211 	struct lpfc_nodelist *ndlp;
8212 
8213 	/* If this is the first time, allocate an ndlp and initialize
8214 	 * it. Otherwise, make sure the node is enabled and then do the
8215 	 * login.
8216 	 */
8217 	ndlp = lpfc_findnode_did(vport, FDMI_DID);
8218 	if (!ndlp) {
8219 		ndlp = lpfc_nlp_init(vport, FDMI_DID);
8220 		if (ndlp) {
8221 			ndlp->nlp_type |= NLP_FABRIC;
8222 		} else {
8223 			return;
8224 		}
8225 	}
8226 	if (!NLP_CHK_NODE_ACT(ndlp))
8227 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
8228 
8229 	if (ndlp) {
8230 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8231 		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
8232 	}
8233 }
8234 
8235 /**
8236  * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
8237  * @phba: pointer to lpfc hba data structure.
8238  * @vport: pointer to a virtual N_Port data structure.
8239  *
8240  * This routine issues a Port Login (PLOGI) to the Name Server with
8241  * State Change Request (SCR) for a @vport. This routine will create an
8242  * ndlp for the Name Server associated to the @vport if such node does
8243  * not already exist. The PLOGI to Name Server is issued by invoking the
8244  * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
8245  * (FDMI) is configured to the @vport, a FDMI node will be created and
8246  * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
8247  **/
8248 void
8249 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
8250 {
8251 	struct lpfc_nodelist *ndlp;
8252 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8253 
8254 	/*
8255 	 * If lpfc_delay_discovery parameter is set and the clean address
8256 	 * bit is cleared and fc fabric parameters chenged, delay FC NPort
8257 	 * discovery.
8258 	 */
8259 	spin_lock_irq(shost->host_lock);
8260 	if (vport->fc_flag & FC_DISC_DELAYED) {
8261 		spin_unlock_irq(shost->host_lock);
8262 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
8263 				"3334 Delay fc port discovery for %d seconds\n",
8264 				phba->fc_ratov);
8265 		mod_timer(&vport->delayed_disc_tmo,
8266 			jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
8267 		return;
8268 	}
8269 	spin_unlock_irq(shost->host_lock);
8270 
8271 	ndlp = lpfc_findnode_did(vport, NameServer_DID);
8272 	if (!ndlp) {
8273 		ndlp = lpfc_nlp_init(vport, NameServer_DID);
8274 		if (!ndlp) {
8275 			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8276 				lpfc_disc_start(vport);
8277 				return;
8278 			}
8279 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8280 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8281 					 "0251 NameServer login: no memory\n");
8282 			return;
8283 		}
8284 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
8285 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
8286 		if (!ndlp) {
8287 			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8288 				lpfc_disc_start(vport);
8289 				return;
8290 			}
8291 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8292 			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8293 					"0348 NameServer login: node freed\n");
8294 			return;
8295 		}
8296 	}
8297 	ndlp->nlp_type |= NLP_FABRIC;
8298 
8299 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
8300 
8301 	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
8302 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8303 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8304 				 "0252 Cannot issue NameServer login\n");
8305 		return;
8306 	}
8307 
8308 	if ((phba->cfg_enable_SmartSAN ||
8309 	     (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
8310 	     (vport->load_flag & FC_ALLOW_FDMI))
8311 		lpfc_start_fdmi(vport);
8312 }
8313 
8314 /**
8315  * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
8316  * @phba: pointer to lpfc hba data structure.
8317  * @pmb: pointer to the driver internal queue element for mailbox command.
8318  *
8319  * This routine is the completion callback function to register new vport
8320  * mailbox command. If the new vport mailbox command completes successfully,
8321  * the fabric registration login shall be performed on physical port (the
8322  * new vport created is actually a physical port, with VPI 0) or the port
8323  * login to Name Server for State Change Request (SCR) will be performed
8324  * on virtual port (real virtual port, with VPI greater than 0).
8325  **/
8326 static void
8327 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
8328 {
8329 	struct lpfc_vport *vport = pmb->vport;
8330 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8331 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
8332 	MAILBOX_t *mb = &pmb->u.mb;
8333 	int rc;
8334 
8335 	spin_lock_irq(shost->host_lock);
8336 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
8337 	spin_unlock_irq(shost->host_lock);
8338 
8339 	if (mb->mbxStatus) {
8340 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8341 				"0915 Register VPI failed : Status: x%x"
8342 				" upd bit: x%x \n", mb->mbxStatus,
8343 				 mb->un.varRegVpi.upd);
8344 		if (phba->sli_rev == LPFC_SLI_REV4 &&
8345 			mb->un.varRegVpi.upd)
8346 			goto mbox_err_exit ;
8347 
8348 		switch (mb->mbxStatus) {
8349 		case 0x11:	/* unsupported feature */
8350 		case 0x9603:	/* max_vpi exceeded */
8351 		case 0x9602:	/* Link event since CLEAR_LA */
8352 			/* giving up on vport registration */
8353 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8354 			spin_lock_irq(shost->host_lock);
8355 			vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
8356 			spin_unlock_irq(shost->host_lock);
8357 			lpfc_can_disctmo(vport);
8358 			break;
8359 		/* If reg_vpi fail with invalid VPI status, re-init VPI */
8360 		case 0x20:
8361 			spin_lock_irq(shost->host_lock);
8362 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
8363 			spin_unlock_irq(shost->host_lock);
8364 			lpfc_init_vpi(phba, pmb, vport->vpi);
8365 			pmb->vport = vport;
8366 			pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
8367 			rc = lpfc_sli_issue_mbox(phba, pmb,
8368 				MBX_NOWAIT);
8369 			if (rc == MBX_NOT_FINISHED) {
8370 				lpfc_printf_vlog(vport,
8371 					KERN_ERR, LOG_MBOX,
8372 					"2732 Failed to issue INIT_VPI"
8373 					" mailbox command\n");
8374 			} else {
8375 				lpfc_nlp_put(ndlp);
8376 				return;
8377 			}
8378 
8379 		default:
8380 			/* Try to recover from this error */
8381 			if (phba->sli_rev == LPFC_SLI_REV4)
8382 				lpfc_sli4_unreg_all_rpis(vport);
8383 			lpfc_mbx_unreg_vpi(vport);
8384 			spin_lock_irq(shost->host_lock);
8385 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
8386 			spin_unlock_irq(shost->host_lock);
8387 			if (mb->mbxStatus == MBX_NOT_FINISHED)
8388 				break;
8389 			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
8390 			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
8391 				if (phba->sli_rev == LPFC_SLI_REV4)
8392 					lpfc_issue_init_vfi(vport);
8393 				else
8394 					lpfc_initial_flogi(vport);
8395 			} else {
8396 				lpfc_initial_fdisc(vport);
8397 			}
8398 			break;
8399 		}
8400 	} else {
8401 		spin_lock_irq(shost->host_lock);
8402 		vport->vpi_state |= LPFC_VPI_REGISTERED;
8403 		spin_unlock_irq(shost->host_lock);
8404 		if (vport == phba->pport) {
8405 			if (phba->sli_rev < LPFC_SLI_REV4)
8406 				lpfc_issue_fabric_reglogin(vport);
8407 			else {
8408 				/*
8409 				 * If the physical port is instantiated using
8410 				 * FDISC, do not start vport discovery.
8411 				 */
8412 				if (vport->port_state != LPFC_FDISC)
8413 					lpfc_start_fdiscs(phba);
8414 				lpfc_do_scr_ns_plogi(phba, vport);
8415 			}
8416 		} else
8417 			lpfc_do_scr_ns_plogi(phba, vport);
8418 	}
8419 mbox_err_exit:
8420 	/* Now, we decrement the ndlp reference count held for this
8421 	 * callback function
8422 	 */
8423 	lpfc_nlp_put(ndlp);
8424 
8425 	mempool_free(pmb, phba->mbox_mem_pool);
8426 	return;
8427 }
8428 
8429 /**
8430  * lpfc_register_new_vport - Register a new vport with a HBA
8431  * @phba: pointer to lpfc hba data structure.
8432  * @vport: pointer to a host virtual N_Port data structure.
8433  * @ndlp: pointer to a node-list data structure.
8434  *
8435  * This routine registers the @vport as a new virtual port with a HBA.
8436  * It is done through a registering vpi mailbox command.
8437  **/
8438 void
8439 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
8440 			struct lpfc_nodelist *ndlp)
8441 {
8442 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8443 	LPFC_MBOXQ_t *mbox;
8444 
8445 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8446 	if (mbox) {
8447 		lpfc_reg_vpi(vport, mbox);
8448 		mbox->vport = vport;
8449 		mbox->context2 = lpfc_nlp_get(ndlp);
8450 		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
8451 		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
8452 		    == MBX_NOT_FINISHED) {
8453 			/* mailbox command not success, decrement ndlp
8454 			 * reference count for this command
8455 			 */
8456 			lpfc_nlp_put(ndlp);
8457 			mempool_free(mbox, phba->mbox_mem_pool);
8458 
8459 			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8460 				"0253 Register VPI: Can't send mbox\n");
8461 			goto mbox_err_exit;
8462 		}
8463 	} else {
8464 		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
8465 				 "0254 Register VPI: no memory\n");
8466 		goto mbox_err_exit;
8467 	}
8468 	return;
8469 
8470 mbox_err_exit:
8471 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8472 	spin_lock_irq(shost->host_lock);
8473 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
8474 	spin_unlock_irq(shost->host_lock);
8475 	return;
8476 }
8477 
8478 /**
8479  * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
8480  * @phba: pointer to lpfc hba data structure.
8481  *
8482  * This routine cancels the retry delay timers to all the vports.
8483  **/
8484 void
8485 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
8486 {
8487 	struct lpfc_vport **vports;
8488 	struct lpfc_nodelist *ndlp;
8489 	uint32_t link_state;
8490 	int i;
8491 
8492 	/* Treat this failure as linkdown for all vports */
8493 	link_state = phba->link_state;
8494 	lpfc_linkdown(phba);
8495 	phba->link_state = link_state;
8496 
8497 	vports = lpfc_create_vport_work_array(phba);
8498 
8499 	if (vports) {
8500 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
8501 			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
8502 			if (ndlp)
8503 				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
8504 			lpfc_els_flush_cmd(vports[i]);
8505 		}
8506 		lpfc_destroy_vport_work_array(phba, vports);
8507 	}
8508 }
8509 
8510 /**
8511  * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
8512  * @phba: pointer to lpfc hba data structure.
8513  *
8514  * This routine abort all pending discovery commands and
8515  * start a timer to retry FLOGI for the physical port
8516  * discovery.
8517  **/
8518 void
8519 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
8520 {
8521 	struct lpfc_nodelist *ndlp;
8522 	struct Scsi_Host  *shost;
8523 
8524 	/* Cancel the all vports retry delay retry timers */
8525 	lpfc_cancel_all_vport_retry_delay_timer(phba);
8526 
8527 	/* If fabric require FLOGI, then re-instantiate physical login */
8528 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
8529 	if (!ndlp)
8530 		return;
8531 
8532 	shost = lpfc_shost_from_vport(phba->pport);
8533 	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
8534 	spin_lock_irq(shost->host_lock);
8535 	ndlp->nlp_flag |= NLP_DELAY_TMO;
8536 	spin_unlock_irq(shost->host_lock);
8537 	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
8538 	phba->pport->port_state = LPFC_FLOGI;
8539 	return;
8540 }
8541 
8542 /**
8543  * lpfc_fabric_login_reqd - Check if FLOGI required.
8544  * @phba: pointer to lpfc hba data structure.
8545  * @cmdiocb: pointer to FDISC command iocb.
8546  * @rspiocb: pointer to FDISC response iocb.
8547  *
8548  * This routine checks if a FLOGI is reguired for FDISC
8549  * to succeed.
8550  **/
8551 static int
8552 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
8553 		struct lpfc_iocbq *cmdiocb,
8554 		struct lpfc_iocbq *rspiocb)
8555 {
8556 
8557 	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
8558 		(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
8559 		return 0;
8560 	else
8561 		return 1;
8562 }
8563 
8564 /**
8565  * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
8566  * @phba: pointer to lpfc hba data structure.
8567  * @cmdiocb: pointer to lpfc command iocb data structure.
8568  * @rspiocb: pointer to lpfc response iocb data structure.
8569  *
8570  * This routine is the completion callback function to a Fabric Discover
8571  * (FDISC) ELS command. Since all the FDISC ELS commands are issued
8572  * single threaded, each FDISC completion callback function will reset
8573  * the discovery timer for all vports such that the timers will not get
8574  * unnecessary timeout. The function checks the FDISC IOCB status. If error
8575  * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
8576  * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
8577  * assigned to the vport has been changed with the completion of the FDISC
8578  * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
8579  * are unregistered from the HBA, and then the lpfc_register_new_vport()
8580  * routine is invoked to register new vport with the HBA. Otherwise, the
8581  * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
8582  * Server for State Change Request (SCR).
8583  **/
8584 static void
8585 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8586 		    struct lpfc_iocbq *rspiocb)
8587 {
8588 	struct lpfc_vport *vport = cmdiocb->vport;
8589 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
8590 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
8591 	struct lpfc_nodelist *np;
8592 	struct lpfc_nodelist *next_np;
8593 	IOCB_t *irsp = &rspiocb->iocb;
8594 	struct lpfc_iocbq *piocb;
8595 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
8596 	struct serv_parm *sp;
8597 	uint8_t fabric_param_changed;
8598 
8599 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8600 			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
8601 			 irsp->ulpStatus, irsp->un.ulpWord[4],
8602 			 vport->fc_prevDID);
8603 	/* Since all FDISCs are being single threaded, we
8604 	 * must reset the discovery timer for ALL vports
8605 	 * waiting to send FDISC when one completes.
8606 	 */
8607 	list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
8608 		lpfc_set_disctmo(piocb->vport);
8609 	}
8610 
8611 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8612 		"FDISC cmpl:      status:x%x/x%x prevdid:x%x",
8613 		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
8614 
8615 	if (irsp->ulpStatus) {
8616 
8617 		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
8618 			lpfc_retry_pport_discovery(phba);
8619 			goto out;
8620 		}
8621 
8622 		/* Check for retry */
8623 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
8624 			goto out;
8625 		/* FDISC failed */
8626 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8627 				 "0126 FDISC failed. (x%x/x%x)\n",
8628 				 irsp->ulpStatus, irsp->un.ulpWord[4]);
8629 		goto fdisc_failed;
8630 	}
8631 	spin_lock_irq(shost->host_lock);
8632 	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
8633 	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
8634 	vport->fc_flag |= FC_FABRIC;
8635 	if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP)
8636 		vport->fc_flag |=  FC_PUBLIC_LOOP;
8637 	spin_unlock_irq(shost->host_lock);
8638 
8639 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
8640 	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
8641 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
8642 	if (!prsp)
8643 		goto out;
8644 	sp = prsp->virt + sizeof(uint32_t);
8645 	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
8646 	memcpy(&vport->fabric_portname, &sp->portName,
8647 		sizeof(struct lpfc_name));
8648 	memcpy(&vport->fabric_nodename, &sp->nodeName,
8649 		sizeof(struct lpfc_name));
8650 	if (fabric_param_changed &&
8651 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
8652 		/* If our NportID changed, we need to ensure all
8653 		 * remaining NPORTs get unreg_login'ed so we can
8654 		 * issue unreg_vpi.
8655 		 */
8656 		list_for_each_entry_safe(np, next_np,
8657 			&vport->fc_nodes, nlp_listp) {
8658 			if (!NLP_CHK_NODE_ACT(ndlp) ||
8659 			    (np->nlp_state != NLP_STE_NPR_NODE) ||
8660 			    !(np->nlp_flag & NLP_NPR_ADISC))
8661 				continue;
8662 			spin_lock_irq(shost->host_lock);
8663 			np->nlp_flag &= ~NLP_NPR_ADISC;
8664 			spin_unlock_irq(shost->host_lock);
8665 			lpfc_unreg_rpi(vport, np);
8666 		}
8667 		lpfc_cleanup_pending_mbox(vport);
8668 
8669 		if (phba->sli_rev == LPFC_SLI_REV4)
8670 			lpfc_sli4_unreg_all_rpis(vport);
8671 
8672 		lpfc_mbx_unreg_vpi(vport);
8673 		spin_lock_irq(shost->host_lock);
8674 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
8675 		if (phba->sli_rev == LPFC_SLI_REV4)
8676 			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
8677 		else
8678 			vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
8679 		spin_unlock_irq(shost->host_lock);
8680 	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
8681 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
8682 		/*
8683 		 * Driver needs to re-reg VPI in order for f/w
8684 		 * to update the MAC address.
8685 		 */
8686 		lpfc_register_new_vport(phba, vport, ndlp);
8687 		goto out;
8688 	}
8689 
8690 	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
8691 		lpfc_issue_init_vpi(vport);
8692 	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
8693 		lpfc_register_new_vport(phba, vport, ndlp);
8694 	else
8695 		lpfc_do_scr_ns_plogi(phba, vport);
8696 	goto out;
8697 fdisc_failed:
8698 	if (vport->fc_vport &&
8699 	    (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS))
8700 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8701 	/* Cancel discovery timer */
8702 	lpfc_can_disctmo(vport);
8703 	lpfc_nlp_put(ndlp);
8704 out:
8705 	lpfc_els_free_iocb(phba, cmdiocb);
8706 }
8707 
8708 /**
8709  * lpfc_issue_els_fdisc - Issue a fdisc iocb command
8710  * @vport: pointer to a virtual N_Port data structure.
8711  * @ndlp: pointer to a node-list data structure.
8712  * @retry: number of retries to the command IOCB.
8713  *
8714  * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
8715  * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
8716  * routine to issue the IOCB, which makes sure only one outstanding fabric
8717  * IOCB will be sent off HBA at any given time.
8718  *
8719  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
8720  * will be incremented by 1 for holding the ndlp and the reference to ndlp
8721  * will be stored into the context1 field of the IOCB for the completion
8722  * callback function to the FDISC ELS command.
8723  *
8724  * Return code
8725  *   0 - Successfully issued fdisc iocb command
8726  *   1 - Failed to issue fdisc iocb command
8727  **/
8728 static int
8729 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
8730 		     uint8_t retry)
8731 {
8732 	struct lpfc_hba *phba = vport->phba;
8733 	IOCB_t *icmd;
8734 	struct lpfc_iocbq *elsiocb;
8735 	struct serv_parm *sp;
8736 	uint8_t *pcmd;
8737 	uint16_t cmdsize;
8738 	int did = ndlp->nlp_DID;
8739 	int rc;
8740 
8741 	vport->port_state = LPFC_FDISC;
8742 	vport->fc_myDID = 0;
8743 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
8744 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
8745 				     ELS_CMD_FDISC);
8746 	if (!elsiocb) {
8747 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8748 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8749 				 "0255 Issue FDISC: no IOCB\n");
8750 		return 1;
8751 	}
8752 
8753 	icmd = &elsiocb->iocb;
8754 	icmd->un.elsreq64.myID = 0;
8755 	icmd->un.elsreq64.fl = 1;
8756 
8757 	/*
8758 	 * SLI3 ports require a different context type value than SLI4.
8759 	 * Catch SLI3 ports here and override the prep.
8760 	 */
8761 	if (phba->sli_rev == LPFC_SLI_REV3) {
8762 		icmd->ulpCt_h = 1;
8763 		icmd->ulpCt_l = 0;
8764 	}
8765 
8766 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
8767 	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
8768 	pcmd += sizeof(uint32_t); /* CSP Word 1 */
8769 	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
8770 	sp = (struct serv_parm *) pcmd;
8771 	/* Setup CSPs accordingly for Fabric */
8772 	sp->cmn.e_d_tov = 0;
8773 	sp->cmn.w2.r_a_tov = 0;
8774 	sp->cmn.virtual_fabric_support = 0;
8775 	sp->cls1.classValid = 0;
8776 	sp->cls2.seqDelivery = 1;
8777 	sp->cls3.seqDelivery = 1;
8778 
8779 	pcmd += sizeof(uint32_t); /* CSP Word 2 */
8780 	pcmd += sizeof(uint32_t); /* CSP Word 3 */
8781 	pcmd += sizeof(uint32_t); /* CSP Word 4 */
8782 	pcmd += sizeof(uint32_t); /* Port Name */
8783 	memcpy(pcmd, &vport->fc_portname, 8);
8784 	pcmd += sizeof(uint32_t); /* Node Name */
8785 	pcmd += sizeof(uint32_t); /* Node Name */
8786 	memcpy(pcmd, &vport->fc_nodename, 8);
8787 	memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
8788 	lpfc_set_disctmo(vport);
8789 
8790 	phba->fc_stat.elsXmitFDISC++;
8791 	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
8792 
8793 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8794 		"Issue FDISC:     did:x%x",
8795 		did, 0, 0);
8796 
8797 	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
8798 	if (rc == IOCB_ERROR) {
8799 		lpfc_els_free_iocb(phba, elsiocb);
8800 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
8801 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
8802 				 "0256 Issue FDISC: Cannot send IOCB\n");
8803 		return 1;
8804 	}
8805 	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
8806 	return 0;
8807 }
8808 
8809 /**
8810  * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
8811  * @phba: pointer to lpfc hba data structure.
8812  * @cmdiocb: pointer to lpfc command iocb data structure.
8813  * @rspiocb: pointer to lpfc response iocb data structure.
8814  *
8815  * This routine is the completion callback function to the issuing of a LOGO
8816  * ELS command off a vport. It frees the command IOCB and then decrement the
8817  * reference count held on ndlp for this completion function, indicating that
8818  * the reference to the ndlp is no long needed. Note that the
8819  * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
8820  * callback function and an additional explicit ndlp reference decrementation
8821  * will trigger the actual release of the ndlp.
8822  **/
8823 static void
8824 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
8825 			struct lpfc_iocbq *rspiocb)
8826 {
8827 	struct lpfc_vport *vport = cmdiocb->vport;
8828 	IOCB_t *irsp;
8829 	struct lpfc_nodelist *ndlp;
8830 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8831 
8832 	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
8833 	irsp = &rspiocb->iocb;
8834 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8835 		"LOGO npiv cmpl:  status:x%x/x%x did:x%x",
8836 		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
8837 
8838 	lpfc_els_free_iocb(phba, cmdiocb);
8839 	vport->unreg_vpi_cmpl = VPORT_ERROR;
8840 
8841 	/* Trigger the release of the ndlp after logo */
8842 	lpfc_nlp_put(ndlp);
8843 
8844 	/* NPIV LOGO completes to NPort <nlp_DID> */
8845 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8846 			 "2928 NPIV LOGO completes to NPort x%x "
8847 			 "Data: x%x x%x x%x x%x\n",
8848 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
8849 			 irsp->ulpTimeout, vport->num_disc_nodes);
8850 
8851 	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
8852 		spin_lock_irq(shost->host_lock);
8853 		vport->fc_flag &= ~FC_NDISC_ACTIVE;
8854 		vport->fc_flag &= ~FC_FABRIC;
8855 		spin_unlock_irq(shost->host_lock);
8856 		lpfc_can_disctmo(vport);
8857 	}
8858 }
8859 
8860 /**
8861  * lpfc_issue_els_npiv_logo - Issue a logo off a vport
8862  * @vport: pointer to a virtual N_Port data structure.
8863  * @ndlp: pointer to a node-list data structure.
8864  *
8865  * This routine issues a LOGO ELS command to an @ndlp off a @vport.
8866  *
8867  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
8868  * will be incremented by 1 for holding the ndlp and the reference to ndlp
8869  * will be stored into the context1 field of the IOCB for the completion
8870  * callback function to the LOGO ELS command.
8871  *
8872  * Return codes
8873  *   0 - Successfully issued logo off the @vport
8874  *   1 - Failed to issue logo off the @vport
8875  **/
8876 int
8877 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
8878 {
8879 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8880 	struct lpfc_hba  *phba = vport->phba;
8881 	struct lpfc_iocbq *elsiocb;
8882 	uint8_t *pcmd;
8883 	uint16_t cmdsize;
8884 
8885 	cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
8886 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
8887 				     ELS_CMD_LOGO);
8888 	if (!elsiocb)
8889 		return 1;
8890 
8891 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
8892 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
8893 	pcmd += sizeof(uint32_t);
8894 
8895 	/* Fill in LOGO payload */
8896 	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
8897 	pcmd += sizeof(uint32_t);
8898 	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
8899 
8900 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
8901 		"Issue LOGO npiv  did:x%x flg:x%x",
8902 		ndlp->nlp_DID, ndlp->nlp_flag, 0);
8903 
8904 	elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
8905 	spin_lock_irq(shost->host_lock);
8906 	ndlp->nlp_flag |= NLP_LOGO_SND;
8907 	spin_unlock_irq(shost->host_lock);
8908 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
8909 	    IOCB_ERROR) {
8910 		spin_lock_irq(shost->host_lock);
8911 		ndlp->nlp_flag &= ~NLP_LOGO_SND;
8912 		spin_unlock_irq(shost->host_lock);
8913 		lpfc_els_free_iocb(phba, elsiocb);
8914 		return 1;
8915 	}
8916 	return 0;
8917 }
8918 
8919 /**
8920  * lpfc_fabric_block_timeout - Handler function to the fabric block timer
8921  * @ptr: holder for the timer function associated data.
8922  *
8923  * This routine is invoked by the fabric iocb block timer after
8924  * timeout. It posts the fabric iocb block timeout event by setting the
8925  * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes
8926  * lpfc_worker_wake_up() routine to wake up the worker thread. It is for
8927  * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the
8928  * posted event WORKER_FABRIC_BLOCK_TMO.
8929  **/
8930 void
8931 lpfc_fabric_block_timeout(unsigned long ptr)
8932 {
8933 	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
8934 	unsigned long iflags;
8935 	uint32_t tmo_posted;
8936 
8937 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
8938 	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
8939 	if (!tmo_posted)
8940 		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
8941 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
8942 
8943 	if (!tmo_posted)
8944 		lpfc_worker_wake_up(phba);
8945 	return;
8946 }
8947 
8948 /**
8949  * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
8950  * @phba: pointer to lpfc hba data structure.
8951  *
8952  * This routine issues one fabric iocb from the driver internal list to
8953  * the HBA. It first checks whether it's ready to issue one fabric iocb to
8954  * the HBA (whether there is no outstanding fabric iocb). If so, it shall
8955  * remove one pending fabric iocb from the driver internal list and invokes
8956  * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
8957  **/
8958 static void
8959 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
8960 {
8961 	struct lpfc_iocbq *iocb;
8962 	unsigned long iflags;
8963 	int ret;
8964 	IOCB_t *cmd;
8965 
8966 repeat:
8967 	iocb = NULL;
8968 	spin_lock_irqsave(&phba->hbalock, iflags);
8969 	/* Post any pending iocb to the SLI layer */
8970 	if (atomic_read(&phba->fabric_iocb_count) == 0) {
8971 		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
8972 				 list);
8973 		if (iocb)
8974 			/* Increment fabric iocb count to hold the position */
8975 			atomic_inc(&phba->fabric_iocb_count);
8976 	}
8977 	spin_unlock_irqrestore(&phba->hbalock, iflags);
8978 	if (iocb) {
8979 		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
8980 		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
8981 		iocb->iocb_flag |= LPFC_IO_FABRIC;
8982 
8983 		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
8984 			"Fabric sched1:   ste:x%x",
8985 			iocb->vport->port_state, 0, 0);
8986 
8987 		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
8988 
8989 		if (ret == IOCB_ERROR) {
8990 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
8991 			iocb->fabric_iocb_cmpl = NULL;
8992 			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
8993 			cmd = &iocb->iocb;
8994 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
8995 			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
8996 			iocb->iocb_cmpl(phba, iocb, iocb);
8997 
8998 			atomic_dec(&phba->fabric_iocb_count);
8999 			goto repeat;
9000 		}
9001 	}
9002 
9003 	return;
9004 }
9005 
9006 /**
9007  * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
9008  * @phba: pointer to lpfc hba data structure.
9009  *
9010  * This routine unblocks the  issuing fabric iocb command. The function
9011  * will clear the fabric iocb block bit and then invoke the routine
9012  * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb
9013  * from the driver internal fabric iocb list.
9014  **/
9015 void
9016 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
9017 {
9018 	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9019 
9020 	lpfc_resume_fabric_iocbs(phba);
9021 	return;
9022 }
9023 
9024 /**
9025  * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
9026  * @phba: pointer to lpfc hba data structure.
9027  *
9028  * This routine blocks the issuing fabric iocb for a specified amount of
9029  * time (currently 100 ms). This is done by set the fabric iocb block bit
9030  * and set up a timeout timer for 100ms. When the block bit is set, no more
9031  * fabric iocb will be issued out of the HBA.
9032  **/
9033 static void
9034 lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
9035 {
9036 	int blocked;
9037 
9038 	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9039 	/* Start a timer to unblock fabric iocbs after 100ms */
9040 	if (!blocked)
9041 		mod_timer(&phba->fabric_block_timer,
9042 			  jiffies + msecs_to_jiffies(100));
9043 
9044 	return;
9045 }
9046 
9047 /**
9048  * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
9049  * @phba: pointer to lpfc hba data structure.
9050  * @cmdiocb: pointer to lpfc command iocb data structure.
9051  * @rspiocb: pointer to lpfc response iocb data structure.
9052  *
9053  * This routine is the callback function that is put to the fabric iocb's
9054  * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
9055  * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
9056  * function first restores and invokes the original iocb's callback function
9057  * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
9058  * fabric bound iocb from the driver internal fabric iocb list onto the wire.
9059  **/
9060 static void
9061 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9062 	struct lpfc_iocbq *rspiocb)
9063 {
9064 	struct ls_rjt stat;
9065 
9066 	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
9067 
9068 	switch (rspiocb->iocb.ulpStatus) {
9069 		case IOSTAT_NPORT_RJT:
9070 		case IOSTAT_FABRIC_RJT:
9071 			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
9072 				lpfc_block_fabric_iocbs(phba);
9073 			}
9074 			break;
9075 
9076 		case IOSTAT_NPORT_BSY:
9077 		case IOSTAT_FABRIC_BSY:
9078 			lpfc_block_fabric_iocbs(phba);
9079 			break;
9080 
9081 		case IOSTAT_LS_RJT:
9082 			stat.un.lsRjtError =
9083 				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
9084 			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
9085 				(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
9086 				lpfc_block_fabric_iocbs(phba);
9087 			break;
9088 	}
9089 
9090 	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
9091 
9092 	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
9093 	cmdiocb->fabric_iocb_cmpl = NULL;
9094 	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
9095 	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
9096 
9097 	atomic_dec(&phba->fabric_iocb_count);
9098 	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
9099 		/* Post any pending iocbs to HBA */
9100 		lpfc_resume_fabric_iocbs(phba);
9101 	}
9102 }
9103 
9104 /**
9105  * lpfc_issue_fabric_iocb - Issue a fabric iocb command
9106  * @phba: pointer to lpfc hba data structure.
9107  * @iocb: pointer to lpfc command iocb data structure.
9108  *
9109  * This routine is used as the top-level API for issuing a fabric iocb command
9110  * such as FLOGI and FDISC. To accommodate certain switch fabric, this driver
9111  * function makes sure that only one fabric bound iocb will be outstanding at
9112  * any given time. As such, this function will first check to see whether there
9113  * is already an outstanding fabric iocb on the wire. If so, it will put the
9114  * newly issued iocb onto the driver internal fabric iocb list, waiting to be
9115  * issued later. Otherwise, it will issue the iocb on the wire and update the
9116  * fabric iocb count it indicate that there is one fabric iocb on the wire.
9117  *
9118  * Note, this implementation has a potential sending out fabric IOCBs out of
9119  * order. The problem is caused by the construction of the "ready" boolen does
9120  * not include the condition that the internal fabric IOCB list is empty. As
9121  * such, it is possible a fabric IOCB issued by this routine might be "jump"
9122  * ahead of the fabric IOCBs in the internal list.
9123  *
9124  * Return code
9125  *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
9126  *   IOCB_ERROR - failed to issue fabric iocb
9127  **/
9128 static int
9129 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
9130 {
9131 	unsigned long iflags;
9132 	int ready;
9133 	int ret;
9134 
9135 	BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
9136 
9137 	spin_lock_irqsave(&phba->hbalock, iflags);
9138 	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
9139 		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
9140 
9141 	if (ready)
9142 		/* Increment fabric iocb count to hold the position */
9143 		atomic_inc(&phba->fabric_iocb_count);
9144 	spin_unlock_irqrestore(&phba->hbalock, iflags);
9145 	if (ready) {
9146 		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
9147 		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
9148 		iocb->iocb_flag |= LPFC_IO_FABRIC;
9149 
9150 		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
9151 			"Fabric sched2:   ste:x%x",
9152 			iocb->vport->port_state, 0, 0);
9153 
9154 		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
9155 
9156 		if (ret == IOCB_ERROR) {
9157 			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
9158 			iocb->fabric_iocb_cmpl = NULL;
9159 			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
9160 			atomic_dec(&phba->fabric_iocb_count);
9161 		}
9162 	} else {
9163 		spin_lock_irqsave(&phba->hbalock, iflags);
9164 		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
9165 		spin_unlock_irqrestore(&phba->hbalock, iflags);
9166 		ret = IOCB_SUCCESS;
9167 	}
9168 	return ret;
9169 }
9170 
9171 /**
9172  * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
9173  * @vport: pointer to a virtual N_Port data structure.
9174  *
9175  * This routine aborts all the IOCBs associated with a @vport from the
9176  * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9177  * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
9178  * list, removes each IOCB associated with the @vport off the list, set the
9179  * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
9180  * associated with the IOCB.
9181  **/
9182 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
9183 {
9184 	LIST_HEAD(completions);
9185 	struct lpfc_hba  *phba = vport->phba;
9186 	struct lpfc_iocbq *tmp_iocb, *piocb;
9187 
9188 	spin_lock_irq(&phba->hbalock);
9189 	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9190 				 list) {
9191 
9192 		if (piocb->vport != vport)
9193 			continue;
9194 
9195 		list_move_tail(&piocb->list, &completions);
9196 	}
9197 	spin_unlock_irq(&phba->hbalock);
9198 
9199 	/* Cancel all the IOCBs from the completions list */
9200 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9201 			      IOERR_SLI_ABORTED);
9202 }
9203 
9204 /**
9205  * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
9206  * @ndlp: pointer to a node-list data structure.
9207  *
9208  * This routine aborts all the IOCBs associated with an @ndlp from the
9209  * driver internal fabric IOCB list. The list contains fabric IOCBs to be
9210  * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
9211  * list, removes each IOCB associated with the @ndlp off the list, set the
9212  * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function
9213  * associated with the IOCB.
9214  **/
9215 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
9216 {
9217 	LIST_HEAD(completions);
9218 	struct lpfc_hba  *phba = ndlp->phba;
9219 	struct lpfc_iocbq *tmp_iocb, *piocb;
9220 	struct lpfc_sli_ring *pring;
9221 
9222 	pring = lpfc_phba_elsring(phba);
9223 
9224 	spin_lock_irq(&phba->hbalock);
9225 	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
9226 				 list) {
9227 		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
9228 
9229 			list_move_tail(&piocb->list, &completions);
9230 		}
9231 	}
9232 	spin_unlock_irq(&phba->hbalock);
9233 
9234 	/* Cancel all the IOCBs from the completions list */
9235 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9236 			      IOERR_SLI_ABORTED);
9237 }
9238 
9239 /**
9240  * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
9241  * @phba: pointer to lpfc hba data structure.
9242  *
9243  * This routine aborts all the IOCBs currently on the driver internal
9244  * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
9245  * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
9246  * list, removes IOCBs off the list, set the status feild to
9247  * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
9248  * the IOCB.
9249  **/
9250 void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
9251 {
9252 	LIST_HEAD(completions);
9253 
9254 	spin_lock_irq(&phba->hbalock);
9255 	list_splice_init(&phba->fabric_iocb_list, &completions);
9256 	spin_unlock_irq(&phba->hbalock);
9257 
9258 	/* Cancel all the IOCBs from the completions list */
9259 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9260 			      IOERR_SLI_ABORTED);
9261 }
9262 
9263 /**
9264  * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
9265  * @vport: pointer to lpfc vport data structure.
9266  *
9267  * This routine is invoked by the vport cleanup for deletions and the cleanup
9268  * for an ndlp on removal.
9269  **/
9270 void
9271 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
9272 {
9273 	struct lpfc_hba *phba = vport->phba;
9274 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
9275 	unsigned long iflag = 0;
9276 
9277 	spin_lock_irqsave(&phba->hbalock, iflag);
9278 	spin_lock(&phba->sli4_hba.sgl_list_lock);
9279 	list_for_each_entry_safe(sglq_entry, sglq_next,
9280 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
9281 		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
9282 			sglq_entry->ndlp = NULL;
9283 	}
9284 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
9285 	spin_unlock_irqrestore(&phba->hbalock, iflag);
9286 	return;
9287 }
9288 
9289 /**
9290  * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
9291  * @phba: pointer to lpfc hba data structure.
9292  * @axri: pointer to the els xri abort wcqe structure.
9293  *
9294  * This routine is invoked by the worker thread to process a SLI4 slow-path
9295  * ELS aborted xri.
9296  **/
9297 void
9298 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
9299 			  struct sli4_wcqe_xri_aborted *axri)
9300 {
9301 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
9302 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
9303 	uint16_t lxri = 0;
9304 
9305 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
9306 	unsigned long iflag = 0;
9307 	struct lpfc_nodelist *ndlp;
9308 	struct lpfc_sli_ring *pring;
9309 
9310 	pring = lpfc_phba_elsring(phba);
9311 
9312 	spin_lock_irqsave(&phba->hbalock, iflag);
9313 	spin_lock(&phba->sli4_hba.sgl_list_lock);
9314 	list_for_each_entry_safe(sglq_entry, sglq_next,
9315 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
9316 		if (sglq_entry->sli4_xritag == xri) {
9317 			list_del(&sglq_entry->list);
9318 			ndlp = sglq_entry->ndlp;
9319 			sglq_entry->ndlp = NULL;
9320 			list_add_tail(&sglq_entry->list,
9321 				&phba->sli4_hba.lpfc_els_sgl_list);
9322 			sglq_entry->state = SGL_FREED;
9323 			spin_unlock(&phba->sli4_hba.sgl_list_lock);
9324 			spin_unlock_irqrestore(&phba->hbalock, iflag);
9325 			lpfc_set_rrq_active(phba, ndlp,
9326 				sglq_entry->sli4_lxritag,
9327 				rxid, 1);
9328 
9329 			/* Check if TXQ queue needs to be serviced */
9330 			if (!(list_empty(&pring->txq)))
9331 				lpfc_worker_wake_up(phba);
9332 			return;
9333 		}
9334 	}
9335 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
9336 	lxri = lpfc_sli4_xri_inrange(phba, xri);
9337 	if (lxri == NO_XRI) {
9338 		spin_unlock_irqrestore(&phba->hbalock, iflag);
9339 		return;
9340 	}
9341 	spin_lock(&phba->sli4_hba.sgl_list_lock);
9342 	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
9343 	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
9344 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
9345 		spin_unlock_irqrestore(&phba->hbalock, iflag);
9346 		return;
9347 	}
9348 	sglq_entry->state = SGL_XRI_ABORTED;
9349 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
9350 	spin_unlock_irqrestore(&phba->hbalock, iflag);
9351 	return;
9352 }
9353 
9354 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
9355  * @vport: pointer to virtual port object.
9356  * @ndlp: nodelist pointer for the impacted node.
9357  *
9358  * The driver calls this routine in response to an SLI4 XRI ABORT CQE
9359  * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
9360  * the driver is required to send a LOGO to the remote node before it
9361  * attempts to recover its login to the remote node.
9362  */
9363 void
9364 lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
9365 			   struct lpfc_nodelist *ndlp)
9366 {
9367 	struct Scsi_Host *shost;
9368 	struct lpfc_hba *phba;
9369 	unsigned long flags = 0;
9370 
9371 	shost = lpfc_shost_from_vport(vport);
9372 	phba = vport->phba;
9373 	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
9374 		lpfc_printf_log(phba, KERN_INFO,
9375 				LOG_SLI, "3093 No rport recovery needed. "
9376 				"rport in state 0x%x\n", ndlp->nlp_state);
9377 		return;
9378 	}
9379 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9380 			"3094 Start rport recovery on shost id 0x%x "
9381 			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
9382 			"flags 0x%x\n",
9383 			shost->host_no, ndlp->nlp_DID,
9384 			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
9385 			ndlp->nlp_flag);
9386 	/*
9387 	 * The rport is not responding.  Remove the FCP-2 flag to prevent
9388 	 * an ADISC in the follow-up recovery code.
9389 	 */
9390 	spin_lock_irqsave(shost->host_lock, flags);
9391 	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
9392 	spin_unlock_irqrestore(shost->host_lock, flags);
9393 	lpfc_issue_els_logo(vport, ndlp, 0);
9394 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
9395 }
9396 
9397