xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_ct.c (revision 7b7fd0ac7dc1ffcaf24d9bca0f051b0168e43cd4)
1  /*******************************************************************
2   * This file is part of the Emulex Linux Device Driver for         *
3   * Fibre Channel Host Bus Adapters.                                *
4   * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
5   * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6   * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7   * EMULEX and SLI are trademarks of Emulex.                        *
8   * www.broadcom.com                                                *
9   *                                                                 *
10   * This program is free software; you can redistribute it and/or   *
11   * modify it under the terms of version 2 of the GNU General       *
12   * Public License as published by the Free Software Foundation.    *
13   * This program is distributed in the hope that it will be useful. *
14   * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
15   * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
16   * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
17   * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18   * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
19   * more details, a copy of which can be found in the file COPYING  *
20   * included with this package.                                     *
21   *******************************************************************/
22  
23  /*
24   * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS
25   */
26  
27  #include <linux/blkdev.h>
28  #include <linux/pci.h>
29  #include <linux/interrupt.h>
30  #include <linux/slab.h>
31  #include <linux/utsname.h>
32  
33  #include <scsi/scsi.h>
34  #include <scsi/scsi_device.h>
35  #include <scsi/scsi_host.h>
36  #include <scsi/scsi_transport_fc.h>
37  #include <scsi/fc/fc_fs.h>
38  
39  #include "lpfc_hw4.h"
40  #include "lpfc_hw.h"
41  #include "lpfc_sli.h"
42  #include "lpfc_sli4.h"
43  #include "lpfc_nl.h"
44  #include "lpfc_disc.h"
45  #include "lpfc.h"
46  #include "lpfc_scsi.h"
47  #include "lpfc_logmsg.h"
48  #include "lpfc_crtn.h"
49  #include "lpfc_version.h"
50  #include "lpfc_vport.h"
51  #include "lpfc_debugfs.h"
52  
53  /* FDMI Port Speed definitions - FC-GS-7 */
54  #define HBA_PORTSPEED_1GFC		0x00000001	/* 1G FC */
55  #define HBA_PORTSPEED_2GFC		0x00000002	/* 2G FC */
56  #define HBA_PORTSPEED_4GFC		0x00000008	/* 4G FC */
57  #define HBA_PORTSPEED_10GFC		0x00000004	/* 10G FC */
58  #define HBA_PORTSPEED_8GFC		0x00000010	/* 8G FC */
59  #define HBA_PORTSPEED_16GFC		0x00000020	/* 16G FC */
60  #define HBA_PORTSPEED_32GFC		0x00000040	/* 32G FC */
61  #define HBA_PORTSPEED_20GFC		0x00000080	/* 20G FC */
62  #define HBA_PORTSPEED_40GFC		0x00000100	/* 40G FC */
63  #define HBA_PORTSPEED_128GFC		0x00000200	/* 128G FC */
64  #define HBA_PORTSPEED_64GFC		0x00000400	/* 64G FC */
65  #define HBA_PORTSPEED_256GFC		0x00000800	/* 256G FC */
66  #define HBA_PORTSPEED_UNKNOWN		0x00008000	/* Unknown */
67  #define HBA_PORTSPEED_10GE		0x00010000	/* 10G E */
68  #define HBA_PORTSPEED_40GE		0x00020000	/* 40G E */
69  #define HBA_PORTSPEED_100GE		0x00040000	/* 100G E */
70  #define HBA_PORTSPEED_25GE		0x00080000	/* 25G E */
71  #define HBA_PORTSPEED_50GE		0x00100000	/* 50G E */
72  #define HBA_PORTSPEED_400GE		0x00200000	/* 400G E */
73  
74  #define FOURBYTES	4
75  
76  
77  static char *lpfc_release_version = LPFC_DRIVER_VERSION;
78  static void
79  lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
80  		      struct lpfc_iocbq *rspiocb);
81  
82  static void
83  lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
84  			  struct lpfc_dmabuf *mp, uint32_t size)
85  {
86  	if (!mp) {
87  		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
88  				"0146 Ignoring unsolicited CT No HBQ "
89  				"status = x%x\n",
90  				get_job_ulpstatus(phba, piocbq));
91  	}
92  	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
93  			"0145 Ignoring unsolicited CT HBQ Size:%d "
94  			"status = x%x\n",
95  			size, get_job_ulpstatus(phba, piocbq));
96  }
97  
98  static void
99  lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
100  		     struct lpfc_dmabuf *mp, uint32_t size)
101  {
102  	lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
103  }
104  
105  /**
106   * lpfc_ct_unsol_cmpl - Completion callback function for unsol ct commands
107   * @phba: pointer to lpfc hba data structure.
108   * @cmdiocb: pointer to lpfc command iocb data structure.
109   * @rspiocb: pointer to lpfc response iocb data structure.
110   *
111   * This routine is the completion callback for the unsolicited CT reject command.
112   * The memory allocated in the reject command path is freed up here.
113   **/
114  static void
115  lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
116  		   struct lpfc_iocbq *rspiocb)
117  {
118  	struct lpfc_nodelist *ndlp;
119  	struct lpfc_dmabuf *mp, *bmp;
120  
121  	ndlp = cmdiocb->ndlp;
122  	if (ndlp)
123  		lpfc_nlp_put(ndlp);
124  
125  	mp = cmdiocb->rsp_dmabuf;
126  	bmp = cmdiocb->bpl_dmabuf;
127  	if (mp) {
128  		lpfc_mbuf_free(phba, mp->virt, mp->phys);
129  		kfree(mp);
130  		cmdiocb->rsp_dmabuf = NULL;
131  	}
132  
133  	if (bmp) {
134  		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
135  		kfree(bmp);
136  		cmdiocb->bpl_dmabuf = NULL;
137  	}
138  
139  	lpfc_sli_release_iocbq(phba, cmdiocb);
140  }
141  
142  /**
143   * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
144   * @ndlp: pointer to a node-list data structure.
145   * @ct_req: pointer to the CT request data structure.
146   * @ulp_context: context of received UNSOL CT command
147   * @ox_id: ox_id of the UNSOL CT command
148   *
149   * This routine is invoked by the lpfc_ct_handle_mibreq routine to send a
150   * reject response for CT commands that the driver does not handle.
151   **/
152  static void
153  lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
154  		     struct lpfc_sli_ct_request *ct_req,
155  		     u16 ulp_context, u16 ox_id)
156  {
157  	struct lpfc_vport *vport = ndlp->vport;
158  	struct lpfc_hba *phba = vport->phba;
159  	struct lpfc_sli_ct_request *ct_rsp;
160  	struct lpfc_iocbq *cmdiocbq = NULL;
161  	struct lpfc_dmabuf *bmp = NULL;
162  	struct lpfc_dmabuf *mp = NULL;
163  	struct ulp_bde64 *bpl;
164  	u8 rc = 0;
165  	u32 tmo;
166  
167  	/* fill in BDEs for command */
168  	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
169  	if (!mp) {
170  		rc = 1;
171  		goto ct_exit;
172  	}
173  
174  	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
175  	if (!mp->virt) {
176  		rc = 2;
177  		goto ct_free_mp;
178  	}
179  
180  	/* Allocate buffer for Buffer ptr list */
181  	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
182  	if (!bmp) {
183  		rc = 3;
184  		goto ct_free_mpvirt;
185  	}
186  
187  	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys);
188  	if (!bmp->virt) {
189  		rc = 4;
190  		goto ct_free_bmp;
191  	}
192  
193  	INIT_LIST_HEAD(&mp->list);
194  	INIT_LIST_HEAD(&bmp->list);
195  
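	/* One 64-bit BDE describes the reject payload; the response built below
	 * is just the CT preamble carrying the FS_RJT reason/explanation, with
	 * no extra payload.
	 */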
196  	bpl = (struct ulp_bde64 *)bmp->virt;
197  	memset(bpl, 0, sizeof(struct ulp_bde64));
198  	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
199  	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
200  	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
201  	bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
202  	bpl->tus.w = le32_to_cpu(bpl->tus.w);
203  
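	/* Build the CT FS_RJT: echo the FsType/FsSubType from the request and
	 * report the command as not supported.
	 */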
204  	ct_rsp = (struct lpfc_sli_ct_request *)mp->virt;
205  	memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request));
206  
207  	ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION;
208  	ct_rsp->RevisionId.bits.InId = 0;
209  	ct_rsp->FsType = ct_req->FsType;
210  	ct_rsp->FsSubType = ct_req->FsSubType;
211  	ct_rsp->CommandResponse.bits.Size = 0;
212  	ct_rsp->CommandResponse.bits.CmdRsp =
213  		cpu_to_be16(SLI_CT_RESPONSE_FS_RJT);
214  	ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED;
215  	ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL;
216  
217  	cmdiocbq = lpfc_sli_get_iocbq(phba);
218  	if (!cmdiocbq) {
219  		rc = 5;
220  		goto ct_free_bmpvirt;
221  	}
222  
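	/* SLI4 addresses the sequence through the node's RPI mapping and echoes
	 * the OX_ID of the unsolicited exchange; SLI3 uses the received
	 * ulp_context instead.
	 */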
223  	if (phba->sli_rev == LPFC_SLI_REV4) {
224  		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
225  					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
226  					 ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
227  					 CMD_XMIT_SEQUENCE64_WQE);
228  	} else {
229  		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
230  					 FC_RCTL_DD_SOL_CTL, 1,
231  					 CMD_XMIT_SEQUENCE64_CX);
232  	}
233  
234  	/* Save for completion so we can release these resources */
235  	cmdiocbq->rsp_dmabuf = mp;
236  	cmdiocbq->bpl_dmabuf = bmp;
237  	cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
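	/* As noted in lpfc_gen_req(), the FC spec calls for 3 * R_A_TOV for
	 * CT requests.
	 */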
238  	tmo = (3 * phba->fc_ratov);
239  
240  	cmdiocbq->retry = 0;
241  	cmdiocbq->vport = vport;
242  	cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
243  
244  	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
245  	if (!cmdiocbq->ndlp)
246  		goto ct_no_ndlp;
247  
248  	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
249  	if (rc) {
250  		lpfc_nlp_put(ndlp);
251  		goto ct_no_ndlp;
252  	}
253  	return;
254  
255  ct_no_ndlp:
256  	rc = 6;
257  	lpfc_sli_release_iocbq(phba, cmdiocbq);
258  ct_free_bmpvirt:
259  	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
260  ct_free_bmp:
261  	kfree(bmp);
262  ct_free_mpvirt:
263  	lpfc_mbuf_free(phba, mp->virt, mp->phys);
264  ct_free_mp:
265  	kfree(mp);
266  ct_exit:
267  	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
268  			 "6440 Unsol CT: Rsp err %d Data: x%x\n",
269  			 rc, vport->fc_flag);
270  }
271  
272  /**
273   * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
274   * @phba: pointer to lpfc hba data structure.
275   * @ctiocbq: pointer to lpfc CT command iocb data structure.
276   *
277   * This routine is used for processing the IOCB associated with an unsolicited
278   * CT MIB request. It first determines whether there is an existing ndlp that
279   * matches the DID from the unsolicited IOCB. If not, it will return.
280   **/
281  static void
282  lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
283  {
284  	struct lpfc_sli_ct_request *ct_req;
285  	struct lpfc_nodelist *ndlp = NULL;
286  	struct lpfc_vport *vport = ctiocbq->vport;
287  	u32 ulp_status = get_job_ulpstatus(phba, ctiocbq);
288  	u32 ulp_word4 = get_job_word4(phba, ctiocbq);
289  	u32 did;
290  	u16 mi_cmd;
291  
292  	did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
293  	if (ulp_status) {
294  		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
295  				 "6438 Unsol CT: status:x%x/x%x did : x%x\n",
296  				 ulp_status, ulp_word4, did);
297  		return;
298  	}
299  
300  	/* Ignore traffic received during vport shutdown */
301  	if (vport->fc_flag & FC_UNLOADING)
302  		return;
303  
304  	ndlp = lpfc_findnode_did(vport, did);
305  	if (!ndlp) {
306  		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
307  				 "6439 Unsol CT: NDLP Not Found for DID : x%x",
308  				 did);
309  		return;
310  	}
311  
312  	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
313  
314  	mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
315  	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
316  			 "6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
317  	lpfc_ct_reject_event(ndlp, ct_req,
318  			     bf_get(wqe_ctxt_tag,
319  				    &ctiocbq->wqe.xmit_els_rsp.wqe_com),
320  			     bf_get(wqe_rcvoxid,
321  				    &ctiocbq->wqe.xmit_els_rsp.wqe_com));
322  }
323  
324  /**
325   * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring
326   * @phba: pointer to lpfc hba data structure.
327   * @pring: pointer to a SLI ring.
328   * @ctiocbq: pointer to lpfc ct iocb data structure.
329   *
330   * This routine is used to process an unsolicited event received from a SLI
331   * (Service Level Interface) ring. The actual processing of the data buffer
332   * associated with the unsolicited event is done by invoking appropriate routine
333   * after properly set up the iocb buffer from the SLI ring on which the
334   * unsolicited event was received.
335   **/
336  void
337  lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
338  		    struct lpfc_iocbq *ctiocbq)
339  {
340  	struct lpfc_dmabuf *mp = NULL;
341  	IOCB_t *icmd = &ctiocbq->iocb;
342  	int i;
343  	struct lpfc_iocbq *iocbq;
344  	struct lpfc_iocbq *iocb;
345  	dma_addr_t dma_addr;
346  	uint32_t size;
347  	struct list_head head;
348  	struct lpfc_sli_ct_request *ct_req;
349  	struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
350  	struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
351  	u32 status, parameter, bde_count = 0;
352  	struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
353  
354  	ctiocbq->cmd_dmabuf = NULL;
355  	ctiocbq->rsp_dmabuf = NULL;
356  	ctiocbq->bpl_dmabuf = NULL;
357  
358  	wcqe_cmpl = &ctiocbq->wcqe_cmpl;
359  	status = get_job_ulpstatus(phba, ctiocbq);
360  	parameter = get_job_word4(phba, ctiocbq);
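	/* Receive BDE count comes from WCQE word3 on SLI4 and from ulpBdeCount
	 * on SLI3.
	 */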
361  	if (phba->sli_rev == LPFC_SLI_REV4)
362  		bde_count = wcqe_cmpl->word3;
363  	else
364  		bde_count = icmd->ulpBdeCount;
365  
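	/* The adapter is low on receive buffers; replenish them.  In the
	 * buffer-waiting case there is no payload to process, so just return.
	 */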
366  	if (unlikely(status == IOSTAT_NEED_BUFFER)) {
367  		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
368  	} else if ((status == IOSTAT_LOCAL_REJECT) &&
369  		   ((parameter & IOERR_PARAM_MASK) ==
370  		   IOERR_RCV_BUFFER_WAITING)) {
371  		/* Not enough posted buffers; Try posting more buffers */
372  		phba->fc_stat.NoRcvBuf++;
373  		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
374  			lpfc_sli3_post_buffer(phba, pring, 2);
375  		return;
376  	}
377  
378  	/* If there are no BDEs associated
379  	 * with this IOCB, there is nothing to do.
380  	 */
381  	if (bde_count == 0)
382  		return;
383  
384  	ctiocbq->cmd_dmabuf = bdeBuf1;
385  	if (bde_count == 2)
386  		ctiocbq->bpl_dmabuf = bdeBuf2;
387  
388  	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
389  
390  	if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
391  	    ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
392  		lpfc_ct_handle_mibreq(phba, ctiocbq);
393  	} else {
394  		if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
395  			return;
396  	}
397  
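	/* In either mode, walk this iocb and any chained continuation iocbs,
	 * hand each receive buffer to lpfc_ct_unsol_buffer() (which only logs
	 * it), then free the buffer.
	 */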
398  	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
399  		INIT_LIST_HEAD(&head);
400  		list_add_tail(&head, &ctiocbq->list);
401  		list_for_each_entry(iocb, &head, list) {
402  			if (phba->sli_rev == LPFC_SLI_REV4)
403  				bde_count = iocb->wcqe_cmpl.word3;
404  			else
405  				bde_count = iocb->iocb.ulpBdeCount;
406  
407  			if (!bde_count)
408  				continue;
409  			bdeBuf1 = iocb->cmd_dmabuf;
410  			iocb->cmd_dmabuf = NULL;
411  			if (phba->sli_rev == LPFC_SLI_REV4)
412  				size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
413  			else
414  				size  = iocb->iocb.un.cont64[0].tus.f.bdeSize;
415  			lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
416  			lpfc_in_buf_free(phba, bdeBuf1);
417  			if (bde_count == 2) {
418  				bdeBuf2 = iocb->bpl_dmabuf;
419  				iocb->bpl_dmabuf = NULL;
420  				if (phba->sli_rev == LPFC_SLI_REV4)
421  					size = iocb->unsol_rcv_len;
422  				else
423  					size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
424  				lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
425  						     size);
426  				lpfc_in_buf_free(phba, bdeBuf2);
427  			}
428  		}
429  		list_del(&head);
430  	} else {
431  		INIT_LIST_HEAD(&head);
432  		list_add_tail(&head, &ctiocbq->list);
433  		list_for_each_entry(iocbq, &head, list) {
434  			icmd = &iocbq->iocb;
435  			if (icmd->ulpBdeCount == 0)
436  				lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
437  			for (i = 0; i < icmd->ulpBdeCount; i++) {
438  				dma_addr = getPaddr(icmd->un.cont64[i].addrHigh,
439  						    icmd->un.cont64[i].addrLow);
440  				mp = lpfc_sli_ringpostbuf_get(phba, pring,
441  							      dma_addr);
442  				size = icmd->un.cont64[i].tus.f.bdeSize;
443  				lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
444  				lpfc_in_buf_free(phba, mp);
445  			}
446  			lpfc_sli3_post_buffer(phba, pring, i);
447  		}
448  		list_del(&head);
449  	}
450  }
451  
452  /**
453   * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
454   * @phba: Pointer to HBA context object.
455   * @dmabuf: pointer to a dmabuf that describes the FC sequence
456   *
457   * This function serves as the upper level protocol abort handler for CT
458   * protocol.
459   *
460   * Return 1 if abort has been handled, 0 otherwise.
461   **/
462  int
463  lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
464  {
465  	int handled;
466  
467  	/* CT upper level goes through BSG */
468  	handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
469  
470  	return handled;
471  }
472  
473  static void
474  lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
475  {
476  	struct lpfc_dmabuf *mlast, *next_mlast;
477  
478  	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
479  		list_del(&mlast->list);
480  		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
481  		kfree(mlast);
482  	}
483  	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
484  	kfree(mlist);
485  	return;
486  }
487  
488  static struct lpfc_dmabuf *
489  lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl,
490  		  uint32_t size, int *entries)
491  {
492  	struct lpfc_dmabuf *mlist = NULL;
493  	struct lpfc_dmabuf *mp;
494  	int cnt, i = 0;
495  
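	/* The response area is built from FCELSSIZE-sized mbuf chunks; each
	 * chunk gets one BDE in the caller-supplied BPL and *entries returns
	 * the number of BDEs filled in.
	 */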
496  	/* We get chunks of FCELSSIZE */
497  	cnt = size > FCELSSIZE ? FCELSSIZE: size;
498  
499  	while (size) {
500  		/* Allocate buffer for rsp payload */
501  		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
502  		if (!mp) {
503  			if (mlist)
504  				lpfc_free_ct_rsp(phba, mlist);
505  			return NULL;
506  		}
507  
508  		INIT_LIST_HEAD(&mp->list);
509  
510  		if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT ||
511  		    be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID)
512  			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
513  		else
514  			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
515  
516  		if (!mp->virt) {
517  			kfree(mp);
518  			if (mlist)
519  				lpfc_free_ct_rsp(phba, mlist);
520  			return NULL;
521  		}
522  
523  		/* Queue it to a linked list */
524  		if (!mlist)
525  			mlist = mp;
526  		else
527  			list_add_tail(&mp->list, &mlist->list);
528  
529  		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
530  		/* build buffer ptr list for IOCB */
531  		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
532  		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
533  		bpl->tus.f.bdeSize = (uint16_t) cnt;
534  		bpl->tus.w = le32_to_cpu(bpl->tus.w);
535  		bpl++;
536  
537  		i++;
538  		size -= cnt;
539  	}
540  
541  	*entries = i;
542  	return mlist;
543  }
544  
545  int
546  lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
547  {
548  	struct lpfc_dmabuf *buf_ptr;
549  
550  	/* IOCBQ job structure gets cleaned during release.  Just release
551  	 * the dma buffers here.
552  	 */
553  	if (ctiocb->cmd_dmabuf) {
554  		buf_ptr = ctiocb->cmd_dmabuf;
555  		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
556  		kfree(buf_ptr);
557  		ctiocb->cmd_dmabuf = NULL;
558  	}
559  	if (ctiocb->rsp_dmabuf) {
560  		lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
561  		ctiocb->rsp_dmabuf = NULL;
562  	}
563  
564  	if (ctiocb->bpl_dmabuf) {
565  		buf_ptr = ctiocb->bpl_dmabuf;
566  		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
567  		kfree(buf_ptr);
568  		ctiocb->bpl_dmabuf = NULL;
569  	}
570  	lpfc_sli_release_iocbq(phba, ctiocb);
571  	return 0;
572  }
573  
574  /*
575   * lpfc_gen_req - Build and issue a GEN_REQUEST command to the SLI Layer
576   * @vport: pointer to a host virtual N_Port data structure.
577   * @bmp: Pointer to BPL for SLI command
578   * @inp: Pointer to data buffer for response data.
579   * @outp: Pointer to data buffer that hold the CT command.
580   * @cmpl: completion routine to call when command completes
581   * @ndlp: Destination NPort nodelist entry
582   *
583   * This function serves as the final step in issuing a CT command.
584   */
585  static int
586  lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
587  	     struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
588  	     void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
589  			  struct lpfc_iocbq *),
590  	     struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
591  	     uint32_t tmo, uint8_t retry)
592  {
593  	struct lpfc_hba  *phba = vport->phba;
594  	struct lpfc_iocbq *geniocb;
595  	int rc;
596  	u16 ulp_context;
597  
598  	/* Allocate buffer for  command iocb */
599  	geniocb = lpfc_sli_get_iocbq(phba);
600  
601  	if (geniocb == NULL)
602  		return 1;
603  
604  	/* Update the num_entry bde count */
605  	geniocb->num_bdes = num_entry;
606  
607  	geniocb->bpl_dmabuf = bmp;
608  
609  	/* Save for completion so we can release these resources */
610  	geniocb->cmd_dmabuf = inp;
611  	geniocb->rsp_dmabuf = outp;
612  
613  	geniocb->event_tag = event_tag;
614  
615  	if (!tmo) {
616  		 /* FC spec states we need 3 * ratov for CT requests */
617  		tmo = (3 * phba->fc_ratov);
618  	}
619  
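	/* SLI4 translates the node's RPI through the rpi_ids[] mapping;
	 * SLI3 uses the RPI value directly as the context.
	 */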
620  	if (phba->sli_rev == LPFC_SLI_REV4)
621  		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
622  	else
623  		ulp_context = ndlp->nlp_rpi;
624  
625  	lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo);
626  
627  	/* Issue GEN REQ IOCB for NPORT <did> */
628  	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
629  			 "0119 Issue GEN REQ IOCB to NPORT x%x "
630  			 "Data: x%x x%x\n",
631  			 ndlp->nlp_DID, geniocb->iotag,
632  			 vport->port_state);
633  	geniocb->cmd_cmpl = cmpl;
634  	geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
635  	geniocb->vport = vport;
636  	geniocb->retry = retry;
637  	geniocb->ndlp = lpfc_nlp_get(ndlp);
638  	if (!geniocb->ndlp)
639  		goto out;
640  
641  	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
642  	if (rc == IOCB_ERROR) {
643  		lpfc_nlp_put(ndlp);
644  		goto out;
645  	}
646  
647  	return 0;
648  out:
649  	lpfc_sli_release_iocbq(phba, geniocb);
650  	return 1;
651  }
652  
653  /*
654   * lpfc_ct_cmd - Build and issue a CT command
655   * @vport: pointer to a host virtual N_Port data structure.
656   * @inmp: Pointer to data buffer for response data.
657   * @bmp: Pointer to BPL for SLI command
658   * @ndlp: Destination NPort nodelist entry
659   * @cmpl: completion routine to call when command completes
660   *
661   * This function is called for issuing a CT command.
662   */
663  static int
664  lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
665  	    struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
666  	    void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
667  			  struct lpfc_iocbq *),
668  	    uint32_t rsp_size, uint8_t retry)
669  {
670  	struct lpfc_hba  *phba = vport->phba;
671  	struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
672  	struct lpfc_dmabuf *outmp;
673  	int cnt = 0, status;
674  	__be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)->
675  		CommandResponse.bits.CmdRsp;
676  
677  	bpl++;			/* Skip past ct request */
678  
679  	/* Put buffer(s) for ct rsp in bpl */
680  	outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
681  	if (!outmp)
682  		return -ENOMEM;
683  	/*
684  	 * Form the CT IOCB.  The total number of BDEs in this IOCB
685  	 * is the single command plus response count from
686  	 * lpfc_alloc_ct_rsp.
687  	 */
688  	cnt += 1;
689  	status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp,
690  			phba->fc_eventTag, cnt, 0, retry);
691  	if (status) {
692  		lpfc_free_ct_rsp(phba, outmp);
693  		return -ENOMEM;
694  	}
695  	return 0;
696  }
697  
698  struct lpfc_vport *
699  lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
700  	struct lpfc_vport *vport_curr;
701  	unsigned long flags;
702  
703  	spin_lock_irqsave(&phba->port_list_lock, flags);
704  	list_for_each_entry(vport_curr, &phba->port_list, listentry) {
705  		if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
706  			spin_unlock_irqrestore(&phba->port_list_lock, flags);
707  			return vport_curr;
708  		}
709  	}
710  	spin_unlock_irqrestore(&phba->port_list_lock, flags);
711  	return NULL;
712  }
713  
714  static void
715  lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
716  {
717  	struct lpfc_nodelist *ndlp;
718  
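	/* In the common case, set the node up for discovery directly.  The else
	 * path below applies only to NPIV ports with restricted login, which
	 * check FC-4 support (GFF_ID) before discovery.
	 */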
719  	if ((vport->port_type != LPFC_NPIV_PORT) ||
720  	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
721  
722  		ndlp = lpfc_setup_disc_node(vport, Did);
723  
724  		if (ndlp) {
725  			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
726  				"Parse GID_FTrsp: did:x%x flg:x%x x%x",
727  				Did, ndlp->nlp_flag, vport->fc_flag);
728  
729  			/* By default, the driver expects to support FCP FC4 */
730  			if (fc4_type == FC_TYPE_FCP)
731  				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
732  
733  			if (fc4_type == FC_TYPE_NVME)
734  				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
735  
736  			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
737  					 "0238 Process x%06x NameServer Rsp "
738  					 "Data: x%x x%x x%x x%x x%x\n", Did,
739  					 ndlp->nlp_flag, ndlp->nlp_fc4_type,
740  					 ndlp->nlp_state, vport->fc_flag,
741  					 vport->fc_rscn_id_cnt);
742  
743  			/* if ndlp needs to be discovered and prior
744  			 * state of ndlp hit devloss, change state to
745  			 * allow rediscovery.
746  			 */
747  			if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
748  			    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
749  				lpfc_nlp_set_state(vport, ndlp,
750  						   NLP_STE_NPR_NODE);
751  			}
752  		} else {
753  			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
754  				"Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
755  				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
756  
757  			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
758  					 "0239 Skip x%06x NameServer Rsp "
759  					 "Data: x%x x%x x%px\n",
760  					 Did, vport->fc_flag,
761  					 vport->fc_rscn_id_cnt, ndlp);
762  		}
763  	} else {
764  		if (!(vport->fc_flag & FC_RSCN_MODE) ||
765  		    lpfc_rscn_payload_check(vport, Did)) {
766  			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
767  				"Query GID_FTrsp: did:x%x flg:x%x cnt:%d",
768  				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
769  
770  			/*
771  			 * This NPortID was previously a FCP/NVMe target,
772  			 * Don't even bother to send GFF_ID.
773  			 */
774  			ndlp = lpfc_findnode_did(vport, Did);
775  			if (ndlp &&
776  			    (ndlp->nlp_type &
777  			    (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
778  				if (fc4_type == FC_TYPE_FCP)
779  					ndlp->nlp_fc4_type |= NLP_FC4_FCP;
780  				if (fc4_type == FC_TYPE_NVME)
781  					ndlp->nlp_fc4_type |= NLP_FC4_NVME;
782  				lpfc_setup_disc_node(vport, Did);
783  			} else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
784  				   0, Did) == 0)
785  				vport->num_disc_nodes++;
786  			else
787  				lpfc_setup_disc_node(vport, Did);
788  		} else {
789  			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
790  				"Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
791  				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
792  
793  			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
794  					 "0245 Skip x%06x NameServer Rsp "
795  					 "Data: x%x x%x\n", Did,
796  					 vport->fc_flag,
797  					 vport->fc_rscn_id_cnt);
798  		}
799  	}
800  }
801  
802  static void
803  lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
804  {
805  	struct lpfc_hba *phba = vport->phba;
806  	struct lpfc_nodelist *ndlp = NULL;
807  	char *str;
808  
809  	if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
810  		str = "GID_FT";
811  	else
812  		str = "GID_PT";
813  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
814  			 "6430 Process %s rsp for %08x type %x %s %s\n",
815  			 str, Did, fc4_type,
816  			 (fc4_type == FC_TYPE_FCP) ?  "FCP" : " ",
817  			 (fc4_type == FC_TYPE_NVME) ?  "NVME" : " ");
818  	/*
819  	 * To conserve rpi's, filter out addresses for other
820  	 * vports on the same physical HBAs.
821  	 */
822  	if (Did != vport->fc_myDID &&
823  	    (!lpfc_find_vport_by_did(phba, Did) ||
824  	     vport->cfg_peer_port_login)) {
825  		if (!phba->nvmet_support) {
826  			/* FCPI/NVMEI path. Process Did */
827  			lpfc_prep_node_fc4type(vport, Did, fc4_type);
828  			return;
829  		}
830  		/* NVMET path.  NVMET only cares about NVMEI nodes. */
831  		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
832  			if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
833  			    ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
834  				continue;
835  			spin_lock_irq(&ndlp->lock);
836  			if (ndlp->nlp_DID == Did)
837  				ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
838  			else
839  				ndlp->nlp_flag |= NLP_NVMET_RECOV;
840  			spin_unlock_irq(&ndlp->lock);
841  		}
842  	}
843  }
844  
845  static int
846  lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
847  	    uint32_t Size)
848  {
849  	struct lpfc_sli_ct_request *Response =
850  		(struct lpfc_sli_ct_request *) mp->virt;
851  	struct lpfc_dmabuf *mlast, *next_mp;
852  	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
853  	uint32_t Did, CTentry;
854  	int Cnt;
855  	struct list_head head;
856  	struct lpfc_nodelist *ndlp = NULL;
857  
858  	lpfc_set_disctmo(vport);
859  	vport->num_disc_nodes = 0;
860  	vport->fc_ns_retry = 0;
861  
862  
863  	list_add_tail(&head, &mp->list);
864  	list_for_each_entry_safe(mp, next_mp, &head, list) {
865  		mlast = mp;
866  
867  		Cnt = Size  > FCELSSIZE ? FCELSSIZE : Size;
868  
869  		Size -= Cnt;
870  
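		/* In the first buffer the port-ID data follows the 16-byte CT
		 * header (ctptr was preset past it), so the header length is
		 * subtracted; continuation buffers are parsed from offset 0.
		 */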
871  		if (!ctptr) {
872  			ctptr = (uint32_t *) mlast->virt;
873  		} else
874  			Cnt -= 16;	/* subtract length of CT header */
875  
876  		/* Loop through entire NameServer list of DIDs */
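		/* Each entry is one 4-byte word: control bits plus a 24-bit
		 * port ID; SLI_CT_LAST_ENTRY marks the final entry.
		 */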
877  		while (Cnt >= sizeof(uint32_t)) {
878  			/* Get next DID from NameServer List */
879  			CTentry = *ctptr++;
880  			Did = ((be32_to_cpu(CTentry)) & Mask_DID);
881  			lpfc_ns_rsp_audit_did(vport, Did, fc4_type);
882  			if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
883  				goto nsout1;
884  
885  			Cnt -= sizeof(uint32_t);
886  		}
887  		ctptr = NULL;
888  
889  	}
890  
891  	/* All GID_FT entries processed.  If the driver is running in
892  	 * target mode, put impacted nodes into recovery and drop
893  	 * the RPI to flush outstanding IO.
894  	 */
895  	if (vport->phba->nvmet_support) {
896  		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
897  			if (!(ndlp->nlp_flag & NLP_NVMET_RECOV))
898  				continue;
899  			lpfc_disc_state_machine(vport, ndlp, NULL,
900  						NLP_EVT_DEVICE_RECOVERY);
901  			spin_lock_irq(&ndlp->lock);
902  			ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
903  			spin_unlock_irq(&ndlp->lock);
904  		}
905  	}
906  
907  nsout1:
908  	list_del(&head);
909  	return 0;
910  }
911  
912  static void
913  lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
914  			struct lpfc_iocbq *rspiocb)
915  {
916  	struct lpfc_vport *vport = cmdiocb->vport;
917  	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
918  	struct lpfc_dmabuf *outp;
919  	struct lpfc_dmabuf *inp;
920  	struct lpfc_sli_ct_request *CTrsp;
921  	struct lpfc_sli_ct_request *CTreq;
922  	struct lpfc_nodelist *ndlp;
923  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
924  	u32 ulp_word4 = get_job_word4(phba, rspiocb);
925  	int rc, type;
926  
927  	/* First save ndlp, before we overwrite it */
928  	ndlp = cmdiocb->ndlp;
929  
930  	/* we pass cmdiocb to state machine which needs rspiocb as well */
931  	cmdiocb->rsp_iocb = rspiocb;
932  	inp = cmdiocb->cmd_dmabuf;
933  	outp = cmdiocb->rsp_dmabuf;
934  
935  	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
936  		 "GID_FT cmpl:     status:x%x/x%x rtry:%d",
937  		ulp_status, ulp_word4, vport->fc_ns_retry);
938  
939  	/* Ignore response if link flipped after this request was made */
940  	if (cmdiocb->event_tag != phba->fc_eventTag) {
941  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
942  				 "9043 Event tag mismatch. Ignoring NS rsp\n");
943  		goto out;
944  	}
945  
946  	/* Don't bother processing response if vport is being torn down. */
947  	if (vport->load_flag & FC_UNLOADING) {
948  		if (vport->fc_flag & FC_RSCN_MODE)
949  			lpfc_els_flush_rscn(vport);
950  		goto out;
951  	}
952  
953  	if (lpfc_els_chk_latt(vport)) {
954  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
955  				 "0216 Link event during NS query\n");
956  		if (vport->fc_flag & FC_RSCN_MODE)
957  			lpfc_els_flush_rscn(vport);
958  		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
959  		goto out;
960  	}
961  	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
962  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
963  				 "0226 NS query failed due to link event: "
964  				 "ulp_status x%x ulp_word4 x%x fc_flag x%x "
965  				 "port_state x%x gidft_inp x%x\n",
966  				 ulp_status, ulp_word4, vport->fc_flag,
967  				 vport->port_state, vport->gidft_inp);
968  		if (vport->fc_flag & FC_RSCN_MODE)
969  			lpfc_els_flush_rscn(vport);
970  		if (vport->gidft_inp)
971  			vport->gidft_inp--;
972  		goto out;
973  	}
974  
975  	spin_lock_irq(shost->host_lock);
976  	if (vport->fc_flag & FC_RSCN_DEFERRED) {
977  		vport->fc_flag &= ~FC_RSCN_DEFERRED;
978  		spin_unlock_irq(shost->host_lock);
979  
980  		/* This is a GID_FT completing so the gidft_inp counter was
981  		 * incremented before the GID_FT was issued to the wire.
982  		 */
983  		if (vport->gidft_inp)
984  			vport->gidft_inp--;
985  
986  		/*
987  		 * Skip processing the NS response
988  		 * Re-issue the NS cmd
989  		 */
990  		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
991  				 "0151 Process Deferred RSCN Data: x%x x%x\n",
992  				 vport->fc_flag, vport->fc_rscn_id_cnt);
993  		lpfc_els_handle_rscn(vport);
994  
995  		goto out;
996  	}
997  	spin_unlock_irq(shost->host_lock);
998  
999  	if (ulp_status) {
1000  		/* Check for retry */
1001  		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
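			/* A local out-of-resources reject is retried without
			 * consuming an fc_ns_retry count; any other failure
			 * increments it.
			 */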
1002  			if (ulp_status != IOSTAT_LOCAL_REJECT ||
1003  			    (ulp_word4 & IOERR_PARAM_MASK) !=
1004  			    IOERR_NO_RESOURCES)
1005  				vport->fc_ns_retry++;
1006  
1007  			type = lpfc_get_gidft_type(vport, cmdiocb);
1008  			if (type == 0)
1009  				goto out;
1010  
1011  			/* CT command is being retried */
1012  			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
1013  					 vport->fc_ns_retry, type);
1014  			if (rc == 0)
1015  				goto out;
1016  			else { /* Unable to send NS cmd */
1017  				if (vport->gidft_inp)
1018  					vport->gidft_inp--;
1019  			}
1020  		}
1021  		if (vport->fc_flag & FC_RSCN_MODE)
1022  			lpfc_els_flush_rscn(vport);
1023  		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1024  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1025  				 "0257 GID_FT Query error: 0x%x 0x%x\n",
1026  				 ulp_status, vport->fc_ns_retry);
1027  	} else {
1028  		/* Good status, continue checking */
1029  		CTreq = (struct lpfc_sli_ct_request *) inp->virt;
1030  		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1031  		if (CTrsp->CommandResponse.bits.CmdRsp ==
1032  		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
1033  			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1034  					 "0208 NameServer Rsp Data: x%x x%x "
1035  					 "x%x x%x sz x%x\n",
1036  					 vport->fc_flag,
1037  					 CTreq->un.gid.Fc4Type,
1038  					 vport->num_disc_nodes,
1039  					 vport->gidft_inp,
1040  					 get_job_data_placed(phba, rspiocb));
1041  
1042  			lpfc_ns_rsp(vport,
1043  				    outp,
1044  				    CTreq->un.gid.Fc4Type,
1045  				    get_job_data_placed(phba, rspiocb));
1046  		} else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1047  			   SLI_CT_RESPONSE_FS_RJT) {
1048  			/* NameServer Rsp Error */
1049  			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
1050  			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
1051  				lpfc_printf_vlog(vport, KERN_INFO,
1052  					LOG_DISCOVERY,
1053  					"0269 No NameServer Entries "
1054  					"Data: x%x x%x x%x x%x\n",
1055  					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1056  					(uint32_t) CTrsp->ReasonCode,
1057  					(uint32_t) CTrsp->Explanation,
1058  					vport->fc_flag);
1059  
1060  				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1061  				"GID_FT no entry  cmd:x%x rsn:x%x exp:x%x",
1062  				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1063  				(uint32_t) CTrsp->ReasonCode,
1064  				(uint32_t) CTrsp->Explanation);
1065  			} else {
1066  				lpfc_printf_vlog(vport, KERN_INFO,
1067  					LOG_DISCOVERY,
1068  					"0240 NameServer Rsp Error "
1069  					"Data: x%x x%x x%x x%x\n",
1070  					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1071  					(uint32_t) CTrsp->ReasonCode,
1072  					(uint32_t) CTrsp->Explanation,
1073  					vport->fc_flag);
1074  
1075  				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1076  				"GID_FT rsp err1  cmd:x%x rsn:x%x exp:x%x",
1077  				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1078  				(uint32_t) CTrsp->ReasonCode,
1079  				(uint32_t) CTrsp->Explanation);
1080  			}
1081  
1082  
1083  		} else {
1084  			/* NameServer Rsp Error */
1085  			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1086  					"0241 NameServer Rsp Error "
1087  					"Data: x%x x%x x%x x%x\n",
1088  					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1089  					(uint32_t) CTrsp->ReasonCode,
1090  					(uint32_t) CTrsp->Explanation,
1091  					vport->fc_flag);
1092  
1093  			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1094  				"GID_FT rsp err2  cmd:x%x rsn:x%x exp:x%x",
1095  				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1096  				(uint32_t) CTrsp->ReasonCode,
1097  				(uint32_t) CTrsp->Explanation);
1098  		}
1099  		if (vport->gidft_inp)
1100  			vport->gidft_inp--;
1101  	}
1102  
1103  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1104  			 "4216 GID_FT cmpl inp %d disc %d\n",
1105  			 vport->gidft_inp, vport->num_disc_nodes);
1106  
1107  	/* Link up / RSCN discovery */
1108  	if ((vport->num_disc_nodes == 0) &&
1109  	    (vport->gidft_inp == 0)) {
1110  		/*
1111  		 * The driver has cycled through all Nports in the RSCN payload.
1112  		 * Complete the handling by cleaning up and marking the
1113  		 * current driver state.
1114  		 */
1115  		if (vport->port_state >= LPFC_DISC_AUTH) {
1116  			if (vport->fc_flag & FC_RSCN_MODE) {
1117  				lpfc_els_flush_rscn(vport);
1118  				spin_lock_irq(shost->host_lock);
1119  				vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
1120  				spin_unlock_irq(shost->host_lock);
1121  			}
1122  			else
1123  				lpfc_els_flush_rscn(vport);
1124  		}
1125  
1126  		lpfc_disc_start(vport);
1127  	}
1128  out:
1129  	lpfc_ct_free_iocb(phba, cmdiocb);
1130  	lpfc_nlp_put(ndlp);
1131  	return;
1132  }
1133  
1134  static void
1135  lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1136  			struct lpfc_iocbq *rspiocb)
1137  {
1138  	struct lpfc_vport *vport = cmdiocb->vport;
1139  	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1140  	struct lpfc_dmabuf *outp;
1141  	struct lpfc_dmabuf *inp;
1142  	struct lpfc_sli_ct_request *CTrsp;
1143  	struct lpfc_sli_ct_request *CTreq;
1144  	struct lpfc_nodelist *ndlp;
1145  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1146  	u32 ulp_word4 = get_job_word4(phba, rspiocb);
1147  	int rc;
1148  
1149  	/* First save ndlp, before we overwrite it */
1150  	ndlp = cmdiocb->ndlp;
1151  
1152  	/* we pass cmdiocb to state machine which needs rspiocb as well */
1153  	cmdiocb->rsp_iocb = rspiocb;
1154  	inp = cmdiocb->cmd_dmabuf;
1155  	outp = cmdiocb->rsp_dmabuf;
1156  
1157  	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1158  			      "GID_PT cmpl:     status:x%x/x%x rtry:%d",
1159  			      ulp_status, ulp_word4,
1160  			      vport->fc_ns_retry);
1161  
1162  	/* Ignore response if link flipped after this request was made */
1163  	if (cmdiocb->event_tag != phba->fc_eventTag) {
1164  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1165  				 "9044 Event tag mismatch. Ignoring NS rsp\n");
1166  		goto out;
1167  	}
1168  
1169  	/* Don't bother processing response if vport is being torn down. */
1170  	if (vport->load_flag & FC_UNLOADING) {
1171  		if (vport->fc_flag & FC_RSCN_MODE)
1172  			lpfc_els_flush_rscn(vport);
1173  		goto out;
1174  	}
1175  
1176  	if (lpfc_els_chk_latt(vport)) {
1177  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1178  				 "4108 Link event during NS query\n");
1179  		if (vport->fc_flag & FC_RSCN_MODE)
1180  			lpfc_els_flush_rscn(vport);
1181  		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1182  		goto out;
1183  	}
1184  	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
1185  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1186  				 "4166 NS query failed due to link event: "
1187  				 "ulp_status x%x ulp_word4 x%x fc_flag x%x "
1188  				 "port_state x%x gidft_inp x%x\n",
1189  				 ulp_status, ulp_word4, vport->fc_flag,
1190  				 vport->port_state, vport->gidft_inp);
1191  		if (vport->fc_flag & FC_RSCN_MODE)
1192  			lpfc_els_flush_rscn(vport);
1193  		if (vport->gidft_inp)
1194  			vport->gidft_inp--;
1195  		goto out;
1196  	}
1197  
1198  	spin_lock_irq(shost->host_lock);
1199  	if (vport->fc_flag & FC_RSCN_DEFERRED) {
1200  		vport->fc_flag &= ~FC_RSCN_DEFERRED;
1201  		spin_unlock_irq(shost->host_lock);
1202  
1203  		/* This is a GID_PT completing so the gidft_inp counter was
1204  		 * incremented before the GID_PT was issued to the wire.
1205  		 */
1206  		if (vport->gidft_inp)
1207  			vport->gidft_inp--;
1208  
1209  		/*
1210  		 * Skip processing the NS response
1211  		 * Re-issue the NS cmd
1212  		 */
1213  		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1214  				 "4167 Process Deferred RSCN Data: x%x x%x\n",
1215  				 vport->fc_flag, vport->fc_rscn_id_cnt);
1216  		lpfc_els_handle_rscn(vport);
1217  
1218  		goto out;
1219  	}
1220  	spin_unlock_irq(shost->host_lock);
1221  
1222  	if (ulp_status) {
1223  		/* Check for retry */
1224  		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
1225  			if (ulp_status != IOSTAT_LOCAL_REJECT ||
1226  			    (ulp_word4 & IOERR_PARAM_MASK) !=
1227  			    IOERR_NO_RESOURCES)
1228  				vport->fc_ns_retry++;
1229  
1230  			/* CT command is being retried */
1231  			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
1232  					 vport->fc_ns_retry, GID_PT_N_PORT);
1233  			if (rc == 0)
1234  				goto out;
1235  			else { /* Unable to send NS cmd */
1236  				if (vport->gidft_inp)
1237  					vport->gidft_inp--;
1238  			}
1239  		}
1240  		if (vport->fc_flag & FC_RSCN_MODE)
1241  			lpfc_els_flush_rscn(vport);
1242  		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1243  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1244  				 "4103 GID_FT Query error: 0x%x 0x%x\n",
1245  				 ulp_status, vport->fc_ns_retry);
1246  	} else {
1247  		/* Good status, continue checking */
1248  		CTreq = (struct lpfc_sli_ct_request *)inp->virt;
1249  		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1250  		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1251  		    SLI_CT_RESPONSE_FS_ACC) {
1252  			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1253  					 "4105 NameServer Rsp Data: x%x x%x "
1254  					 "x%x x%x sz x%x\n",
1255  					 vport->fc_flag,
1256  					 CTreq->un.gid.Fc4Type,
1257  					 vport->num_disc_nodes,
1258  					 vport->gidft_inp,
1259  					 get_job_data_placed(phba, rspiocb));
1260  
1261  			lpfc_ns_rsp(vport,
1262  				    outp,
1263  				    CTreq->un.gid.Fc4Type,
1264  				    get_job_data_placed(phba, rspiocb));
1265  		} else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1266  			   SLI_CT_RESPONSE_FS_RJT) {
1267  			/* NameServer Rsp Error */
1268  			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
1269  			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
1270  				lpfc_printf_vlog(
1271  					vport, KERN_INFO, LOG_DISCOVERY,
1272  					"4106 No NameServer Entries "
1273  					"Data: x%x x%x x%x x%x\n",
1274  					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1275  					(uint32_t)CTrsp->ReasonCode,
1276  					(uint32_t)CTrsp->Explanation,
1277  					vport->fc_flag);
1278  
1279  				lpfc_debugfs_disc_trc(
1280  				vport, LPFC_DISC_TRC_CT,
1281  				"GID_PT no entry  cmd:x%x rsn:x%x exp:x%x",
1282  				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1283  				(uint32_t)CTrsp->ReasonCode,
1284  				(uint32_t)CTrsp->Explanation);
1285  			} else {
1286  				lpfc_printf_vlog(
1287  					vport, KERN_INFO, LOG_DISCOVERY,
1288  					"4107 NameServer Rsp Error "
1289  					"Data: x%x x%x x%x x%x\n",
1290  					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1291  					(uint32_t)CTrsp->ReasonCode,
1292  					(uint32_t)CTrsp->Explanation,
1293  					vport->fc_flag);
1294  
1295  				lpfc_debugfs_disc_trc(
1296  				vport, LPFC_DISC_TRC_CT,
1297  				"GID_PT rsp err1  cmd:x%x rsn:x%x exp:x%x",
1298  				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1299  				(uint32_t)CTrsp->ReasonCode,
1300  				(uint32_t)CTrsp->Explanation);
1301  			}
1302  		} else {
1303  			/* NameServer Rsp Error */
1304  			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1305  					 "4109 NameServer Rsp Error "
1306  					 "Data: x%x x%x x%x x%x\n",
1307  					 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1308  					 (uint32_t)CTrsp->ReasonCode,
1309  					 (uint32_t)CTrsp->Explanation,
1310  					 vport->fc_flag);
1311  
1312  			lpfc_debugfs_disc_trc(
1313  				vport, LPFC_DISC_TRC_CT,
1314  				"GID_PT rsp err2  cmd:x%x rsn:x%x exp:x%x",
1315  				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1316  				(uint32_t)CTrsp->ReasonCode,
1317  				(uint32_t)CTrsp->Explanation);
1318  		}
1319  		if (vport->gidft_inp)
1320  			vport->gidft_inp--;
1321  	}
1322  
1323  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1324  			 "6450 GID_PT cmpl inp %d disc %d\n",
1325  			 vport->gidft_inp, vport->num_disc_nodes);
1326  
1327  	/* Link up / RSCN discovery */
1328  	if ((vport->num_disc_nodes == 0) &&
1329  	    (vport->gidft_inp == 0)) {
1330  		/*
1331  		 * The driver has cycled through all Nports in the RSCN payload.
1332  		 * Complete the handling by cleaning up and marking the
1333  		 * current driver state.
1334  		 */
1335  		if (vport->port_state >= LPFC_DISC_AUTH) {
1336  			if (vport->fc_flag & FC_RSCN_MODE) {
1337  				lpfc_els_flush_rscn(vport);
1338  				spin_lock_irq(shost->host_lock);
1339  				vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
1340  				spin_unlock_irq(shost->host_lock);
1341  			} else {
1342  				lpfc_els_flush_rscn(vport);
1343  			}
1344  		}
1345  
1346  		lpfc_disc_start(vport);
1347  	}
1348  out:
1349  	lpfc_ct_free_iocb(phba, cmdiocb);
1350  	lpfc_nlp_put(ndlp);
1351  }
1352  
1353  static void
1354  lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1355  			struct lpfc_iocbq *rspiocb)
1356  {
1357  	struct lpfc_vport *vport = cmdiocb->vport;
1358  	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1359  	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
1360  	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
1361  	struct lpfc_sli_ct_request *CTrsp;
1362  	int did, rc, retry;
1363  	uint8_t fbits;
1364  	struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
1365  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1366  	u32 ulp_word4 = get_job_word4(phba, rspiocb);
1367  
1368  	did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
1369  	did = be32_to_cpu(did);
1370  
1371  	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1372  		"GFF_ID cmpl:     status:x%x/x%x did:x%x",
1373  		ulp_status, ulp_word4, did);
1374  
1375  	/* Ignore response if link flipped after this request was made */
1376  	if (cmdiocb->event_tag != phba->fc_eventTag) {
1377  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1378  				 "9045 Event tag mismatch. Ignoring NS rsp\n");
1379  		goto iocb_free;
1380  	}
1381  
1382  	if (ulp_status == IOSTAT_SUCCESS) {
1383  		/* Good status, continue checking */
1384  		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1385  		fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
1386  
1387  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1388  				 "6431 Process GFF_ID rsp for %08x "
1389  				 "fbits %02x %s %s\n",
1390  				 did, fbits,
1391  				 (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ",
1392  				 (fbits & FC4_FEATURE_TARGET) ? "Target" : " ");
1393  
1394  		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1395  		    SLI_CT_RESPONSE_FS_ACC) {
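			/* A port reporting only the initiator feature bit is
			 * not a target this driver needs to discover, so it
			 * is skipped.
			 */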
1396  			if ((fbits & FC4_FEATURE_INIT) &&
1397  			    !(fbits & FC4_FEATURE_TARGET)) {
1398  				lpfc_printf_vlog(vport, KERN_INFO,
1399  						 LOG_DISCOVERY,
1400  						 "0270 Skip x%x GFF "
1401  						 "NameServer Rsp Data: (init) "
1402  						 "x%x x%x\n", did, fbits,
1403  						 vport->fc_rscn_id_cnt);
1404  				goto out;
1405  			}
1406  		}
1407  	}
1408  	else {
1409  		/* Check for retry */
1410  		if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
1411  			retry = 1;
1412  			if (ulp_status == IOSTAT_LOCAL_REJECT) {
1413  				switch ((ulp_word4 &
1414  					IOERR_PARAM_MASK)) {
1415  
1416  				case IOERR_NO_RESOURCES:
1417  					/* We don't increment the retry
1418  					 * count for this case.
1419  					 */
1420  					break;
1421  				case IOERR_LINK_DOWN:
1422  				case IOERR_SLI_ABORTED:
1423  				case IOERR_SLI_DOWN:
1424  					retry = 0;
1425  					break;
1426  				default:
1427  					cmdiocb->retry++;
1428  				}
1429  			}
1430  			else
1431  				cmdiocb->retry++;
1432  
1433  			if (retry) {
1434  				/* CT command is being retried */
1435  				rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
1436  					 cmdiocb->retry, did);
1437  				if (rc == 0) {
1438  					/* success */
1439  					free_ndlp = cmdiocb->ndlp;
1440  					lpfc_ct_free_iocb(phba, cmdiocb);
1441  					lpfc_nlp_put(free_ndlp);
1442  					return;
1443  				}
1444  			}
1445  		}
1446  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1447  				 "0267 NameServer GFF Rsp "
1448  				 "x%x Error (%d %d) Data: x%x x%x\n",
1449  				 did, ulp_status, ulp_word4,
1450  				 vport->fc_flag, vport->fc_rscn_id_cnt);
1451  	}
1452  
1453  	/* This is a target port, unregistered port, or the GFF_ID failed */
1454  	ndlp = lpfc_setup_disc_node(vport, did);
1455  	if (ndlp) {
1456  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1457  				 "0242 Process x%x GFF "
1458  				 "NameServer Rsp Data: x%x x%x x%x\n",
1459  				 did, ndlp->nlp_flag, vport->fc_flag,
1460  				 vport->fc_rscn_id_cnt);
1461  	} else {
1462  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1463  				 "0243 Skip x%x GFF "
1464  				 "NameServer Rsp Data: x%x x%x\n", did,
1465  				 vport->fc_flag, vport->fc_rscn_id_cnt);
1466  	}
1467  out:
1468  	/* Link up / RSCN discovery */
1469  	if (vport->num_disc_nodes)
1470  		vport->num_disc_nodes--;
1471  
1472  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1473  			 "6451 GFF_ID cmpl inp %d disc %d\n",
1474  			 vport->gidft_inp, vport->num_disc_nodes);
1475  
1476  	if (vport->num_disc_nodes == 0) {
1477  		/*
1478  		 * The driver has cycled through all Nports in the RSCN payload.
1479  		 * Complete the handling by cleaning up and marking the
1480  		 * current driver state.
1481  		 */
1482  		if (vport->port_state >= LPFC_DISC_AUTH) {
1483  			if (vport->fc_flag & FC_RSCN_MODE) {
1484  				lpfc_els_flush_rscn(vport);
1485  				spin_lock_irq(shost->host_lock);
1486  				vport->fc_flag |= FC_RSCN_MODE; /* RSCN still */
1487  				spin_unlock_irq(shost->host_lock);
1488  			}
1489  			else
1490  				lpfc_els_flush_rscn(vport);
1491  		}
1492  		lpfc_disc_start(vport);
1493  	}
1494  
1495  iocb_free:
1496  	free_ndlp = cmdiocb->ndlp;
1497  	lpfc_ct_free_iocb(phba, cmdiocb);
1498  	lpfc_nlp_put(free_ndlp);
1499  	return;
1500  }
1501  
1502  static void
1503  lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1504  			struct lpfc_iocbq *rspiocb)
1505  {
1506  	struct lpfc_vport *vport = cmdiocb->vport;
1507  	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
1508  	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
1509  	struct lpfc_sli_ct_request *CTrsp;
1510  	int did;
1511  	struct lpfc_nodelist *ndlp = NULL;
1512  	struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp;
1513  	uint32_t fc4_data_0, fc4_data_1;
1514  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1515  	u32 ulp_word4 = get_job_word4(phba, rspiocb);
1516  
1517  	did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
1518  	did = be32_to_cpu(did);
1519  
1520  	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1521  			      "GFT_ID cmpl: status:x%x/x%x did:x%x",
1522  			      ulp_status, ulp_word4, did);
1523  
1524  	/* Ignore response if link flipped after this request was made */
1525  	if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) {
1526  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1527  				 "9046 Event tag mismatch. Ignoring NS rsp\n");
1528  		goto out;
1529  	}
1530  
1531  	if (ulp_status == IOSTAT_SUCCESS) {
1532  		/* Good status, continue checking */
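		/* The GFT_ID accept payload is the FC-4 TYPEs bitmap; only the
		 * first two words (FCP and NVMe type bits) matter here.
		 */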
1533  		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1534  		fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
1535  		fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
1536  
1537  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1538  				 "6432 Process GFT_ID rsp for %08x "
1539  				 "Data %08x %08x %s %s\n",
1540  				 did, fc4_data_0, fc4_data_1,
1541  				 (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ?
1542  				  "FCP" : " ",
1543  				 (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
1544  				  "NVME" : " ");
1545  
1546  		/* Lookup the NPort_ID queried in the GFT_ID and find the
1547  		 * driver's local node.  It's an error if the driver
1548  		 * doesn't have one.
1549  		 */
1550  		ndlp = lpfc_findnode_did(vport, did);
1551  		if (ndlp) {
1552  			/* The bitmask value for FCP and NVME FCP types is
1553  			 * the same because they are 32 bits distant from
1554  			 * each other in word0 and word0.
1555  			 */
1556  			if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
1557  				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
1558  			if (fc4_data_1 &  LPFC_FC4_TYPE_BITMASK)
1559  				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
1560  			lpfc_printf_vlog(vport, KERN_INFO,
1561  					 LOG_DISCOVERY | LOG_NODE,
1562  					 "3064 Setting ndlp x%px, DID x%06x "
1563  					 "with FC4 x%08x, Data: x%08x x%08x "
1564  					 "%d\n",
1565  					 ndlp, did, ndlp->nlp_fc4_type,
1566  					 FC_TYPE_FCP, FC_TYPE_NVME,
1567  					 ndlp->nlp_state);
1568  
1569  			if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
1570  			    ndlp->nlp_fc4_type) {
1571  				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1572  				/* This is a fabric topology so if discovery
1573  				 * started with an unsolicited PLOGI, don't
1574  				 * send a PRLI.  Targets don't issue PLOGI or
1575  				 * PRLI when acting as a target. Likely this is
1576  				 * an initiator function.
1577  				 */
1578  				if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
1579  					lpfc_nlp_set_state(vport, ndlp,
1580  							   NLP_STE_PRLI_ISSUE);
1581  					lpfc_issue_els_prli(vport, ndlp, 0);
1582  				}
1583  			} else if (!ndlp->nlp_fc4_type) {
1584  				/* If fc4 type is still unknown, then LOGO */
1585  				lpfc_printf_vlog(vport, KERN_INFO,
1586  						 LOG_DISCOVERY | LOG_NODE,
1587  						 "6443 Sending LOGO ndlp x%px,"
1588  						 "DID x%06x with fc4_type: "
1589  						 "x%08x, state: %d\n",
1590  						 ndlp, did, ndlp->nlp_fc4_type,
1591  						 ndlp->nlp_state);
1592  				lpfc_issue_els_logo(vport, ndlp, 0);
1593  				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1594  				lpfc_nlp_set_state(vport, ndlp,
1595  						   NLP_STE_NPR_NODE);
1596  			}
1597  		}
1598  	} else
1599  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1600  				 "3065 GFT_ID failed x%08x\n", ulp_status);
1601  
1602  out:
1603  	lpfc_ct_free_iocb(phba, cmdiocb);
1604  	lpfc_nlp_put(ns_ndlp);
1605  }
1606  
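/**
 * lpfc_cmpl_ct - Common completion handling for NameServer CT requests
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * Logs the completion, retries the NameServer command on recoverable
 * errors up to LPFC_MAX_NS_RETRY, wakes any waiter expecting a
 * synchronous DA_ID completion, and releases the iocb and ndlp
 * references.
 */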
1607  static void
1608  lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1609  	     struct lpfc_iocbq *rspiocb)
1610  {
1611  	struct lpfc_vport *vport = cmdiocb->vport;
1612  	struct lpfc_dmabuf *inp;
1613  	struct lpfc_dmabuf *outp;
1614  	struct lpfc_sli_ct_request *CTrsp;
1615  	struct lpfc_nodelist *ndlp;
1616  	int cmdcode, rc;
1617  	uint8_t retry;
1618  	uint32_t latt;
1619  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1620  	u32 ulp_word4 = get_job_word4(phba, rspiocb);
1621  
1622  	/* First save ndlp, before we overwrite it */
1623  	ndlp = cmdiocb->ndlp;
1624  
1625  	/* we pass cmdiocb to state machine which needs rspiocb as well */
1626  	cmdiocb->rsp_iocb = rspiocb;
1627  
1628  	inp = cmdiocb->cmd_dmabuf;
1629  	outp = cmdiocb->rsp_dmabuf;
1630  
1631  	cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
1632  					CommandResponse.bits.CmdRsp);
1633  	CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1634  
1635  	latt = lpfc_els_chk_latt(vport);
1636  
1637  	/* CT request completes status <ulp_status> CmdRsp <CmdRsp> */
1638  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1639  			 "0209 CT Request completes, latt %d, "
1640  			 "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n",
1641  			 latt, ulp_status,
1642  			 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1643  			 get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag);
1644  
1645  	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1646  		"CT cmd cmpl:     status:x%x/x%x cmd:x%x",
1647  		ulp_status, ulp_word4, cmdcode);
1648  
1649  	if (ulp_status) {
1650  		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1651  				 "0268 NS cmd x%x Error (x%x x%x)\n",
1652  				 cmdcode, ulp_status, ulp_word4);
1653  
1654  		if (ulp_status == IOSTAT_LOCAL_REJECT &&
1655  		    (((ulp_word4 & IOERR_PARAM_MASK) ==
1656  		      IOERR_SLI_DOWN) ||
1657  		     ((ulp_word4 & IOERR_PARAM_MASK) ==
1658  		      IOERR_SLI_ABORTED)))
1659  			goto out;
1660  
1661  		retry = cmdiocb->retry;
1662  		if (retry >= LPFC_MAX_NS_RETRY)
1663  			goto out;
1664  
1665  		retry++;
1666  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1667  				 "0250 Retrying NS cmd %x\n", cmdcode);
1668  		rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
1669  		if (rc == 0)
1670  			goto out;
1671  	}
1672  
1673  out:
1674  	/* If the caller wanted a synchronous DA_ID completion, signal the
1675  	 * wait obj and clear flag to reset the vport.
1676  	 */
1677  	if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
1678  		if (ndlp->da_id_waitq)
1679  			wake_up(ndlp->da_id_waitq);
1680  	}
1681  
1682  	spin_lock_irq(&ndlp->lock);
1683  	ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
1684  	spin_unlock_irq(&ndlp->lock);
1685  
1686  	lpfc_ct_free_iocb(phba, cmdiocb);
1687  	lpfc_nlp_put(ndlp);
1688  	return;
1689  }
1690  
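/**
 * lpfc_cmpl_ct_cmd_rft_id - Completion handler for the RFT_ID registration
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * On an FS_ACC response, records FC_CT_RFT_ID in the vport's ct_flags,
 * then defers the remaining cleanup to lpfc_cmpl_ct().
 */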
1691  static void
1692  lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1693  			struct lpfc_iocbq *rspiocb)
1694  {
1695  	struct lpfc_vport *vport = cmdiocb->vport;
1696  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1697  
1698  	if (ulp_status == IOSTAT_SUCCESS) {
1699  		struct lpfc_dmabuf *outp;
1700  		struct lpfc_sli_ct_request *CTrsp;
1701  
1702  		outp = cmdiocb->rsp_dmabuf;
1703  		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1704  		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1705  		    SLI_CT_RESPONSE_FS_ACC)
1706  			vport->ct_flags |= FC_CT_RFT_ID;
1707  	}
1708  	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1709  	return;
1710  }
1711  
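/**
 * lpfc_cmpl_ct_cmd_rnn_id - Completion handler for the RNN_ID registration
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * On an FS_ACC response, records FC_CT_RNN_ID in the vport's ct_flags,
 * then defers the remaining cleanup to lpfc_cmpl_ct().
 */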
1712  static void
1713  lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1714  			struct lpfc_iocbq *rspiocb)
1715  {
1716  	struct lpfc_vport *vport = cmdiocb->vport;
1717  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1718  
1719  	if (ulp_status == IOSTAT_SUCCESS) {
1720  		struct lpfc_dmabuf *outp;
1721  		struct lpfc_sli_ct_request *CTrsp;
1722  
1723  		outp = cmdiocb->rsp_dmabuf;
1724  		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1725  		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1726  		    SLI_CT_RESPONSE_FS_ACC)
1727  			vport->ct_flags |= FC_CT_RNN_ID;
1728  	}
1729  	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1730  	return;
1731  }
1732  
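/**
 * lpfc_cmpl_ct_cmd_rspn_id - Completion handler for the RSPN_ID registration
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * On an FS_ACC response to the symbolic port name registration, records
 * FC_CT_RSPN_ID in the vport's ct_flags, then defers the remaining
 * cleanup to lpfc_cmpl_ct().
 */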
1733  static void
1734  lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1735  			 struct lpfc_iocbq *rspiocb)
1736  {
1737  	struct lpfc_vport *vport = cmdiocb->vport;
1738  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1739  
1740  	if (ulp_status == IOSTAT_SUCCESS) {
1741  		struct lpfc_dmabuf *outp;
1742  		struct lpfc_sli_ct_request *CTrsp;
1743  
1744  		outp = cmdiocb->rsp_dmabuf;
1745  		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1746  		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1747  		    SLI_CT_RESPONSE_FS_ACC)
1748  			vport->ct_flags |= FC_CT_RSPN_ID;
1749  	}
1750  	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1751  	return;
1752  }
1753  
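/**
 * lpfc_cmpl_ct_cmd_rsnn_nn - Completion handler for the RSNN_NN registration
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * On an FS_ACC response to the symbolic node name registration, records
 * FC_CT_RSNN_NN in the vport's ct_flags, then defers the remaining
 * cleanup to lpfc_cmpl_ct().
 */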
1754  static void
1755  lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1756  			 struct lpfc_iocbq *rspiocb)
1757  {
1758  	struct lpfc_vport *vport = cmdiocb->vport;
1759  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1760  
1761  	if (ulp_status == IOSTAT_SUCCESS) {
1762  		struct lpfc_dmabuf *outp;
1763  		struct lpfc_sli_ct_request *CTrsp;
1764  
1765  		outp = cmdiocb->rsp_dmabuf;
1766  		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1767  		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1768  		    SLI_CT_RESPONSE_FS_ACC)
1769  			vport->ct_flags |= FC_CT_RSNN_NN;
1770  	}
1771  	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1772  	return;
1773  }
1774  
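/**
 * lpfc_cmpl_ct_cmd_da_id - Completion handler for the DA_ID deregistration
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * The response status is not inspected; ct_flags are cleared and the
 * common handling in lpfc_cmpl_ct() runs regardless of the outcome.
 */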
1775  static void
1776  lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1777  		       struct lpfc_iocbq *rspiocb)
1778  {
1779  	struct lpfc_vport *vport = cmdiocb->vport;
1780  
1781  	/* even if it fails we will act as though it succeeded. */
1782  	vport->ct_flags = 0;
1783  	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1784  	return;
1785  }
1786  
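/**
 * lpfc_cmpl_ct_cmd_rff_id - Completion handler for the RFF_ID registration
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * On an FS_ACC response to the FC4 features registration, records
 * FC_CT_RFF_ID in the vport's ct_flags, then defers the remaining
 * cleanup to lpfc_cmpl_ct().
 */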
1787  static void
1788  lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1789  			struct lpfc_iocbq *rspiocb)
1790  {
1791  	struct lpfc_vport *vport = cmdiocb->vport;
1792  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1793  
1794  	if (ulp_status == IOSTAT_SUCCESS) {
1795  		struct lpfc_dmabuf *outp;
1796  		struct lpfc_sli_ct_request *CTrsp;
1797  
1798  		outp = cmdiocb->rsp_dmabuf;
1799  		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1800  		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1801  		    SLI_CT_RESPONSE_FS_ACC)
1802  			vport->ct_flags |= FC_CT_RFF_ID;
1803  	}
1804  	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1805  	return;
1806  }
1807  
1808  /*
1809   * Although the symbolic port name is thought to be an integer
1810   * as of January 18, 2016, leave it as a string until more of
1811   * the record state becomes defined.
1812   */
1813  int
1814  lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
1815  	size_t size)
1816  {
1817  	int n;
1818  
1819  	/*
1820  	 * Use the lpfc board number as the Symbolic Port
1821  	 * Name object.  NPIV is not in play so this integer
1822  	 * value is sufficient and unique per FC-ID.
1823  	 */
1824  	n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
1825  	return n;
1826  }
1827  
1828  
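/**
 * lpfc_vport_symbolic_node_name - Build the Symbolic Node Name string
 * @vport: pointer to a host virtual N_Port data structure.
 * @symbol: buffer that receives the NULL-terminated name.
 * @size: size of the @symbol buffer.
 *
 * The name is composed of the adapter model, firmware revision, driver
 * version, host name and OS name, truncated as needed to fit @size.
 *
 * Return: length of the resulting string.
 */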
1829  int
1830  lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
1831  	size_t size)
1832  {
1833  	char fwrev[FW_REV_STR_SIZE] = {0};
1834  	char tmp[MAXHOSTNAMELEN] = {0};
1835  
1836  	memset(symbol, 0, size);
1837  
1838  	scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
1839  	if (strlcat(symbol, tmp, size) >= size)
1840  		goto buffer_done;
1841  
1842  	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
1843  	scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
1844  	if (strlcat(symbol, tmp, size) >= size)
1845  		goto buffer_done;
1846  
1847  	scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
1848  	if (strlcat(symbol, tmp, size) >= size)
1849  		goto buffer_done;
1850  
1851  	scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
1852  	if (strlcat(symbol, tmp, size) >= size)
1853  		goto buffer_done;
1854  
1855  	/* Note :- OS name is "Linux" */
1856  	scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
1857  	strlcat(symbol, tmp, size);
1858  
1859  buffer_done:
1860  	return strnlen(symbol, size);
1861  
1862  }
1863  
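/**
 * lpfc_find_map_node - Count mapped and unmapped non-fabric nodes
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Return: the number of nodes on the vport in the MAPPED or UNMAPPED
 * state, excluding fabric nodes.  Used to report the FDMI "discovered
 * ports" attribute and to detect changes in lpfc_fdmi_change_check().
 */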
1864  static uint32_t
1865  lpfc_find_map_node(struct lpfc_vport *vport)
1866  {
1867  	struct lpfc_nodelist *ndlp, *next_ndlp;
1868  	struct Scsi_Host  *shost;
1869  	uint32_t cnt = 0;
1870  
1871  	shost = lpfc_shost_from_vport(vport);
1872  	spin_lock_irq(shost->host_lock);
1873  	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1874  		if (ndlp->nlp_type & NLP_FABRIC)
1875  			continue;
1876  		if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) ||
1877  		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
1878  			cnt++;
1879  	}
1880  	spin_unlock_irq(shost->host_lock);
1881  	return cnt;
1882  }
1883  
1884  /*
1885   * This routine will return the FC4 Type associated with the CT
1886   * GID_FT command.
1887   */
1888  int
1889  lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
1890  {
1891  	struct lpfc_sli_ct_request *CtReq;
1892  	struct lpfc_dmabuf *mp;
1893  	uint32_t type;
1894  
1895  	mp = cmdiocb->cmd_dmabuf;
1896  	if (mp == NULL)
1897  		return 0;
1898  	CtReq = (struct lpfc_sli_ct_request *)mp->virt;
1899  	type = (uint32_t)CtReq->un.gid.Fc4Type;
1900  	if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME))
1901  		return 0;
1902  	return type;
1903  }
1904  
1905  /*
1906   * lpfc_ns_cmd
1907   * Description:
1908   *    Issue Cmd to NameServer
1909   *       SLI_CTNS_GID_FT
1910   *       SLI_CTNS_RFT_ID
1911   */
1912  int
1913  lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1914  	    uint8_t retry, uint32_t context)
1915  {
1916  	struct lpfc_nodelist * ndlp;
1917  	struct lpfc_hba *phba = vport->phba;
1918  	struct lpfc_dmabuf *mp, *bmp;
1919  	struct lpfc_sli_ct_request *CtReq;
1920  	struct ulp_bde64 *bpl;
1921  	void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
1922  		      struct lpfc_iocbq *) = NULL;
1923  	uint32_t *ptr;
1924  	uint32_t rsp_size = 1024;
1925  	size_t   size;
1926  	int rc = 0;
1927  
1928  	ndlp = lpfc_findnode_did(vport, NameServer_DID);
1929  	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
1930  		rc = 1;
1931  		goto ns_cmd_exit;
1932  	}
1933  
1934  	/* fill in BDEs for command */
1935  	/* Allocate buffer for command payload */
1936  	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1937  	if (!mp) {
1938  		rc = 2;
1939  		goto ns_cmd_exit;
1940  	}
1941  
1942  	INIT_LIST_HEAD(&mp->list);
1943  	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
1944  	if (!mp->virt) {
1945  		rc = 3;
1946  		goto ns_cmd_free_mp;
1947  	}
1948  
1949  	/* Allocate buffer for Buffer ptr list */
1950  	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1951  	if (!bmp) {
1952  		rc = 4;
1953  		goto ns_cmd_free_mpvirt;
1954  	}
1955  
1956  	INIT_LIST_HEAD(&bmp->list);
1957  	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
1958  	if (!bmp->virt) {
1959  		rc = 5;
1960  		goto ns_cmd_free_bmp;
1961  	}
1962  
1963  	/* NameServer Req */
1964  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1965  			 "0236 NameServer Req Data: x%x x%x x%x x%x\n",
1966  			 cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt,
1967  			 context);
1968  
1969  	bpl = (struct ulp_bde64 *) bmp->virt;
1970  	memset(bpl, 0, sizeof(struct ulp_bde64));
1971  	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
1972  	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
1973  	bpl->tus.f.bdeFlags = 0;
1974  	if (cmdcode == SLI_CTNS_GID_FT)
1975  		bpl->tus.f.bdeSize = GID_REQUEST_SZ;
1976  	else if (cmdcode == SLI_CTNS_GID_PT)
1977  		bpl->tus.f.bdeSize = GID_REQUEST_SZ;
1978  	else if (cmdcode == SLI_CTNS_GFF_ID)
1979  		bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
1980  	else if (cmdcode == SLI_CTNS_GFT_ID)
1981  		bpl->tus.f.bdeSize = GFT_REQUEST_SZ;
1982  	else if (cmdcode == SLI_CTNS_RFT_ID)
1983  		bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
1984  	else if (cmdcode == SLI_CTNS_RNN_ID)
1985  		bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
1986  	else if (cmdcode == SLI_CTNS_RSPN_ID)
1987  		bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
1988  	else if (cmdcode == SLI_CTNS_RSNN_NN)
1989  		bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
1990  	else if (cmdcode == SLI_CTNS_DA_ID)
1991  		bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
1992  	else if (cmdcode == SLI_CTNS_RFF_ID)
1993  		bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
1994  	else
1995  		bpl->tus.f.bdeSize = 0;
1996  	bpl->tus.w = le32_to_cpu(bpl->tus.w);
1997  
1998  	CtReq = (struct lpfc_sli_ct_request *) mp->virt;
1999  	memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
2000  	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
2001  	CtReq->RevisionId.bits.InId = 0;
2002  	CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
2003  	CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
2004  	CtReq->CommandResponse.bits.Size = 0;
2005  	switch (cmdcode) {
2006  	case SLI_CTNS_GID_FT:
2007  		CtReq->CommandResponse.bits.CmdRsp =
2008  		    cpu_to_be16(SLI_CTNS_GID_FT);
2009  		CtReq->un.gid.Fc4Type = context;
2010  
2011  		if (vport->port_state < LPFC_NS_QRY)
2012  			vport->port_state = LPFC_NS_QRY;
2013  		lpfc_set_disctmo(vport);
2014  		cmpl = lpfc_cmpl_ct_cmd_gid_ft;
2015  		rsp_size = FC_MAX_NS_RSP;
2016  		break;
2017  
2018  	case SLI_CTNS_GID_PT:
2019  		CtReq->CommandResponse.bits.CmdRsp =
2020  		    cpu_to_be16(SLI_CTNS_GID_PT);
2021  		CtReq->un.gid.PortType = context;
2022  
2023  		if (vport->port_state < LPFC_NS_QRY)
2024  			vport->port_state = LPFC_NS_QRY;
2025  		lpfc_set_disctmo(vport);
2026  		cmpl = lpfc_cmpl_ct_cmd_gid_pt;
2027  		rsp_size = FC_MAX_NS_RSP;
2028  		break;
2029  
2030  	case SLI_CTNS_GFF_ID:
2031  		CtReq->CommandResponse.bits.CmdRsp =
2032  			cpu_to_be16(SLI_CTNS_GFF_ID);
2033  		CtReq->un.gff.PortId = cpu_to_be32(context);
2034  		cmpl = lpfc_cmpl_ct_cmd_gff_id;
2035  		break;
2036  
2037  	case SLI_CTNS_GFT_ID:
2038  		CtReq->CommandResponse.bits.CmdRsp =
2039  			cpu_to_be16(SLI_CTNS_GFT_ID);
2040  		CtReq->un.gft.PortId = cpu_to_be32(context);
2041  		cmpl = lpfc_cmpl_ct_cmd_gft_id;
2042  		break;
2043  
2044  	case SLI_CTNS_RFT_ID:
2045  		vport->ct_flags &= ~FC_CT_RFT_ID;
2046  		CtReq->CommandResponse.bits.CmdRsp =
2047  		    cpu_to_be16(SLI_CTNS_RFT_ID);
2048  		CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);
2049  
2050  		/* Register Application Services type if vmid enabled. */
2051  		if (phba->cfg_vmid_app_header)
2052  			CtReq->un.rft.app_serv_reg =
2053  				cpu_to_be32(RFT_APP_SERV_REG);
2054  
2055  		/* Register FC4 FCP type if enabled.  */
2056  		if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
2057  		    vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
2058  			CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
2059  
2060  		/* Register NVME type if enabled. */
2061  		if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
2062  		    vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
2063  			CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
2064  
2065  		ptr = (uint32_t *)CtReq;
2066  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2067  				 "6433 Issue RFT (%s %s %s): %08x %08x %08x "
2068  				 "%08x %08x %08x %08x %08x\n",
2069  				 CtReq->un.rft.fcp_reg ? "FCP" : " ",
2070  				 CtReq->un.rft.nvme_reg ? "NVME" : " ",
2071  				 CtReq->un.rft.app_serv_reg ? "APPS" : " ",
2072  				 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
2073  				 *(ptr + 4), *(ptr + 5),
2074  				 *(ptr + 6), *(ptr + 7));
2075  		cmpl = lpfc_cmpl_ct_cmd_rft_id;
2076  		break;
2077  
2078  	case SLI_CTNS_RNN_ID:
2079  		vport->ct_flags &= ~FC_CT_RNN_ID;
2080  		CtReq->CommandResponse.bits.CmdRsp =
2081  		    cpu_to_be16(SLI_CTNS_RNN_ID);
2082  		CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
2083  		memcpy(CtReq->un.rnn.wwnn,  &vport->fc_nodename,
2084  		       sizeof(struct lpfc_name));
2085  		cmpl = lpfc_cmpl_ct_cmd_rnn_id;
2086  		break;
2087  
2088  	case SLI_CTNS_RSPN_ID:
2089  		vport->ct_flags &= ~FC_CT_RSPN_ID;
2090  		CtReq->CommandResponse.bits.CmdRsp =
2091  		    cpu_to_be16(SLI_CTNS_RSPN_ID);
2092  		CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
2093  		size = sizeof(CtReq->un.rspn.symbname);
2094  		CtReq->un.rspn.len =
2095  			lpfc_vport_symbolic_port_name(vport,
2096  			CtReq->un.rspn.symbname, size);
2097  		cmpl = lpfc_cmpl_ct_cmd_rspn_id;
2098  		break;
2099  	case SLI_CTNS_RSNN_NN:
2100  		vport->ct_flags &= ~FC_CT_RSNN_NN;
2101  		CtReq->CommandResponse.bits.CmdRsp =
2102  		    cpu_to_be16(SLI_CTNS_RSNN_NN);
2103  		memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
2104  		       sizeof(struct lpfc_name));
2105  		size = sizeof(CtReq->un.rsnn.symbname);
2106  		CtReq->un.rsnn.len =
2107  			lpfc_vport_symbolic_node_name(vport,
2108  			CtReq->un.rsnn.symbname, size);
2109  		cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
2110  		break;
2111  	case SLI_CTNS_DA_ID:
2112  		/* Implement DA_ID Nameserver request */
2113  		CtReq->CommandResponse.bits.CmdRsp =
2114  			cpu_to_be16(SLI_CTNS_DA_ID);
2115  		CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
2116  		cmpl = lpfc_cmpl_ct_cmd_da_id;
2117  		break;
2118  	case SLI_CTNS_RFF_ID:
2119  		vport->ct_flags &= ~FC_CT_RFF_ID;
2120  		CtReq->CommandResponse.bits.CmdRsp =
2121  		    cpu_to_be16(SLI_CTNS_RFF_ID);
2122  		CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
2123  		CtReq->un.rff.fbits = FC4_FEATURE_INIT;
2124  
2125  		/* The driver always supports FC_TYPE_FCP.  However, the
2126  		 * caller can specify NVME (type x28) as well, but only
2127  		 * if that FC4 type is enabled.
2128  		 */
2129  		if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
2130  		     (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
2131  		    (context == FC_TYPE_NVME)) {
2132  			if ((vport == phba->pport) && phba->nvmet_support) {
2133  				CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
2134  					FC4_FEATURE_NVME_DISC);
2135  				lpfc_nvmet_update_targetport(phba);
2136  			} else {
2137  				lpfc_nvme_update_localport(vport);
2138  			}
2139  			CtReq->un.rff.type_code = context;
2140  
2141  		} else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
2142  			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
2143  			   (context == FC_TYPE_FCP))
2144  			CtReq->un.rff.type_code = context;
2145  
2146  		else
2147  			goto ns_cmd_free_bmpvirt;
2148  
2149  		ptr = (uint32_t *)CtReq;
2150  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2151  				 "6434 Issue RFF (%s): %08x %08x %08x %08x "
2152  				 "%08x %08x %08x %08x\n",
2153  				 (context == FC_TYPE_NVME) ? "NVME" : "FCP",
2154  				 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
2155  				 *(ptr + 4), *(ptr + 5),
2156  				 *(ptr + 6), *(ptr + 7));
2157  		cmpl = lpfc_cmpl_ct_cmd_rff_id;
2158  		break;
2159  	}
2160  	/* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
2161  	 * to hold ndlp reference for the corresponding callback function.
2162  	 */
2163  	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
2164  		/* On success, the cmpl function will free the buffers */
2165  		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
2166  			"Issue CT cmd:    cmd:x%x did:x%x",
2167  			cmdcode, ndlp->nlp_DID, 0);
2168  		return 0;
2169  	}
2170  	rc = 6;
2171  
2172  ns_cmd_free_bmpvirt:
2173  	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
2174  ns_cmd_free_bmp:
2175  	kfree(bmp);
2176  ns_cmd_free_mpvirt:
2177  	lpfc_mbuf_free(phba, mp->virt, mp->phys);
2178  ns_cmd_free_mp:
2179  	kfree(mp);
2180  ns_cmd_exit:
2181  	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2182  			 "0266 Issue NameServer Req x%x err %d Data: x%x x%x\n",
2183  			 cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
2184  	return 1;
2185  }
2186  
2187  /**
2188   * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
2189   * @phba: Pointer to HBA context object.
2190   * @mask: Initial port attributes mask
2191   *
2192   * This function checks to see if any vports have deferred their FDMI RPRT.
2193   * A vport's RPRT may be deferred if it is issued before the primary port's
2194   * RHBA completes.
2195   */
2196  static void
2197  lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
2198  {
2199  	struct lpfc_vport **vports;
2200  	struct lpfc_vport *vport;
2201  	struct lpfc_nodelist *ndlp;
2202  	int i;
2203  
2204  	phba->hba_flag |= HBA_RHBA_CMPL;
2205  	vports = lpfc_create_vport_work_array(phba);
2206  	if (vports) {
2207  		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2208  			vport = vports[i];
2209  			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2210  			if (!ndlp)
2211  				continue;
2212  			if (vport->ct_flags & FC_CT_RPRT_DEFER) {
2213  				vport->ct_flags &= ~FC_CT_RPRT_DEFER;
2214  				vport->fdmi_port_mask = mask;
2215  				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
2216  			}
2217  		}
2218  	}
2219  	lpfc_destroy_vport_work_array(phba, vports);
2220  }
2221  
2222  /**
2223   * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
2224   * @phba: Pointer to HBA context object.
2225   * @cmdiocb: Pointer to the command IOCBQ.
2226   * @rspiocb: Pointer to the response IOCBQ.
2227   *
2228   * This function handles the completion of a driver-initiated FDMI
2229   * CT command issued during discovery.
2230   */
2231  static void
2232  lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2233  		       struct lpfc_iocbq *rspiocb)
2234  {
2235  	struct lpfc_vport *vport = cmdiocb->vport;
2236  	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
2237  	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
2238  	struct lpfc_sli_ct_request *CTcmd = inp->virt;
2239  	struct lpfc_sli_ct_request *CTrsp = outp->virt;
2240  	__be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
2241  	__be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
2242  	struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
2243  	uint32_t latt, cmd, err;
2244  	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
2245  	u32 ulp_word4 = get_job_word4(phba, rspiocb);
2246  
2247  	latt = lpfc_els_chk_latt(vport);
2248  	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
2249  		"FDMI cmpl:       status:x%x/x%x latt:%d",
2250  		ulp_status, ulp_word4, latt);
2251  
2252  	if (latt || ulp_status) {
2253  
2254  		/* Look for a retryable error */
2255  		if (ulp_status == IOSTAT_LOCAL_REJECT) {
2256  			switch ((ulp_word4 & IOERR_PARAM_MASK)) {
2257  			case IOERR_SLI_ABORTED:
2258  			case IOERR_SLI_DOWN:
2259  				/* Driver aborted this IO.  No retry as error
2260  				 * is likely Offline->Online or some adapter
2261  				 * error.  Recovery will try again.
2262  				 */
2263  				break;
2264  			case IOERR_ABORT_IN_PROGRESS:
2265  			case IOERR_SEQUENCE_TIMEOUT:
2266  			case IOERR_ILLEGAL_FRAME:
2267  			case IOERR_NO_RESOURCES:
2268  			case IOERR_ILLEGAL_COMMAND:
2269  				cmdiocb->retry++;
2270  				if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY)
2271  					break;
2272  
2273  				/* Retry the same FDMI command */
2274  				err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING,
2275  							  cmdiocb, 0);
2276  				if (err == IOCB_ERROR)
2277  					break;
2278  				return;
2279  			default:
2280  				break;
2281  			}
2282  		}
2283  
2284  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2285  				 "0229 FDMI cmd %04x failed, latt = %d "
2286  				 "ulp_status: x%x, rid x%x\n",
2287  				 be16_to_cpu(fdmi_cmd), latt, ulp_status,
2288  				 ulp_word4);
2289  	}
2290  
2291  	free_ndlp = cmdiocb->ndlp;
2292  	lpfc_ct_free_iocb(phba, cmdiocb);
2293  	lpfc_nlp_put(free_ndlp);
2294  
2295  	ndlp = lpfc_findnode_did(vport, FDMI_DID);
2296  	if (!ndlp)
2297  		return;
2298  
2299  	/* Check for a CT LS_RJT response */
2300  	cmd =  be16_to_cpu(fdmi_cmd);
2301  	if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) {
2302  		/* FDMI rsp failed */
2303  		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
2304  				 "0220 FDMI cmd failed FS_RJT Data: x%x\n", cmd);
2305  
2306  		/* Should we fall back to FDMI-2 / FDMI-1? */
2307  		switch (cmd) {
2308  		case SLI_MGMT_RHBA:
2309  			if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
2310  				/* Fallback to FDMI-1 for HBA attributes */
2311  				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
2312  
2313  				/* If HBA attributes are FDMI1, so should
2314  				 * port attributes be for consistency.
2315  				 */
2316  				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
2317  				/* Start over */
2318  				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
2319  			}
2320  			return;
2321  
2322  		case SLI_MGMT_RPRT:
2323  			if (vport->port_type != LPFC_PHYSICAL_PORT) {
2324  				ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2325  				if (!ndlp)
2326  					return;
2327  			}
2328  			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
2329  				/* Fallback to FDMI-1 */
2330  				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
2331  				/* Start over */
2332  				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
2333  				return;
2334  			}
2335  			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
2336  				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
2337  				/* Retry the same command */
2338  				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
2339  			}
2340  			return;
2341  
2342  		case SLI_MGMT_RPA:
2343  			/* No retry on Vendor, RPA only done on physical port */
2344  			if (phba->link_flag & LS_CT_VEN_RPA) {
2345  				phba->link_flag &= ~LS_CT_VEN_RPA;
2346  				if (phba->cmf_active_mode == LPFC_CFG_OFF)
2347  					return;
2348  				lpfc_printf_log(phba, KERN_WARNING,
2349  						LOG_DISCOVERY | LOG_ELS,
2350  						"6460 VEN FDMI RPA RJT\n");
2351  				return;
2352  			}
2353  			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
2354  				/* Fallback to FDMI-1 */
2355  				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
2356  				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
2357  				/* Start over */
2358  				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
2359  				return;
2360  			}
2361  			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
2362  				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
2363  				/* Retry the same command */
2364  				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
2365  			}
2366  			return;
2367  		}
2368  	}
2369  
2370  	/*
2371  	 * On success, need to cycle through FDMI registration for discovery
2372  	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
2373  	 * DPRT -> RPRT (vports)
2374  	 */
2375  	switch (cmd) {
2376  	case SLI_MGMT_RHBA:
2377  		/* Check for any RPRTs deferred till after RHBA completes */
2378  		lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);
2379  
2380  		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
2381  		break;
2382  
2383  	case SLI_MGMT_DHBA:
2384  		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
2385  		break;
2386  
2387  	case SLI_MGMT_DPRT:
2388  		if (vport->port_type == LPFC_PHYSICAL_PORT) {
2389  			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
2390  		} else {
2391  			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2392  			if (!ndlp)
2393  				return;
2394  
2395  			/* Only issue a RPRT for the vport if the RHBA
2396  			 * for the physical port completes successfully.
2397  			 * We may have to defer the RPRT accordingly.
2398  			 */
2399  			if (phba->hba_flag & HBA_RHBA_CMPL) {
2400  				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
2401  			} else {
2402  				lpfc_printf_vlog(vport, KERN_INFO,
2403  						 LOG_DISCOVERY,
2404  						 "6078 RPRT deferred\n");
2405  				vport->ct_flags |= FC_CT_RPRT_DEFER;
2406  			}
2407  		}
2408  		break;
2409  	case SLI_MGMT_RPA:
2410  		if (vport->port_type == LPFC_PHYSICAL_PORT &&
2411  		    phba->sli4_hba.pc_sli4_params.mi_ver) {
2412  			/* mi is only for the physical port, no vports */
2413  			if (phba->link_flag & LS_CT_VEN_RPA) {
2414  				lpfc_printf_vlog(vport, KERN_INFO,
2415  						 LOG_DISCOVERY | LOG_ELS |
2416  						 LOG_CGN_MGMT,
2417  						 "6449 VEN RPA FDMI Success\n");
2418  				phba->link_flag &= ~LS_CT_VEN_RPA;
2419  				break;
2420  			}
2421  
2422  			lpfc_printf_log(phba, KERN_INFO,
2423  					LOG_DISCOVERY | LOG_CGN_MGMT,
2424  					"6210 Issue Vendor MI FDMI %x\n",
2425  					phba->sli4_hba.pc_sli4_params.mi_ver);
2426  
2427  			/* CGN is only for the physical port, no vports */
2428  			if (lpfc_fdmi_cmd(vport, ndlp, cmd,
2429  					  LPFC_FDMI_VENDOR_ATTR_mi) == 0)
2430  				phba->link_flag |= LS_CT_VEN_RPA;
2431  			lpfc_printf_log(phba, KERN_INFO,
2432  					LOG_DISCOVERY | LOG_ELS,
2433  					"6458 Send MI FDMI:%x Flag x%x\n",
2434  					phba->sli4_hba.pc_sli4_params.mi_ver,
2435  					phba->link_flag);
2436  		} else {
2437  			lpfc_printf_log(phba, KERN_INFO,
2438  					LOG_DISCOVERY | LOG_ELS,
2439  					"6459 No FDMI VEN MI support - "
2440  					"RPA Success\n");
2441  		}
2442  		break;
2443  	}
2444  	return;
2445  }
2446  
2447  
2448  /**
2449   * lpfc_fdmi_change_check - Check for changed FDMI parameters
2450   * @vport: pointer to a host virtual N_Port data structure.
2451   *
2452   * Check how many mapped NPorts we are connected to
2453   * Check if our hostname changed
2454   * Called from hbeat timeout routine to check if any FDMI parameters
2455   * changed. If so, re-register those Attributes.
2456   */
2457  void
2458  lpfc_fdmi_change_check(struct lpfc_vport *vport)
2459  {
2460  	struct lpfc_hba *phba = vport->phba;
2461  	struct lpfc_nodelist *ndlp;
2462  	uint16_t cnt;
2463  
2464  	if (!lpfc_is_link_up(phba))
2465  		return;
2466  
2467  	/* Must be connected to a Fabric */
2468  	if (!(vport->fc_flag & FC_FABRIC))
2469  		return;
2470  
2471  	ndlp = lpfc_findnode_did(vport, FDMI_DID);
2472  	if (!ndlp)
2473  		return;
2474  
2475  	/* Check if system hostname changed */
2476  	if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
2477  		memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
2478  		scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
2479  			  init_utsname()->nodename);
2480  		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
2481  
2482  		/* Since this affects multiple HBA and PORT attributes, we need
2483  		 * to de-register and go through the whole FDMI registration cycle.
2484  		 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
2485  		 * DPRT -> RPRT (vports)
2486  		 */
2487  		if (vport->port_type == LPFC_PHYSICAL_PORT) {
2488  			/* For extra Vendor RPA */
2489  			phba->link_flag &= ~LS_CT_VEN_RPA;
2490  			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
2491  		} else {
2492  			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2493  			if (!ndlp)
2494  				return;
2495  			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
2496  		}
2497  
2498  		/* Since this code path registers all the port attributes
2499  		 * we can just return without further checking.
2500  		 */
2501  		return;
2502  	}
2503  
2504  	if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
2505  		return;
2506  
2507  	/* Check if the number of mapped NPorts changed */
2508  	cnt = lpfc_find_map_node(vport);
2509  	if (cnt == vport->fdmi_num_disc)
2510  		return;
2511  
2512  	if (vport->port_type == LPFC_PHYSICAL_PORT) {
2513  		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
2514  			      LPFC_FDMI_PORT_ATTR_num_disc);
2515  	} else {
2516  		ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2517  		if (!ndlp)
2518  			return;
2519  		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
2520  			      LPFC_FDMI_PORT_ATTR_num_disc);
2521  	}
2522  }
2523  
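/*
 * Helpers that format individual FDMI attribute entries.  Each routine
 * fills in the big-endian Type/Length header plus the attribute value
 * and returns the number of bytes consumed in the attribute block.
 */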
2524  static inline int
2525  lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
2526  {
2527  	struct lpfc_fdmi_attr_u32 *ae = attr;
2528  	int size = sizeof(*ae);
2529  
2530  	ae->type = cpu_to_be16(attrtype);
2531  	ae->len = cpu_to_be16(size);
2532  	ae->value_u32 = cpu_to_be32(attrval);
2533  
2534  	return size;
2535  }
2536  
2537  static inline int
2538  lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
2539  {
2540  	struct lpfc_fdmi_attr_wwn *ae = attr;
2541  	int size = sizeof(*ae);
2542  
2543  	ae->type = cpu_to_be16(attrtype);
2544  	ae->len = cpu_to_be16(size);
2545  	/* WWNs assumed to be bytestreams - Big Endian presentation */
2546  	memcpy(ae->name, wwn,
2547  	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
2548  
2549  	return size;
2550  }
2551  
2552  static inline int
2553  lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
2554  			   struct lpfc_name *wwnn, struct lpfc_name *wwpn)
2555  {
2556  	struct lpfc_fdmi_attr_fullwwn *ae = attr;
2557  	u8 *nname = ae->nname;
2558  	u8 *pname = ae->pname;
2559  	int size = sizeof(*ae);
2560  
2561  	ae->type = cpu_to_be16(attrtype);
2562  	ae->len = cpu_to_be16(size);
2563  	/* WWNs assumed to be bytestreams - Big Endian presentation */
2564  	memcpy(nname, wwnn,
2565  	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
2566  	memcpy(pname, wwpn,
2567  	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
2568  
2569  	return size;
2570  }
2571  
2572  static inline int
2573  lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
2574  {
2575  	struct lpfc_fdmi_attr_string *ae = attr;
2576  	int len, size;
2577  
2578  	/*
2579  	 * We are trusting the caller that if an FDMI string field
2580  	 * is capped at 64 bytes, the caller passes in a string of
2581  	 * 64 bytes or less.
2582  	 */
2583  
2584  	strncpy(ae->value_string, attrstring, sizeof(ae->value_string));
2585  	len = strnlen(ae->value_string, sizeof(ae->value_string));
2586  	/* round string length to a 32-bit boundary; ensure there's a NUL */
2587  	len += (len & 3) ? (4 - (len & 3)) : 4;
2588  	/* size is Type/Len (4 bytes) plus string length */
2589  	size = FOURBYTES + len;
2590  
2591  	ae->type = cpu_to_be16(attrtype);
2592  	ae->len = cpu_to_be16(size);
2593  
2594  	return size;
2595  }
2596  
2597  /* Bitfields for FC4 Types that can be reported */
2598  #define ATTR_FC4_CT	0x00000001
2599  #define ATTR_FC4_FCP	0x00000002
2600  #define ATTR_FC4_NVME	0x00000004
2601  
2602  static inline int
2603  lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
2604  {
2605  	struct lpfc_fdmi_attr_fc4types *ae = attr;
2606  	int size = sizeof(*ae);
2607  
2608  	ae->type = cpu_to_be16(attrtype);
2609  	ae->len = cpu_to_be16(size);
2610  
2611  	if (typemask & ATTR_FC4_FCP)
2612  		ae->value_types[2] = 0x01; /* Type 0x8 - FCP */
2613  
2614  	if (typemask & ATTR_FC4_CT)
2615  		ae->value_types[7] = 0x01; /* Type 0x20 - CT */
2616  
2617  	if (typemask & ATTR_FC4_NVME)
2618  		ae->value_types[6] = 0x01; /* Type 0x28 - NVME */
2619  
2620  	return size;
2621  }
2622  
2623  /* Routines for all individual HBA attributes */
2624  static int
2625  lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
2626  {
2627  	return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
2628  			&vport->fc_sparam.nodeName);
2629  }
2630  
2631  static int
2632  lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
2633  {
2634  	/* This string MUST be consistent with other FC platforms
2635  	 * supported by Broadcom.
2636  	 */
2637  	return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
2638  			"Emulex Corporation");
2639  }
2640  
2641  static int
2642  lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
2643  {
2644  	struct lpfc_hba *phba = vport->phba;
2645  
2646  	return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
2647  			phba->SerialNumber);
2648  }
2649  
2650  static int
2651  lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
2652  {
2653  	struct lpfc_hba *phba = vport->phba;
2654  
2655  	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
2656  			phba->ModelName);
2657  }
2658  
2659  static int
2660  lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
2661  {
2662  	struct lpfc_hba *phba = vport->phba;
2663  
2664  	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
2665  			phba->ModelDesc);
2666  }
2667  
2668  static int
2669  lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
2670  {
2671  	struct lpfc_hba *phba = vport->phba;
2672  	lpfc_vpd_t *vp = &phba->vpd;
2673  	char buf[16] = { 0 };
2674  
2675  	snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);
2676  
2677  	return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
2678  }
2679  
2680  static int
2681  lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
2682  {
2683  	return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
2684  			lpfc_release_version);
2685  }
2686  
2687  static int
2688  lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
2689  {
2690  	struct lpfc_hba *phba = vport->phba;
2691  	char buf[64] = { 0 };
2692  
2693  	if (phba->sli_rev == LPFC_SLI_REV4) {
2694  		lpfc_decode_firmware_rev(phba, buf, 1);
2695  
2696  		return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
2697  				buf);
2698  	}
2699  
2700  	return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
2701  			phba->OptionROMVersion);
2702  }
2703  
2704  static int
2705  lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
2706  {
2707  	struct lpfc_hba *phba = vport->phba;
2708  	char buf[64] = { 0 };
2709  
2710  	lpfc_decode_firmware_rev(phba, buf, 1);
2711  
2712  	return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
2713  }
2714  
2715  static int
2716  lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
2717  {
2718  	char buf[256] = { 0 };
2719  
2720  	snprintf(buf, sizeof(buf), "%s %s %s",
2721  		 init_utsname()->sysname,
2722  		 init_utsname()->release,
2723  		 init_utsname()->version);
2724  
2725  	return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
2726  }
2727  
2728  static int
2729  lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
2730  {
2731  	return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
2732  			LPFC_MAX_CT_SIZE);
2733  }
2734  
2735  static int
2736  lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
2737  {
2738  	char buf[256] = { 0 };
2739  
2740  	lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));
2741  
2742  	return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
2743  }
2744  
2745  static int
2746  lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
2747  {
2748  	return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
2749  }
2750  
2751  static int
2752  lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
2753  {
2754  	/* Each driver instance corresponds to a single port */
2755  	return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
2756  }
2757  
2758  static int
2759  lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
2760  {
2761  	return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
2762  			&vport->fabric_nodename);
2763  }
2764  
2765  static int
2766  lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
2767  {
2768  	struct lpfc_hba *phba = vport->phba;
2769  
2770  	return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
2771  			phba->BIOSVersion);
2772  }
2773  
2774  static int
2775  lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
2776  {
2777  	/* Driver doesn't have access to this information */
2778  	return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
2779  }
2780  
2781  static int
2782  lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
2783  {
2784  	return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
2785  }
2786  
2787  /*
2788   * Routines for all individual PORT attributes
2789   */
2790  
2791  static int
2792  lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
2793  {
2794  	struct lpfc_hba   *phba = vport->phba;
2795  	u32 fc4types;
2796  
2797  	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
2798  
2799  	/* Check to see if Firmware supports NVME and on physical port */
2800  	if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
2801  	    phba->sli4_hba.pc_sli4_params.nvme)
2802  		fc4types |= ATTR_FC4_NVME;
2803  
2804  	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
2805  			fc4types);
2806  }
2807  
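/*
 * Report the supported link speeds.  For FC links the speeds are derived
 * from the adapter's link speed capability mask (phba->lmt); when trunking
 * is configured, the per-lane speeds are scaled up to the 2-lane or 4-lane
 * aggregate.  FCoE links report only the single Ethernet speed in use.
 */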
2808  static int
2809  lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
2810  {
2811  	struct lpfc_hba *phba = vport->phba;
2812  	u32 speeds = 0;
2813  	u32 tcfg;
2814  	u8 i, cnt;
2815  
2816  	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2817  		cnt = 0;
2818  		if (phba->sli_rev == LPFC_SLI_REV4) {
2819  			tcfg = phba->sli4_hba.conf_trunk;
2820  			for (i = 0; i < 4; i++, tcfg >>= 1)
2821  				if (tcfg & 1)
2822  					cnt++;
2823  		}
2824  
2825  		if (cnt > 2) { /* 4 lane trunk group */
2826  			if (phba->lmt & LMT_64Gb)
2827  				speeds |= HBA_PORTSPEED_256GFC;
2828  			if (phba->lmt & LMT_32Gb)
2829  				speeds |= HBA_PORTSPEED_128GFC;
2830  			if (phba->lmt & LMT_16Gb)
2831  				speeds |= HBA_PORTSPEED_64GFC;
2832  		} else if (cnt) { /* 2 lane trunk group */
2833  			if (phba->lmt & LMT_128Gb)
2834  				speeds |= HBA_PORTSPEED_256GFC;
2835  			if (phba->lmt & LMT_64Gb)
2836  				speeds |= HBA_PORTSPEED_128GFC;
2837  			if (phba->lmt & LMT_32Gb)
2838  				speeds |= HBA_PORTSPEED_64GFC;
2839  			if (phba->lmt & LMT_16Gb)
2840  				speeds |= HBA_PORTSPEED_32GFC;
2841  		} else {
2842  			if (phba->lmt & LMT_256Gb)
2843  				speeds |= HBA_PORTSPEED_256GFC;
2844  			if (phba->lmt & LMT_128Gb)
2845  				speeds |= HBA_PORTSPEED_128GFC;
2846  			if (phba->lmt & LMT_64Gb)
2847  				speeds |= HBA_PORTSPEED_64GFC;
2848  			if (phba->lmt & LMT_32Gb)
2849  				speeds |= HBA_PORTSPEED_32GFC;
2850  			if (phba->lmt & LMT_16Gb)
2851  				speeds |= HBA_PORTSPEED_16GFC;
2852  			if (phba->lmt & LMT_10Gb)
2853  				speeds |= HBA_PORTSPEED_10GFC;
2854  			if (phba->lmt & LMT_8Gb)
2855  				speeds |= HBA_PORTSPEED_8GFC;
2856  			if (phba->lmt & LMT_4Gb)
2857  				speeds |= HBA_PORTSPEED_4GFC;
2858  			if (phba->lmt & LMT_2Gb)
2859  				speeds |= HBA_PORTSPEED_2GFC;
2860  			if (phba->lmt & LMT_1Gb)
2861  				speeds |= HBA_PORTSPEED_1GFC;
2862  		}
2863  	} else {
2864  		/* FCoE links support only one speed */
2865  		switch (phba->fc_linkspeed) {
2866  		case LPFC_ASYNC_LINK_SPEED_10GBPS:
2867  			speeds = HBA_PORTSPEED_10GE;
2868  			break;
2869  		case LPFC_ASYNC_LINK_SPEED_25GBPS:
2870  			speeds = HBA_PORTSPEED_25GE;
2871  			break;
2872  		case LPFC_ASYNC_LINK_SPEED_40GBPS:
2873  			speeds = HBA_PORTSPEED_40GE;
2874  			break;
2875  		case LPFC_ASYNC_LINK_SPEED_100GBPS:
2876  			speeds = HBA_PORTSPEED_100GE;
2877  			break;
2878  		}
2879  	}
2880  
2881  	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
2882  }
2883  
2884  static int
2885  lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
2886  {
2887  	struct lpfc_hba   *phba = vport->phba;
2888  	u32 speeds = 0;
2889  
2890  	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2891  		switch (phba->fc_linkspeed) {
2892  		case LPFC_LINK_SPEED_1GHZ:
2893  			speeds = HBA_PORTSPEED_1GFC;
2894  			break;
2895  		case LPFC_LINK_SPEED_2GHZ:
2896  			speeds = HBA_PORTSPEED_2GFC;
2897  			break;
2898  		case LPFC_LINK_SPEED_4GHZ:
2899  			speeds = HBA_PORTSPEED_4GFC;
2900  			break;
2901  		case LPFC_LINK_SPEED_8GHZ:
2902  			speeds = HBA_PORTSPEED_8GFC;
2903  			break;
2904  		case LPFC_LINK_SPEED_10GHZ:
2905  			speeds = HBA_PORTSPEED_10GFC;
2906  			break;
2907  		case LPFC_LINK_SPEED_16GHZ:
2908  			speeds = HBA_PORTSPEED_16GFC;
2909  			break;
2910  		case LPFC_LINK_SPEED_32GHZ:
2911  			speeds = HBA_PORTSPEED_32GFC;
2912  			break;
2913  		case LPFC_LINK_SPEED_64GHZ:
2914  			speeds = HBA_PORTSPEED_64GFC;
2915  			break;
2916  		case LPFC_LINK_SPEED_128GHZ:
2917  			speeds = HBA_PORTSPEED_128GFC;
2918  			break;
2919  		case LPFC_LINK_SPEED_256GHZ:
2920  			speeds = HBA_PORTSPEED_256GFC;
2921  			break;
2922  		default:
2923  			speeds = HBA_PORTSPEED_UNKNOWN;
2924  			break;
2925  		}
2926  	} else {
2927  		switch (phba->fc_linkspeed) {
2928  		case LPFC_ASYNC_LINK_SPEED_10GBPS:
2929  			speeds = HBA_PORTSPEED_10GE;
2930  			break;
2931  		case LPFC_ASYNC_LINK_SPEED_25GBPS:
2932  			speeds = HBA_PORTSPEED_25GE;
2933  			break;
2934  		case LPFC_ASYNC_LINK_SPEED_40GBPS:
2935  			speeds = HBA_PORTSPEED_40GE;
2936  			break;
2937  		case LPFC_ASYNC_LINK_SPEED_100GBPS:
2938  			speeds = HBA_PORTSPEED_100GE;
2939  			break;
2940  		default:
2941  			speeds = HBA_PORTSPEED_UNKNOWN;
2942  			break;
2943  		}
2944  	}
2945  
2946  	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
2947  }
2948  
2949  static int
2950  lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
2951  {
2952  	struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;
2953  
2954  	return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
2955  			(((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
2956  			  (uint32_t)hsp->cmn.bbRcvSizeLsb);
2957  }
2958  
2959  static int
2960  lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
2961  {
2962  	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2963  	char buf[64] = { 0 };
2964  
2965  	snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
2966  		 shost->host_no);
2967  
2968  	return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
2969  }
2970  
2971  static int
2972  lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
2973  {
2974  	char buf[64] = { 0 };
2975  
2976  	scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);
2977  
2978  	return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
2979  }
2980  
2981  static int
2982  lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
2983  {
2984  	return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
2985  			&vport->fc_sparam.nodeName);
2986  }
2987  
2988  static int
2989  lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
2990  {
2991  	return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
2992  			&vport->fc_sparam.portName);
2993  }
2994  
2995  static int
2996  lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
2997  {
2998  	char buf[256] = { 0 };
2999  
3000  	lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));
3001  
3002  	return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
3003  }
3004  
3005  static int
3006  lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
3007  {
3008  	struct lpfc_hba *phba = vport->phba;
3009  
3010  	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
3011  			(phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
3012  				LPFC_FDMI_PORTTYPE_NLPORT :
3013  				LPFC_FDMI_PORTTYPE_NPORT);
3014  }
3015  
3016  static int
3017  lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
3018  {
3019  	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
3020  			FC_COS_CLASS2 | FC_COS_CLASS3);
3021  }
3022  
3023  static int
3024  lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
3025  {
3026  	return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
3027  			&vport->fabric_portname);
3028  }
3029  
3030  static int
3031  lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
3032  {
3033  	struct lpfc_hba *phba = vport->phba;
3034  	u32 fc4types;
3035  
3036  	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
3037  
3038  	/* Check to see if NVME is configured or not */
3039  	if (vport == phba->pport &&
3040  	    phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3041  		fc4types |= ATTR_FC4_NVME;
3042  
3043  	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
3044  			fc4types);
3045  }
3046  
3047  static int
3048  lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
3049  {
3050  	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
3051  			LPFC_FDMI_PORTSTATE_ONLINE);
3052  }
3053  
3054  static int
3055  lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
3056  {
3057  	vport->fdmi_num_disc = lpfc_find_map_node(vport);
3058  
3059  	return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
3060  			vport->fdmi_num_disc);
3061  }
3062  
3063  static int
3064  lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
3065  {
3066  	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
3067  }
3068  
3069  static int
3070  lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
3071  {
3072  	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
3073  			"Smart SAN Initiator");
3074  }
3075  
3076  static int
3077  lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
3078  {
3079  	return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
3080  			&vport->fc_sparam.nodeName,
3081  			&vport->fc_sparam.portName);
3082  }
3083  
3084  static int
3085  lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
3086  {
3087  	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
3088  			"Smart SAN Version 2.0");
3089  }
3090  
3091  static int
3092  lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
3093  {
3094  	struct lpfc_hba *phba = vport->phba;
3095  
3096  	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
3097  			phba->ModelName);
3098  }
3099  
3100  static int
3101  lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
3102  {
3103  	/* SRIOV (type 3) is not supported */
3104  
3105  	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
3106  			(vport->vpi) ?  2 /* NPIV */ : 1 /* Physical */);
3107  }
3108  
3109  static int
3110  lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
3111  {
3112  	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
3113  }
3114  
3115  static int
3116  lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
3117  {
3118  	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
3119  }
3120  
3121  static int
3122  lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
3123  {
3124  	struct lpfc_hba *phba = vport->phba;
3125  	char buf[32] = { 0 };
3126  
3127  	sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);
3128  
3129  	return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
3130  }
3131  
3132  /* RHBA attribute jump table */
3133  static int (*lpfc_fdmi_hba_action[])
3134  	(struct lpfc_vport *vport, void *attrbuf) = {
3135  	/* Action routine                 Mask bit     Attribute type */
3136  	lpfc_fdmi_hba_attr_wwnn,	  /* bit0     RHBA_NODENAME           */
3137  	lpfc_fdmi_hba_attr_manufacturer,  /* bit1     RHBA_MANUFACTURER       */
3138  	lpfc_fdmi_hba_attr_sn,		  /* bit2     RHBA_SERIAL_NUMBER      */
3139  	lpfc_fdmi_hba_attr_model,	  /* bit3     RHBA_MODEL              */
3140  	lpfc_fdmi_hba_attr_description,	  /* bit4     RHBA_MODEL_DESCRIPTION  */
3141  	lpfc_fdmi_hba_attr_hdw_ver,	  /* bit5     RHBA_HARDWARE_VERSION   */
3142  	lpfc_fdmi_hba_attr_drvr_ver,	  /* bit6     RHBA_DRIVER_VERSION     */
3143  	lpfc_fdmi_hba_attr_rom_ver,	  /* bit7     RHBA_OPTION_ROM_VERSION */
3144  	lpfc_fdmi_hba_attr_fmw_ver,	  /* bit8     RHBA_FIRMWARE_VERSION   */
3145  	lpfc_fdmi_hba_attr_os_ver,	  /* bit9     RHBA_OS_NAME_VERSION    */
3146  	lpfc_fdmi_hba_attr_ct_len,	  /* bit10    RHBA_MAX_CT_PAYLOAD_LEN */
3147  	lpfc_fdmi_hba_attr_symbolic_name, /* bit11    RHBA_SYM_NODENAME       */
3148  	lpfc_fdmi_hba_attr_vendor_info,	  /* bit12    RHBA_VENDOR_INFO        */
3149  	lpfc_fdmi_hba_attr_num_ports,	  /* bit13    RHBA_NUM_PORTS          */
3150  	lpfc_fdmi_hba_attr_fabric_wwnn,	  /* bit14    RHBA_FABRIC_WWNN        */
3151  	lpfc_fdmi_hba_attr_bios_ver,	  /* bit15    RHBA_BIOS_VERSION       */
3152  	lpfc_fdmi_hba_attr_bios_state,	  /* bit16    RHBA_BIOS_STATE         */
3153  	lpfc_fdmi_hba_attr_vendor_id,	  /* bit17    RHBA_VENDOR_ID          */
3154  };
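
/*
 * Each bit N set in the HBA attribute mask (vport->fdmi_hba_mask, or the
 * new_mask passed to lpfc_fdmi_cmd()) selects lpfc_fdmi_hba_action[N] to
 * append one attribute to an RHBA/RHAT request.
 */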
3155  
3156  /* RPA / RPRT attribute jump table */
3157  static int (*lpfc_fdmi_port_action[])
3158  	(struct lpfc_vport *vport, void *attrbuf) = {
3159  	/* Action routine                   Mask bit   Attribute type */
3160  	lpfc_fdmi_port_attr_fc4type,        /* bit0   RPRT_SUPPORT_FC4_TYPES  */
3161  	lpfc_fdmi_port_attr_support_speed,  /* bit1   RPRT_SUPPORTED_SPEED    */
3162  	lpfc_fdmi_port_attr_speed,          /* bit2   RPRT_PORT_SPEED         */
3163  	lpfc_fdmi_port_attr_max_frame,      /* bit3   RPRT_MAX_FRAME_SIZE     */
3164  	lpfc_fdmi_port_attr_os_devname,     /* bit4   RPRT_OS_DEVICE_NAME     */
3165  	lpfc_fdmi_port_attr_host_name,      /* bit5   RPRT_HOST_NAME          */
3166  	lpfc_fdmi_port_attr_wwnn,           /* bit6   RPRT_NODENAME           */
3167  	lpfc_fdmi_port_attr_wwpn,           /* bit7   RPRT_PORTNAME           */
3168  	lpfc_fdmi_port_attr_symbolic_name,  /* bit8   RPRT_SYM_PORTNAME       */
3169  	lpfc_fdmi_port_attr_port_type,      /* bit9   RPRT_PORT_TYPE          */
3170  	lpfc_fdmi_port_attr_class,          /* bit10  RPRT_SUPPORTED_CLASS    */
3171  	lpfc_fdmi_port_attr_fabric_wwpn,    /* bit11  RPRT_FABRICNAME         */
3172  	lpfc_fdmi_port_attr_active_fc4type, /* bit12  RPRT_ACTIVE_FC4_TYPES   */
3173  	lpfc_fdmi_port_attr_port_state,     /* bit13  RPRT_PORT_STATE         */
3174  	lpfc_fdmi_port_attr_num_disc,       /* bit14  RPRT_DISC_PORT          */
3175  	lpfc_fdmi_port_attr_nportid,        /* bit15  RPRT_PORT_ID            */
3176  	lpfc_fdmi_smart_attr_service,       /* bit16  RPRT_SMART_SERVICE      */
3177  	lpfc_fdmi_smart_attr_guid,          /* bit17  RPRT_SMART_GUID         */
3178  	lpfc_fdmi_smart_attr_version,       /* bit18  RPRT_SMART_VERSION      */
3179  	lpfc_fdmi_smart_attr_model,         /* bit19  RPRT_SMART_MODEL        */
3180  	lpfc_fdmi_smart_attr_port_info,     /* bit20  RPRT_SMART_PORT_INFO    */
3181  	lpfc_fdmi_smart_attr_qos,           /* bit21  RPRT_SMART_QOS          */
3182  	lpfc_fdmi_smart_attr_security,      /* bit22  RPRT_SMART_SECURITY     */
3183  	lpfc_fdmi_vendor_attr_mi,           /* bit23  RPRT_VENDOR_MI          */
3184  };
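
/*
 * Likewise, bit N in vport->fdmi_port_mask (or new_mask) selects
 * lpfc_fdmi_port_action[N] when an RPA/RPRT request is built.
 */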
3185  
3186  /**
3187   * lpfc_fdmi_cmd - Build and send an FDMI cmd to the specified NPort
3188   * @vport: pointer to a host virtual N_Port data structure.
3189   * @ndlp: destination node for the FDMI cmd (the request is skipped if NULL)
3190   * @cmdcode: FDMI command to send
3191   * @new_mask: Mask of HBA or PORT Attributes to send
3192   *
3193   * Builds and sends an FDMI command using the CT subsystem.
3194   */
3195  int
3196  lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3197  	      int cmdcode, uint32_t new_mask)
3198  {
3199  	struct lpfc_hba *phba = vport->phba;
3200  	struct lpfc_dmabuf *rq, *rsp;
3201  	struct lpfc_sli_ct_request *CtReq;
3202  	struct ulp_bde64_le *bde;
3203  	uint32_t bit_pos;
3204  	uint32_t size, addsz;
3205  	uint32_t rsp_size;
3206  	uint32_t mask;
3207  	struct lpfc_fdmi_reg_hba *rh;
3208  	struct lpfc_fdmi_port_entry *pe;
3209  	struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
3210  	struct lpfc_fdmi_attr_block *ab = NULL;
3211  	int  (*func)(struct lpfc_vport *vport, void *attrbuf);
3212  	void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3213  		     struct lpfc_iocbq *rspiocb);
3214  
3215  	if (!ndlp)
3216  		return 0;
3217  
3218  	cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
3219  
3220  	/* fill in BDEs for command */
3221  	/* Allocate buffer for command payload */
3222  	rq = kmalloc(sizeof(*rq), GFP_KERNEL);
3223  	if (!rq)
3224  		goto fdmi_cmd_exit;
3225  
3226  	rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
3227  	if (!rq->virt)
3228  		goto fdmi_cmd_free_rq;
3229  
3230  	/* Allocate buffer for Buffer ptr list */
3231  	rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
3232  	if (!rsp)
3233  		goto fdmi_cmd_free_rqvirt;
3234  
3235  	rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
3236  	if (!rsp->virt)
3237  		goto fdmi_cmd_free_rsp;
3238  
3239  	INIT_LIST_HEAD(&rq->list);
3240  	INIT_LIST_HEAD(&rsp->list);
3241  
3242  	/* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
3243  	memset(rq->virt, 0, LPFC_BPL_SIZE);
3244  	rsp_size = LPFC_BPL_SIZE;
3245  
3246  	/* FDMI request */
3247  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3248  			 "0218 FDMI Request x%x mask x%x Data: x%x x%x x%x\n",
3249  			 cmdcode, new_mask, vport->fdmi_port_mask,
3250  			 vport->fc_flag, vport->port_state);
3251  
3252  	CtReq = (struct lpfc_sli_ct_request *)rq->virt;
3253  
3254  	/* First populate the CT_IU preamble */
3255  	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
3256  	CtReq->RevisionId.bits.InId = 0;
3257  
3258  	CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
3259  	CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
3260  
3261  	CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
3262  
3263  	size = 0;
3264  
3265  	/* Next fill in the specific FDMI cmd information */
3266  	switch (cmdcode) {
3267  	case SLI_MGMT_RHAT:
3268  	case SLI_MGMT_RHBA:
3269  		rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
3270  		/* HBA Identifier */
3271  		memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
3272  		       sizeof(struct lpfc_name));
3273  		size += sizeof(struct lpfc_fdmi_hba_ident);
3274  
3275  		if (cmdcode == SLI_MGMT_RHBA) {
3276  			/* Registered Port List */
3277  			/* One entry (port) per adapter */
3278  			rh->rpl.EntryCnt = cpu_to_be32(1);
3279  			memcpy(&rh->rpl.pe.PortName,
3280  			       &phba->pport->fc_sparam.portName,
3281  			       sizeof(struct lpfc_name));
3282  			size += sizeof(struct lpfc_fdmi_reg_port_list);
3283  		}
3284  
3285  		ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
3286  		ab->EntryCnt = 0;
3287  		size += FOURBYTES;	/* add length of EntryCnt field */
3288  
3289  		bit_pos = 0;
3290  		if (new_mask)
3291  			mask = new_mask;
3292  		else
3293  			mask = vport->fdmi_hba_mask;
3294  
3295  		/* Mask will dictate what attributes to build in the request */
3296  		while (mask) {
3297  			if (mask & 0x1) {
3298  				func = lpfc_fdmi_hba_action[bit_pos];
3299  				addsz = func(vport, ((uint8_t *)rh + size));
3300  				if (addsz) {
3301  					ab->EntryCnt++;
3302  					size += addsz;
3303  				}
3304  				/* check if another attribute fits */
3305  				if ((size + FDMI_MAX_ATTRLEN) >
3306  				    (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
3307  					goto hba_out;
3308  			}
3309  			mask = mask >> 1;
3310  			bit_pos++;
3311  		}
3312  hba_out:
3313  		ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
3314  		/* Total size */
3315  		size += GID_REQUEST_SZ - 4;
3316  		break;
3317  
3318  	case SLI_MGMT_RPRT:
3319  		if (vport->port_type != LPFC_PHYSICAL_PORT) {
3320  			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
3321  			if (!ndlp)
3322  				return 0;
3323  		}
3324  		fallthrough;
3325  	case SLI_MGMT_RPA:
3326  		/* Store base ptr right after preamble */
3327  		base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un;
3328  
3329  		if (cmdcode == SLI_MGMT_RPRT) {
3330  			rh = (struct lpfc_fdmi_reg_hba *)base;
3331  			/* HBA Identifier */
3332  			memcpy(&rh->hi.PortName,
3333  			       &phba->pport->fc_sparam.portName,
3334  			       sizeof(struct lpfc_name));
3335  			pab = (struct lpfc_fdmi_reg_portattr *)
3336  				((uint8_t *)base + sizeof(struct lpfc_name));
3337  			size += sizeof(struct lpfc_name);
3338  		} else {
3339  			pab = base;
3340  		}
3341  
3342  		memcpy((uint8_t *)&pab->PortName,
3343  		       (uint8_t *)&vport->fc_sparam.portName,
3344  		       sizeof(struct lpfc_name));
3345  		pab->ab.EntryCnt = 0;
3346  		/* add length of name and EntryCnt field */
3347  		size += sizeof(struct lpfc_name) + FOURBYTES;
3348  
3349  		bit_pos = 0;
3350  		if (new_mask)
3351  			mask = new_mask;
3352  		else
3353  			mask = vport->fdmi_port_mask;
3354  
3355  		/* Mask will dictate what attributes to build in the request */
3356  		while (mask) {
3357  			if (mask & 0x1) {
3358  				func = lpfc_fdmi_port_action[bit_pos];
3359  				addsz = func(vport, ((uint8_t *)base + size));
3360  				if (addsz) {
3361  					pab->ab.EntryCnt++;
3362  					size += addsz;
3363  				}
3364  				/* check if another attribute fits */
3365  				if ((size + FDMI_MAX_ATTRLEN) >
3366  				    (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
3367  					goto port_out;
3368  			}
3369  			mask = mask >> 1;
3370  			bit_pos++;
3371  		}
3372  port_out:
3373  		pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
3374  		size += GID_REQUEST_SZ - 4;
3375  		break;
3376  
3377  	case SLI_MGMT_GHAT:
3378  	case SLI_MGMT_GRPL:
3379  		rsp_size = FC_MAX_NS_RSP;
3380  		fallthrough;
3381  	case SLI_MGMT_DHBA:
3382  	case SLI_MGMT_DHAT:
3383  		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
3384  		memcpy((uint8_t *)&pe->PortName,
3385  		       (uint8_t *)&vport->fc_sparam.portName,
3386  		       sizeof(struct lpfc_name));
3387  		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
3388  		break;
3389  
3390  	case SLI_MGMT_GPAT:
3391  	case SLI_MGMT_GPAS:
3392  		rsp_size = FC_MAX_NS_RSP;
3393  		fallthrough;
3394  	case SLI_MGMT_DPRT:
3395  		if (vport->port_type != LPFC_PHYSICAL_PORT) {
3396  			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
3397  			if (!ndlp)
3398  				return 0;
3399  		}
3400  		fallthrough;
3401  	case SLI_MGMT_DPA:
3402  		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
3403  		memcpy((uint8_t *)&pe->PortName,
3404  		       (uint8_t *)&vport->fc_sparam.portName,
3405  		       sizeof(struct lpfc_name));
3406  		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
3407  		break;
3408  	case SLI_MGMT_GRHL:
3409  		size = GID_REQUEST_SZ - 4;
3410  		break;
3411  	default:
3412  		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
3413  				 "0298 FDMI cmdcode x%x not supported\n",
3414  				 cmdcode);
3415  		goto fdmi_cmd_free_rspvirt;
3416  	}
3417  	CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
3418  
3419  	bde = (struct ulp_bde64_le *)rsp->virt;
3420  	bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
3421  	bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
3422  	bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
3423  				     ULP_BDE64_TYPE_SHIFT);
3424  	bde->type_size |= cpu_to_le32(size);
3425  
3426  	/*
3427  	 * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
3428  	 * to hold ndlp reference for the corresponding callback function.
3429  	 */
3430  	if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
3431  		return 0;
3432  
3433  fdmi_cmd_free_rspvirt:
3434  	lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
3435  fdmi_cmd_free_rsp:
3436  	kfree(rsp);
3437  fdmi_cmd_free_rqvirt:
3438  	lpfc_mbuf_free(phba, rq->virt, rq->phys);
3439  fdmi_cmd_free_rq:
3440  	kfree(rq);
3441  fdmi_cmd_exit:
3442  	/* Issue FDMI request failed */
3443  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3444  			 "0244 Issue FDMI request failed Data: x%x\n",
3445  			 cmdcode);
3446  	return 1;
3447  }
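
/*
 * Hedged usage sketch (illustrative only, not a verbatim call site from this
 * file): discovery code would typically kick off registration with
 *
 *	rc = lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
 *
 * where a non-zero return means the request could not be built or issued.
 * Passing new_mask == 0 falls back to vport->fdmi_hba_mask /
 * vport->fdmi_port_mask for the attribute walk above.
 */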
3448  
3449  /**
3450   * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
3451   * @t: Context object of the timer.
3452   *
3453   * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
3454   * the worker thread.
3455   **/
3456  void
3457  lpfc_delayed_disc_tmo(struct timer_list *t)
3458  {
3459  	struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo);
3460  	struct lpfc_hba   *phba = vport->phba;
3461  	uint32_t tmo_posted;
3462  	unsigned long iflag;
3463  
3464  	spin_lock_irqsave(&vport->work_port_lock, iflag);
3465  	tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
3466  	if (!tmo_posted)
3467  		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
3468  	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3469  
3470  	if (!tmo_posted)
3471  		lpfc_worker_wake_up(phba);
3472  	return;
3473  }
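
/*
 * Hedged setup sketch (the real wiring lives in the vport initialization
 * code and the exact delay is driver policy, assumed here):
 *
 *	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
 *	mod_timer(&vport->delayed_disc_tmo, jiffies + msecs_to_jiffies(1000));
 */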
3474  
3475  /**
3476   * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
3477   *      handle delayed discovery.
3478   * @vport: pointer to a host virtual N_Port data structure.
3479   *
3480   * This function starts NPort discovery for the vport.
3481   **/
3482  void
3483  lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
3484  {
3485  	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3486  
3487  	spin_lock_irq(shost->host_lock);
3488  	if (!(vport->fc_flag & FC_DISC_DELAYED)) {
3489  		spin_unlock_irq(shost->host_lock);
3490  		return;
3491  	}
3492  	vport->fc_flag &= ~FC_DISC_DELAYED;
3493  	spin_unlock_irq(shost->host_lock);
3494  
3495  	lpfc_do_scr_ns_plogi(vport->phba, vport);
3496  }
3497  
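/*
 * lpfc_decode_firmware_rev - format the adapter firmware revision string.
 *
 * SLI-4 parts report the name directly (opFwName).  Older parts pack the
 * revision into a 32-bit word; as a worked example for the rBit-format
 * decode below (value chosen purely for illustration), rev == 0x00006128
 * gives b1=6, b2=1, b3=0, release code 2 -> 'B', b4=8, producing "6.10B8"
 * (with the firmware name appended when @flag is non-zero).
 */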
3498  void
3499  lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
3500  {
3501  	struct lpfc_sli *psli = &phba->sli;
3502  	lpfc_vpd_t *vp = &phba->vpd;
3503  	uint32_t b1, b2, b3, b4, i, rev;
3504  	char c;
3505  	uint32_t *ptr, str[4];
3506  	uint8_t *fwname;
3507  
3508  	if (phba->sli_rev == LPFC_SLI_REV4)
3509  		snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
3510  	else if (vp->rev.rBit) {
3511  		if (psli->sli_flag & LPFC_SLI_ACTIVE)
3512  			rev = vp->rev.sli2FwRev;
3513  		else
3514  			rev = vp->rev.sli1FwRev;
3515  
3516  		b1 = (rev & 0x0000f000) >> 12;
3517  		b2 = (rev & 0x00000f00) >> 8;
3518  		b3 = (rev & 0x000000c0) >> 6;
3519  		b4 = (rev & 0x00000030) >> 4;
3520  
3521  		switch (b4) {
3522  		case 0:
3523  			c = 'N';
3524  			break;
3525  		case 1:
3526  			c = 'A';
3527  			break;
3528  		case 2:
3529  			c = 'B';
3530  			break;
3531  		case 3:
3532  			c = 'X';
3533  			break;
3534  		default:
3535  			c = 0;
3536  			break;
3537  		}
3538  		b4 = (rev & 0x0000000f);
3539  
3540  		if (psli->sli_flag & LPFC_SLI_ACTIVE)
3541  			fwname = vp->rev.sli2FwName;
3542  		else
3543  			fwname = vp->rev.sli1FwName;
3544  
3545  		for (i = 0; i < 16; i++)
3546  			if (fwname[i] == 0x20)
3547  				fwname[i] = 0;
3548  
3549  		ptr = (uint32_t *)fwname;
3550  
3551  		for (i = 0; i < 3; i++)
3552  			str[i] = be32_to_cpu(*ptr++);
3553  
3554  		if (c == 0) {
3555  			if (flag)
3556  				sprintf(fwrevision, "%d.%d%d (%s)",
3557  					b1, b2, b3, (char *)str);
3558  			else
3559  				sprintf(fwrevision, "%d.%d%d", b1,
3560  					b2, b3);
3561  		} else {
3562  			if (flag)
3563  				sprintf(fwrevision, "%d.%d%d%c%d (%s)",
3564  					b1, b2, b3, c,
3565  					b4, (char *)str);
3566  			else
3567  				sprintf(fwrevision, "%d.%d%d%c%d",
3568  					b1, b2, b3, c, b4);
3569  		}
3570  	} else {
3571  		rev = vp->rev.smFwRev;
3572  
3573  		b1 = (rev & 0xff000000) >> 24;
3574  		b2 = (rev & 0x00f00000) >> 20;
3575  		b3 = (rev & 0x000f0000) >> 16;
3576  		c  = (rev & 0x0000ff00) >> 8;
3577  		b4 = (rev & 0x000000ff);
3578  
3579  		sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
3580  	}
3581  	return;
3582  }
3583  
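/*
 * lpfc_cmpl_ct_cmd_vmid - completion handler for the application-services
 * (VMID) CT requests issued by lpfc_vmid_cmd(): RAPP_IDENT responses update
 * the matching hash-table entry with the fabric-assigned app_id, DAPP_IDENT
 * responses are only logged, and DALLAPP_ID responses clear all registered
 * VMIDs so registration can start over.
 */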
3584  static void
3585  lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3586  		      struct lpfc_iocbq *rspiocb)
3587  {
3588  	struct lpfc_vport *vport = cmdiocb->vport;
3589  	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
3590  	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
3591  	struct lpfc_sli_ct_request *ctcmd = inp->virt;
3592  	struct lpfc_sli_ct_request *ctrsp = outp->virt;
3593  	__be16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
3594  	struct app_id_object *app;
3595  	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
3596  	u32 cmd, hash, bucket;
3597  	struct lpfc_vmid *vmp, *cur;
3598  	u8 *data = outp->virt;
3599  	int i;
3600  
3601  	cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp);
3602  	if (cmd == SLI_CTAS_DALLAPP_ID)
3603  		lpfc_ct_free_iocb(phba, cmdiocb);
3604  
3605  	if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
3606  		if (cmd != SLI_CTAS_DALLAPP_ID)
3607  			goto free_res;
3608  	}
3609  	/* Check for a CT LS_RJT response */
3610  	if (be16_to_cpu(rsp) == SLI_CT_RESPONSE_FS_RJT) {
3611  		if (cmd != SLI_CTAS_DALLAPP_ID)
3612  			lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3613  					 "3306 VMID FS_RJT Data: x%x x%x x%x\n",
3614  					 cmd, ctrsp->ReasonCode,
3615  					 ctrsp->Explanation);
3616  		if ((cmd != SLI_CTAS_DALLAPP_ID) ||
3617  		    (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) ||
3618  		    (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
3619  			/* If DALLAPP_ID failed retry later */
3620  			if (cmd == SLI_CTAS_DALLAPP_ID)
3621  				vport->load_flag |= FC_DEREGISTER_ALL_APP_ID;
3622  			goto free_res;
3623  		}
3624  	}
3625  
3626  	switch (cmd) {
3627  	case SLI_CTAS_RAPP_IDENT:
3628  		app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data);
3629  		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3630  				 "6712 RAPP_IDENT app id %d  port id x%x id "
3631  				 "len %d\n", be32_to_cpu(app->app_id),
3632  				 be32_to_cpu(app->port_id),
3633  				 app->obj.entity_id_len);
3634  
3635  		if (app->obj.entity_id_len == 0 || app->port_id == 0)
3636  			goto free_res;
3637  
3638  		hash = lpfc_vmid_hash_fn(app->obj.entity_id,
3639  					 app->obj.entity_id_len);
3640  		vmp = lpfc_get_vmid_from_hashtable(vport, hash,
3641  						  app->obj.entity_id);
3642  		if (vmp) {
3643  			write_lock(&vport->vmid_lock);
3644  			vmp->un.app_id = be32_to_cpu(app->app_id);
3645  			vmp->flag |= LPFC_VMID_REGISTERED;
3646  			vmp->flag &= ~LPFC_VMID_REQ_REGISTER;
3647  			write_unlock(&vport->vmid_lock);
3648  			/* Set IN USE flag */
3649  			vport->vmid_flag |= LPFC_VMID_IN_USE;
3650  		} else {
3651  			lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3652  					 "6901 No entry found %s hash %d\n",
3653  					 app->obj.entity_id, hash);
3654  		}
3655  		break;
3656  	case SLI_CTAS_DAPP_IDENT:
3657  		app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data);
3658  		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3659  				 "6713 DAPP_IDENT app id %d  port id x%x\n",
3660  				 be32_to_cpu(app->app_id),
3661  				 be32_to_cpu(app->port_id));
3662  		break;
3663  	case SLI_CTAS_DALLAPP_ID:
3664  		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3665  				 "8856 Deregistered all app ids\n");
3666  		read_lock(&vport->vmid_lock);
3667  		for (i = 0; i < phba->cfg_max_vmid; i++) {
3668  			vmp = &vport->vmid[i];
3669  			if (vmp->flag != LPFC_VMID_SLOT_FREE)
3670  				memset(vmp, 0, sizeof(struct lpfc_vmid));
3671  		}
3672  		read_unlock(&vport->vmid_lock);
3673  		/* for all elements in the hash table */
3674  		if (!hash_empty(vport->hash_table))
3675  			hash_for_each(vport->hash_table, bucket, cur, hnode)
3676  				hash_del(&cur->hnode);
3677  		vport->load_flag |= FC_ALLOW_VMID;
3678  		break;
3679  	default:
3680  		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3681  				 "8857 Invalid command code\n");
3682  	}
3683  free_res:
3684  	lpfc_ct_free_iocb(phba, cmdiocb);
3685  	lpfc_nlp_put(ndlp);
3686  }
3687  
3688  /**
3689   * lpfc_vmid_cmd - Build and send a VMID CT command to the management server
3690   * @vport: pointer to a host virtual N_Port data structure.
3691   * @cmdcode: application server command code to send
3692   * @vmid: pointer to vmid info structure
3693   *
3694   * Builds and sends an application server CT command using the CT subsystem.
3695   */
3696  int
3697  lpfc_vmid_cmd(struct lpfc_vport *vport,
3698  	      int cmdcode, struct lpfc_vmid *vmid)
3699  {
3700  	struct lpfc_hba *phba = vport->phba;
3701  	struct lpfc_dmabuf *mp, *bmp;
3702  	struct lpfc_sli_ct_request *ctreq;
3703  	struct ulp_bde64 *bpl;
3704  	u32 size;
3705  	u32 rsp_size;
3706  	u8 *data;
3707  	struct lpfc_vmid_rapp_ident_list *rap;
3708  	struct lpfc_vmid_dapp_ident_list *dap;
3709  	u8 retry = 0;
3710  	struct lpfc_nodelist *ndlp;
3711  
3712  	void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3713  		     struct lpfc_iocbq *rspiocb);
3714  
3715  	ndlp = lpfc_findnode_did(vport, FDMI_DID);
3716  	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
3717  		return 0;
3718  
3719  	cmpl = lpfc_cmpl_ct_cmd_vmid;
3720  
3721  	/* fill in BDEs for command */
3722  	/* Allocate buffer for command payload */
3723  	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
3724  	if (!mp)
3725  		goto vmid_free_mp_exit;
3726  
3727  	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
3728  	if (!mp->virt)
3729  		goto vmid_free_mp_virt_exit;
3730  
3731  	/* Allocate buffer for Buffer ptr list */
3732  	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
3733  	if (!bmp)
3734  		goto vmid_free_bmp_exit;
3735  
3736  	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
3737  	if (!bmp->virt)
3738  		goto vmid_free_bmp_virt_exit;
3739  
3740  	INIT_LIST_HEAD(&mp->list);
3741  	INIT_LIST_HEAD(&bmp->list);
3742  
3743  	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3744  			 "3275 VMID Request Data: x%x x%x x%x\n",
3745  			 vport->fc_flag, vport->port_state, cmdcode);
3746  	ctreq = (struct lpfc_sli_ct_request *)mp->virt;
3747  	data = mp->virt;
3748  	/* First populate the CT_IU preamble */
3749  	memset(data, 0, LPFC_BPL_SIZE);
3750  	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3751  	ctreq->RevisionId.bits.InId = 0;
3752  
3753  	ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE;
3754  	ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes;
3755  
3756  	ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
3757  	rsp_size = LPFC_BPL_SIZE;
3758  	size = 0;
3759  
3760  	switch (cmdcode) {
3761  	case SLI_CTAS_RAPP_IDENT:
3762  		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3763  				 "1329 RAPP_IDENT for %s\n", vmid->host_vmid);
3764  		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
3765  		rap = (struct lpfc_vmid_rapp_ident_list *)
3766  			(DAPP_IDENT_OFFSET + data);
3767  		rap->no_of_objects = cpu_to_be32(1);
3768  		rap->obj[0].entity_id_len = vmid->vmid_len;
3769  		memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
3770  		size = RAPP_IDENT_OFFSET +
3771  		       struct_size(rap, obj, be32_to_cpu(rap->no_of_objects));
3772  		retry = 1;
3773  		break;
3774  
3775  	case SLI_CTAS_GALLAPPIA_ID:
3776  		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
3777  		size = GALLAPPIA_ID_SIZE;
3778  		break;
3779  
3780  	case SLI_CTAS_DAPP_IDENT:
3781  		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3782  				 "1469 DAPP_IDENT for %s\n", vmid->host_vmid);
3783  		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
3784  		dap = (struct lpfc_vmid_dapp_ident_list *)
3785  			(DAPP_IDENT_OFFSET + data);
3786  		dap->no_of_objects = cpu_to_be32(1);
3787  		dap->obj[0].entity_id_len = vmid->vmid_len;
3788  		memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
3789  		size = DAPP_IDENT_OFFSET +
3790  		       struct_size(dap, obj, be32_to_cpu(dap->no_of_objects));
3791  		write_lock(&vport->vmid_lock);
3792  		vmid->flag &= ~LPFC_VMID_REGISTERED;
3793  		write_unlock(&vport->vmid_lock);
3794  		retry = 1;
3795  		break;
3796  
3797  	case SLI_CTAS_DALLAPP_ID:
3798  		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
3799  		size = DALLAPP_ID_SIZE;
3800  		break;
3801  
3802  	default:
3803  		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3804  				 "7062 VMID cmdcode x%x not supported\n",
3805  				 cmdcode);
3806  		goto vmid_free_all_mem;
3807  	}
3808  
3809  	ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
3810  
3811  	bpl = (struct ulp_bde64 *)bmp->virt;
3812  	bpl->addrHigh = putPaddrHigh(mp->phys);
3813  	bpl->addrLow = putPaddrLow(mp->phys);
3814  	bpl->tus.f.bdeFlags = 0;
3815  	bpl->tus.f.bdeSize = size;
3816  
3817  	/* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
3818  	 * to hold ndlp reference for the corresponding callback function.
3819  	 */
3820  	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
3821  		return 0;
3822  
3823   vmid_free_all_mem:
3824  	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
3825   vmid_free_bmp_virt_exit:
3826  	kfree(bmp);
3827   vmid_free_bmp_exit:
3828  	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3829   vmid_free_mp_virt_exit:
3830  	kfree(mp);
3831   vmid_free_mp_exit:
3832  
3833  	/* Issue CT request failed */
3834  	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
3835  			 "3276 VMID CT request failed Data: x%x\n", cmdcode);
3836  	return -EIO;
3837  }
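
/*
 * Hedged usage sketch (illustrative only): the VMID registration path would
 * issue something like
 *
 *	rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmid);
 *
 * where 0 means the CT request was issued (or skipped because no usable
 * management-server node exists) and -EIO means it could not be sent.
 */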
3838