xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_gs.c (revision 078b39c9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 #include "qla_target.h"
8 #include <linux/utsname.h>
9 
10 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
11 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
12 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
15 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
16 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
17 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
18 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
19 static int qla_async_rsnn_nn(scsi_qla_host_t *);
20 
21 
22 
23 /**
24  * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
25  * @vha: HA context
26  * @arg: CT arguments
27  *
28  * Returns a pointer to the @vha's ms_iocb.
29  */
30 void *
31 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 {
33 	struct qla_hw_data *ha = vha->hw;
34 	ms_iocb_entry_t *ms_pkt;
35 
36 	ms_pkt = (ms_iocb_entry_t *)arg->iocb;
37 	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 
39 	ms_pkt->entry_type = MS_IOCB_TYPE;
40 	ms_pkt->entry_count = 1;
41 	SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
42 	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
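	/* IOCB timeout: twice the fabric R_A_TOV (the driver keeps r_a_tov
	 * in 100 ms units, so dividing by 10 converts to seconds). */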
43 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
44 	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
45 	ms_pkt->total_dsd_count = cpu_to_le16(2);
46 	ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
47 	ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
48 
49 	put_unaligned_le64(arg->req_dma, &ms_pkt->req_dsd.address);
50 	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
51 
52 	put_unaligned_le64(arg->rsp_dma, &ms_pkt->rsp_dsd.address);
53 	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
54 
55 	vha->qla_stats.control_requests++;
56 
57 	return (ms_pkt);
58 }
59 
60 /**
61  * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
62  * @vha: HA context
63  * @arg: CT arguments
64  *
65  * Returns a pointer to the @ha's ms_iocb.
66  */
67 void *
68 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
69 {
70 	struct qla_hw_data *ha = vha->hw;
71 	struct ct_entry_24xx *ct_pkt;
72 
73 	ct_pkt = (struct ct_entry_24xx *)arg->iocb;
74 	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
75 
76 	ct_pkt->entry_type = CT_IOCB_TYPE;
77 	ct_pkt->entry_count = 1;
78 	ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
79 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
80 	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
81 	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
82 	ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
83 	ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
84 
85 	put_unaligned_le64(arg->req_dma, &ct_pkt->dsd[0].address);
86 	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
87 
88 	put_unaligned_le64(arg->rsp_dma, &ct_pkt->dsd[1].address);
89 	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
90 	ct_pkt->vp_index = vha->vp_idx;
91 
92 	vha->qla_stats.control_requests++;
93 
94 	return (ct_pkt);
95 }
96 
97 /**
98  * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
99  * @p: CT request buffer
100  * @cmd: GS command
101  * @rsp_size: response size in bytes
102  *
103  * Returns a pointer to the initialized @ct_req.
104  */
105 static inline struct ct_sns_req *
106 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
107 {
108 	memset(p, 0, sizeof(struct ct_sns_pkt));
109 
110 	p->p.req.header.revision = 0x01;
111 	p->p.req.header.gs_type = 0xFC;
112 	p->p.req.header.gs_subtype = 0x02;
113 	p->p.req.command = cpu_to_be16(cmd);
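	/* Maximum/residual response size: the payload after the 16-byte
	 * CT_IU preamble, expressed in 4-byte words. */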
114 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
115 
116 	return &p->p.req;
117 }
118 
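/**
 * qla2x00_chk_ms_status() - Check MS/CT IOCB completion and CT response.
 * @vha: HA context
 * @ms_pkt: completed MS/CT IOCB
 * @ct_rsp: CT response to validate
 * @routine: caller name used in log messages
 *
 * Returns QLA_SUCCESS when the IOCB completed and the CT response was
 * accepted; QLA_INVALID_COMMAND, QLA_NOT_LOGGED_IN, QLA_FUNCTION_TIMEOUT or
 * QLA_FUNCTION_FAILED otherwise.
 */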
119 int
120 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
121     struct ct_sns_rsp *ct_rsp, const char *routine)
122 {
123 	int rval;
124 	uint16_t comp_status;
125 	struct qla_hw_data *ha = vha->hw;
126 	bool lid_is_sns = false;
127 
128 	rval = QLA_FUNCTION_FAILED;
129 	if (ms_pkt->entry_status != 0) {
130 		ql_dbg(ql_dbg_disc, vha, 0x2031,
131 		    "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
132 		    routine, ms_pkt->entry_status, vha->d_id.b.domain,
133 		    vha->d_id.b.area, vha->d_id.b.al_pa);
134 	} else {
135 		if (IS_FWI2_CAPABLE(ha))
136 			comp_status = le16_to_cpu(
137 			    ((struct ct_entry_24xx *)ms_pkt)->comp_status);
138 		else
139 			comp_status = le16_to_cpu(ms_pkt->status);
140 		switch (comp_status) {
141 		case CS_COMPLETE:
142 		case CS_DATA_UNDERRUN:
143 		case CS_DATA_OVERRUN:		/* Overrun? */
144 			if (ct_rsp->header.response !=
145 			    cpu_to_be16(CT_ACCEPT_RESPONSE)) {
146 				ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
147 				    "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
148 				    routine, vha->d_id.b.domain,
149 				    vha->d_id.b.area, vha->d_id.b.al_pa,
150 				    comp_status, ct_rsp->header.response);
151 				ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
152 				    0x2078, ct_rsp,
153 				    offsetof(typeof(*ct_rsp), rsp));
154 				rval = QLA_INVALID_COMMAND;
155 			} else
156 				rval = QLA_SUCCESS;
157 			break;
158 		case CS_PORT_LOGGED_OUT:
159 			if (IS_FWI2_CAPABLE(ha)) {
160 				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
161 				    NPH_SNS)
162 					lid_is_sns = true;
163 			} else {
164 				if (le16_to_cpu(ms_pkt->loop_id.extended) ==
165 				    SIMPLE_NAME_SERVER)
166 					lid_is_sns = true;
167 			}
168 			if (lid_is_sns) {
169 				ql_dbg(ql_dbg_async, vha, 0x502b,
170 					"%s failed, Name server has logged out",
171 					routine);
172 				rval = QLA_NOT_LOGGED_IN;
173 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
174 				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
175 			}
176 			break;
177 		case CS_TIMEOUT:
178 			rval = QLA_FUNCTION_TIMEOUT;
179 			fallthrough;
180 		default:
181 			ql_dbg(ql_dbg_disc, vha, 0x2033,
182 			    "%s failed, completion status (%x) on port_id: "
183 			    "%02x%02x%02x.\n", routine, comp_status,
184 			    vha->d_id.b.domain, vha->d_id.b.area,
185 			    vha->d_id.b.al_pa);
186 			break;
187 		}
188 	}
189 	return rval;
190 }
191 
192 /**
193  * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
194  * @vha: HA context
195  * @fcport: fcport entry to update
196  *
197  * Returns 0 on success.
198  */
199 int
200 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
201 {
202 	int		rval;
203 
204 	ms_iocb_entry_t	*ms_pkt;
205 	struct ct_sns_req	*ct_req;
206 	struct ct_sns_rsp	*ct_rsp;
207 	struct qla_hw_data *ha = vha->hw;
208 	struct ct_arg arg;
209 
210 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
211 		return qla2x00_sns_ga_nxt(vha, fcport);
212 
213 	arg.iocb = ha->ms_iocb;
214 	arg.req_dma = ha->ct_sns_dma;
215 	arg.rsp_dma = ha->ct_sns_dma;
216 	arg.req_size = GA_NXT_REQ_SIZE;
217 	arg.rsp_size = GA_NXT_RSP_SIZE;
218 	arg.nport_handle = NPH_SNS;
219 
220 	/* Issue GA_NXT */
221 	/* Prepare common MS IOCB */
222 	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
223 
224 	/* Prepare CT request */
225 	ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
226 	    GA_NXT_RSP_SIZE);
227 	ct_rsp = &ha->ct_sns->p.rsp;
228 
229 	/* Prepare CT arguments -- port_id */
230 	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
231 
232 	/* Execute MS IOCB */
233 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
234 	    sizeof(ms_iocb_entry_t));
235 	if (rval != QLA_SUCCESS) {
236 		/*EMPTY*/
237 		ql_dbg(ql_dbg_disc, vha, 0x2062,
238 		    "GA_NXT issue IOCB failed (%d).\n", rval);
239 	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
240 	    QLA_SUCCESS) {
241 		rval = QLA_FUNCTION_FAILED;
242 	} else {
243 		/* Populate fc_port_t entry. */
244 		fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id);
245 
246 		memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
247 		    WWN_SIZE);
248 		memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
249 		    WWN_SIZE);
250 
251 		fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
252 		    FS_FC4TYPE_FCP : FC4_TYPE_OTHER;
253 
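		/* Not an N/NL_Port: mark the entry with an out-of-range
		 * domain value (0xf0). */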
254 		if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
255 		    ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
256 			fcport->d_id.b.domain = 0xf0;
257 
258 		ql_dbg(ql_dbg_disc, vha, 0x2063,
259 		    "GA_NXT entry - nn %8phN pn %8phN "
260 		    "port_id=%02x%02x%02x.\n",
261 		    fcport->node_name, fcport->port_name,
262 		    fcport->d_id.b.domain, fcport->d_id.b.area,
263 		    fcport->d_id.b.al_pa);
264 	}
265 
266 	return (rval);
267 }
268 
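/*
 * GID_PT accept payload: a 16-byte CT header followed by one 4-byte
 * port-ID entry per fabric device, hence max_fibre_devices * 4 + 16.
 */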
269 static inline int
270 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
271 {
272 	return vha->hw->max_fibre_devices * 4 + 16;
273 }
274 
275 /**
276  * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
277  * @vha: HA context
278  * @list: switch info entries to populate
279  *
280  * NOTE: Non-Nx_Ports are not requested.
281  *
282  * Returns 0 on success.
283  */
284 int
285 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
286 {
287 	int		rval;
288 	uint16_t	i;
289 
290 	ms_iocb_entry_t	*ms_pkt;
291 	struct ct_sns_req	*ct_req;
292 	struct ct_sns_rsp	*ct_rsp;
293 
294 	struct ct_sns_gid_pt_data *gid_data;
295 	struct qla_hw_data *ha = vha->hw;
296 	uint16_t gid_pt_rsp_size;
297 	struct ct_arg arg;
298 
299 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
300 		return qla2x00_sns_gid_pt(vha, list);
301 
302 	gid_data = NULL;
303 	gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
304 
305 	arg.iocb = ha->ms_iocb;
306 	arg.req_dma = ha->ct_sns_dma;
307 	arg.rsp_dma = ha->ct_sns_dma;
308 	arg.req_size = GID_PT_REQ_SIZE;
309 	arg.rsp_size = gid_pt_rsp_size;
310 	arg.nport_handle = NPH_SNS;
311 
312 	/* Issue GID_PT */
313 	/* Prepare common MS IOCB */
314 	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
315 
316 	/* Prepare CT request */
317 	ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
318 	ct_rsp = &ha->ct_sns->p.rsp;
319 
320 	/* Prepare CT arguments -- port_type */
321 	ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
322 
323 	/* Execute MS IOCB */
324 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
325 	    sizeof(ms_iocb_entry_t));
326 	if (rval != QLA_SUCCESS) {
327 		/*EMPTY*/
328 		ql_dbg(ql_dbg_disc, vha, 0x2055,
329 		    "GID_PT issue IOCB failed (%d).\n", rval);
330 	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
331 	    QLA_SUCCESS) {
332 		rval = QLA_FUNCTION_FAILED;
333 	} else {
334 		/* Set port IDs in switch info list. */
335 		for (i = 0; i < ha->max_fibre_devices; i++) {
336 			gid_data = &ct_rsp->rsp.gid_pt.entries[i];
337 			list[i].d_id = be_to_port_id(gid_data->port_id);
338 			memset(list[i].fabric_port_name, 0, WWN_SIZE);
339 			list[i].fp_speed = PORT_SPEED_UNKNOWN;
340 
341 			/* Last one exit. */
342 			if (gid_data->control_byte & BIT_7) {
343 				list[i].d_id.b.rsvd_1 = gid_data->control_byte;
344 				break;
345 			}
346 		}
347 
348 		/*
349 		 * If we've used all available slots, then the switch is
350 		 * reporting back more devices than we can handle with this
351 		 * single call.  Return a failed status, and let GA_NXT handle
352 		 * the overload.
353 		 */
354 		if (i == ha->max_fibre_devices)
355 			rval = QLA_FUNCTION_FAILED;
356 	}
357 
358 	return (rval);
359 }
360 
361 /**
362  * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
363  * @vha: HA context
364  * @list: switch info entries to populate
365  *
366  * Returns 0 on success.
367  */
368 int
369 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
370 {
371 	int		rval = QLA_SUCCESS;
372 	uint16_t	i;
373 
374 	ms_iocb_entry_t	*ms_pkt;
375 	struct ct_sns_req	*ct_req;
376 	struct ct_sns_rsp	*ct_rsp;
377 	struct qla_hw_data *ha = vha->hw;
378 	struct ct_arg arg;
379 
380 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
381 		return qla2x00_sns_gpn_id(vha, list);
382 
383 	arg.iocb = ha->ms_iocb;
384 	arg.req_dma = ha->ct_sns_dma;
385 	arg.rsp_dma = ha->ct_sns_dma;
386 	arg.req_size = GPN_ID_REQ_SIZE;
387 	arg.rsp_size = GPN_ID_RSP_SIZE;
388 	arg.nport_handle = NPH_SNS;
389 
390 	for (i = 0; i < ha->max_fibre_devices; i++) {
391 		/* Issue GPN_ID */
392 		/* Prepare common MS IOCB */
393 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
394 
395 		/* Prepare CT request */
396 		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
397 		    GPN_ID_RSP_SIZE);
398 		ct_rsp = &ha->ct_sns->p.rsp;
399 
400 		/* Prepare CT arguments -- port_id */
401 		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
402 
403 		/* Execute MS IOCB */
404 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
405 		    sizeof(ms_iocb_entry_t));
406 		if (rval != QLA_SUCCESS) {
407 			/*EMPTY*/
408 			ql_dbg(ql_dbg_disc, vha, 0x2056,
409 			    "GPN_ID issue IOCB failed (%d).\n", rval);
410 			break;
411 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
412 		    "GPN_ID") != QLA_SUCCESS) {
413 			rval = QLA_FUNCTION_FAILED;
414 			break;
415 		} else {
416 			/* Save portname */
417 			memcpy(list[i].port_name,
418 			    ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
419 		}
420 
421 		/* Last device exit. */
422 		if (list[i].d_id.b.rsvd_1 != 0)
423 			break;
424 	}
425 
426 	return (rval);
427 }
428 
429 /**
430  * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
431  * @vha: HA context
432  * @list: switch info entries to populate
433  *
434  * Returns 0 on success.
435  */
436 int
437 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
438 {
439 	int		rval = QLA_SUCCESS;
440 	uint16_t	i;
441 	struct qla_hw_data *ha = vha->hw;
442 	ms_iocb_entry_t	*ms_pkt;
443 	struct ct_sns_req	*ct_req;
444 	struct ct_sns_rsp	*ct_rsp;
445 	struct ct_arg arg;
446 
447 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
448 		return qla2x00_sns_gnn_id(vha, list);
449 
450 	arg.iocb = ha->ms_iocb;
451 	arg.req_dma = ha->ct_sns_dma;
452 	arg.rsp_dma = ha->ct_sns_dma;
453 	arg.req_size = GNN_ID_REQ_SIZE;
454 	arg.rsp_size = GNN_ID_RSP_SIZE;
455 	arg.nport_handle = NPH_SNS;
456 
457 	for (i = 0; i < ha->max_fibre_devices; i++) {
458 		/* Issue GNN_ID */
459 		/* Prepare common MS IOCB */
460 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
461 
462 		/* Prepare CT request */
463 		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
464 		    GNN_ID_RSP_SIZE);
465 		ct_rsp = &ha->ct_sns->p.rsp;
466 
467 		/* Prepare CT arguments -- port_id */
468 		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
469 
470 		/* Execute MS IOCB */
471 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
472 		    sizeof(ms_iocb_entry_t));
473 		if (rval != QLA_SUCCESS) {
474 			/*EMPTY*/
475 			ql_dbg(ql_dbg_disc, vha, 0x2057,
476 			    "GNN_ID issue IOCB failed (%d).\n", rval);
477 			break;
478 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
479 		    "GNN_ID") != QLA_SUCCESS) {
480 			rval = QLA_FUNCTION_FAILED;
481 			break;
482 		} else {
483 			/* Save nodename */
484 			memcpy(list[i].node_name,
485 			    ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
486 
487 			ql_dbg(ql_dbg_disc, vha, 0x2058,
488 			    "GID_PT entry - nn %8phN pn %8phN "
489 			    "portid=%02x%02x%02x.\n",
490 			    list[i].node_name, list[i].port_name,
491 			    list[i].d_id.b.domain, list[i].d_id.b.area,
492 			    list[i].d_id.b.al_pa);
493 		}
494 
495 		/* Last device exit. */
496 		if (list[i].d_id.b.rsvd_1 != 0)
497 			break;
498 	}
499 
500 	return (rval);
501 }
502 
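/*
 * Completion handler for async SNS/CT pass-through SRBs.  Success and
 * timeout hand the SRB to QLA_EVT_UNMAP so its DMA buffers are released
 * from process context; other failures are retried (up to three times)
 * through QLA_EVT_SP_RETRY before falling back to the same teardown path.
 */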
503 static void qla2x00_async_sns_sp_done(srb_t *sp, int rc)
504 {
505 	struct scsi_qla_host *vha = sp->vha;
506 	struct ct_sns_pkt *ct_sns;
507 	struct qla_work_evt *e;
508 
509 	sp->rc = rc;
510 	if (rc == QLA_SUCCESS) {
511 		ql_dbg(ql_dbg_disc, vha, 0x204f,
512 		    "Async done-%s exiting normally.\n",
513 		    sp->name);
514 	} else if (rc == QLA_FUNCTION_TIMEOUT) {
515 		ql_dbg(ql_dbg_disc, vha, 0x204f,
516 		    "Async done-%s timeout\n", sp->name);
517 	} else {
518 		ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
519 		memset(ct_sns, 0, sizeof(*ct_sns));
520 		sp->retry_count++;
521 		if (sp->retry_count > 3)
522 			goto err;
523 
524 		ql_dbg(ql_dbg_disc, vha, 0x204f,
525 		    "Async done-%s fail rc %x.  Retry count %d\n",
526 		    sp->name, rc, sp->retry_count);
527 
528 		e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
529 		if (!e)
530 			goto err2;
531 
532 		e->u.iosb.sp = sp;
533 		qla2x00_post_work(vha, e);
534 		return;
535 	}
536 
537 err:
538 	e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
539 err2:
540 	if (!e) {
541 		/* please ignore kernel warning. otherwise, we have mem leak. */
542 		if (sp->u.iocb_cmd.u.ctarg.req) {
543 			dma_free_coherent(&vha->hw->pdev->dev,
544 			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
545 			    sp->u.iocb_cmd.u.ctarg.req,
546 			    sp->u.iocb_cmd.u.ctarg.req_dma);
547 			sp->u.iocb_cmd.u.ctarg.req = NULL;
548 		}
549 
550 		if (sp->u.iocb_cmd.u.ctarg.rsp) {
551 			dma_free_coherent(&vha->hw->pdev->dev,
552 			    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
553 			    sp->u.iocb_cmd.u.ctarg.rsp,
554 			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
555 			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
556 		}
557 
558 		/* ref: INIT */
559 		kref_put(&sp->cmd_kref, qla2x00_sp_release);
560 		return;
561 	}
562 
563 	e->u.iosb.sp = sp;
564 	qla2x00_post_work(vha, e);
565 }
566 
567 /**
568  * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
569  * @vha: HA context
570  *
571  * Returns 0 on success.
572  */
573 int
574 qla2x00_rft_id(scsi_qla_host_t *vha)
575 {
576 	struct qla_hw_data *ha = vha->hw;
577 
578 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
579 		return qla2x00_sns_rft_id(vha);
580 
581 	return qla_async_rftid(vha, &vha->d_id);
582 }
583 
584 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
585 {
586 	int rval = QLA_MEMORY_ALLOC_FAILED;
587 	struct ct_sns_req *ct_req;
588 	srb_t *sp;
589 	struct ct_sns_pkt *ct_sns;
590 
591 	if (!vha->flags.online)
592 		goto done;
593 
594 	/* ref: INIT */
595 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
596 	if (!sp)
597 		goto done;
598 
599 	sp->type = SRB_CT_PTHRU_CMD;
600 	sp->name = "rft_id";
601 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
602 			      qla2x00_async_sns_sp_done);
603 
604 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
605 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
606 	    GFP_KERNEL);
607 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
608 	if (!sp->u.iocb_cmd.u.ctarg.req) {
609 		ql_log(ql_log_warn, vha, 0xd041,
610 		    "%s: Failed to allocate ct_sns request.\n",
611 		    __func__);
612 		goto done_free_sp;
613 	}
614 
615 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
616 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
617 	    GFP_KERNEL);
618 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
619 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
620 		ql_log(ql_log_warn, vha, 0xd042,
621 		    "%s: Failed to allocate ct_sns response.\n",
622 		    __func__);
623 		goto done_free_sp;
624 	}
625 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
626 	memset(ct_sns, 0, sizeof(*ct_sns));
627 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
628 
629 	/* Prepare CT request */
630 	ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
631 
632 	/* Prepare CT arguments -- port_id, FC-4 types */
633 	ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id);
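	/*
	 * FC-4 TYPEs bitmap: type T maps to byte (T / 32) * 4 + 3 - ((T % 32) / 8),
	 * bit (T % 8); type 0x08 (SCSI-FCP) lands in byte 2 bit 0 and type
	 * 0x28 (NVMe) in byte 6 bit 0, matching the assignments below.
	 */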
634 	ct_req->req.rft_id.fc4_types[2] = 0x01;		/* FCP-3 */
635 
636 	if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha))
637 		ct_req->req.rft_id.fc4_types[6] = 1;    /* NVMe type 28h */
638 
639 	sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
640 	sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
641 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
642 
643 	ql_dbg(ql_dbg_disc, vha, 0xffff,
644 	    "Async-%s - hdl=%x portid %06x.\n",
645 	    sp->name, sp->handle, d_id->b24);
646 
647 	rval = qla2x00_start_sp(sp);
648 	if (rval != QLA_SUCCESS) {
649 		ql_dbg(ql_dbg_disc, vha, 0x2043,
650 		    "RFT_ID issue IOCB failed (%d).\n", rval);
651 		goto done_free_sp;
652 	}
653 	return rval;
654 done_free_sp:
655 	/* ref: INIT */
656 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
657 done:
658 	return rval;
659 }
660 
661 /**
662  * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
663  * @vha: HA context
664  * @type: not used
665  *
666  * Returns 0 on success.
667  */
668 int
669 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
670 {
671 	struct qla_hw_data *ha = vha->hw;
672 
673 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
674 		ql_dbg(ql_dbg_disc, vha, 0x2046,
675 		    "RFF_ID call not supported on ISP2100/ISP2200.\n");
676 		return (QLA_SUCCESS);
677 	}
678 
679 	return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
680 }
681 
682 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
683     u8 fc4feature, u8 fc4type)
684 {
685 	int rval = QLA_MEMORY_ALLOC_FAILED;
686 	struct ct_sns_req *ct_req;
687 	srb_t *sp;
688 	struct ct_sns_pkt *ct_sns;
689 
690 	/* ref: INIT */
691 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
692 	if (!sp)
693 		goto done;
694 
695 	sp->type = SRB_CT_PTHRU_CMD;
696 	sp->name = "rff_id";
697 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
698 			      qla2x00_async_sns_sp_done);
699 
700 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
701 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
702 	    GFP_KERNEL);
703 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
704 	if (!sp->u.iocb_cmd.u.ctarg.req) {
705 		ql_log(ql_log_warn, vha, 0xd041,
706 		    "%s: Failed to allocate ct_sns request.\n",
707 		    __func__);
708 		goto done_free_sp;
709 	}
710 
711 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
712 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
713 	    GFP_KERNEL);
714 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
715 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
716 		ql_log(ql_log_warn, vha, 0xd042,
717 		    "%s: Failed to allocate ct_sns response.\n",
718 		    __func__);
719 		goto done_free_sp;
720 	}
721 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
722 	memset(ct_sns, 0, sizeof(*ct_sns));
723 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
724 
725 	/* Prepare CT request */
726 	ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
727 
728 	/* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
729 	ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id);
730 	ct_req->req.rff_id.fc4_feature = fc4feature;
731 	ct_req->req.rff_id.fc4_type = fc4type;		/* SCSI-FCP or FC-NVMe */
732 
733 	sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
734 	sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
735 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
736 
737 	ql_dbg(ql_dbg_disc, vha, 0xffff,
738 	    "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
739 	    sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
740 
741 	rval = qla2x00_start_sp(sp);
742 	if (rval != QLA_SUCCESS) {
743 		ql_dbg(ql_dbg_disc, vha, 0x2047,
744 		    "RFF_ID issue IOCB failed (%d).\n", rval);
745 		goto done_free_sp;
746 	}
747 
748 	return rval;
749 
750 done_free_sp:
751 	/* ref: INIT */
752 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
753 done:
754 	return rval;
755 }
756 
757 /**
758  * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
759  * @vha: HA context
760  *
761  * Returns 0 on success.
762  */
763 int
764 qla2x00_rnn_id(scsi_qla_host_t *vha)
765 {
766 	struct qla_hw_data *ha = vha->hw;
767 
768 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
769 		return qla2x00_sns_rnn_id(vha);
770 
771 	return  qla_async_rnnid(vha, &vha->d_id, vha->node_name);
772 }
773 
774 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
775 	u8 *node_name)
776 {
777 	int rval = QLA_MEMORY_ALLOC_FAILED;
778 	struct ct_sns_req *ct_req;
779 	srb_t *sp;
780 	struct ct_sns_pkt *ct_sns;
781 
782 	/* ref: INIT */
783 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
784 	if (!sp)
785 		goto done;
786 
787 	sp->type = SRB_CT_PTHRU_CMD;
788 	sp->name = "rnid";
789 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
790 			      qla2x00_async_sns_sp_done);
791 
792 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
793 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
794 	    GFP_KERNEL);
795 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
796 	if (!sp->u.iocb_cmd.u.ctarg.req) {
797 		ql_log(ql_log_warn, vha, 0xd041,
798 		    "%s: Failed to allocate ct_sns request.\n",
799 		    __func__);
800 		goto done_free_sp;
801 	}
802 
803 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
804 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
805 	    GFP_KERNEL);
806 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
807 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
808 		ql_log(ql_log_warn, vha, 0xd042,
809 		    "%s: Failed to allocate ct_sns response.\n",
810 		    __func__);
811 		goto done_free_sp;
812 	}
813 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
814 	memset(ct_sns, 0, sizeof(*ct_sns));
815 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
816 
817 	/* Prepare CT request */
818 	ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
819 
820 	/* Prepare CT arguments -- port_id, node_name */
821 	ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id);
822 	memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
823 
824 	sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
825 	sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
826 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
827 
828 	ql_dbg(ql_dbg_disc, vha, 0xffff,
829 	    "Async-%s - hdl=%x portid %06x\n",
830 	    sp->name, sp->handle, d_id->b24);
831 
832 	rval = qla2x00_start_sp(sp);
833 	if (rval != QLA_SUCCESS) {
834 		ql_dbg(ql_dbg_disc, vha, 0x204d,
835 		    "RNN_ID issue IOCB failed (%d).\n", rval);
836 		goto done_free_sp;
837 	}
838 
839 	return rval;
840 
841 done_free_sp:
842 	/* ref: INIT */
843 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
844 done:
845 	return rval;
846 }
847 
848 size_t
849 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
850 {
851 	struct qla_hw_data *ha = vha->hw;
852 
853 	if (IS_QLAFX00(ha))
854 		return scnprintf(snn, size, "%s FW:v%s DVR:v%s",
855 		    ha->model_number, ha->mr.fw_version, qla2x00_version_str);
856 
857 	return scnprintf(snn, size, "%s FW:v%d.%02d.%02d DVR:v%s",
858 	    ha->model_number, ha->fw_major_version, ha->fw_minor_version,
859 	    ha->fw_subminor_version, qla2x00_version_str);
860 }
861 
862 /**
863  * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
864  * @vha: HA context
865  *
866  * Returns 0 on success.
867  */
868 int
869 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
870 {
871 	struct qla_hw_data *ha = vha->hw;
872 
873 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
874 		ql_dbg(ql_dbg_disc, vha, 0x2050,
875 		    "RSNN_NN call unsupported on ISP2100/ISP2200.\n");
876 		return (QLA_SUCCESS);
877 	}
878 
879 	return qla_async_rsnn_nn(vha);
880 }
881 
882 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
883 {
884 	int rval = QLA_MEMORY_ALLOC_FAILED;
885 	struct ct_sns_req *ct_req;
886 	srb_t *sp;
887 	struct ct_sns_pkt *ct_sns;
888 
889 	/* ref: INIT */
890 	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
891 	if (!sp)
892 		goto done;
893 
894 	sp->type = SRB_CT_PTHRU_CMD;
895 	sp->name = "rsnn_nn";
896 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
897 			      qla2x00_async_sns_sp_done);
898 
899 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
900 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
901 	    GFP_KERNEL);
902 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
903 	if (!sp->u.iocb_cmd.u.ctarg.req) {
904 		ql_log(ql_log_warn, vha, 0xd041,
905 		    "%s: Failed to allocate ct_sns request.\n",
906 		    __func__);
907 		goto done_free_sp;
908 	}
909 
910 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
911 	    sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
912 	    GFP_KERNEL);
913 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
914 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
915 		ql_log(ql_log_warn, vha, 0xd042,
916 		    "%s: Failed to allocate ct_sns response.\n",
917 		    __func__);
918 		goto done_free_sp;
919 	}
920 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
921 	memset(ct_sns, 0, sizeof(*ct_sns));
922 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
923 
924 	/* Prepare CT request */
925 	ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
926 
927 	/* Prepare CT arguments -- node_name, symbolic node_name, size */
928 	memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
929 
930 	/* Prepare the Symbolic Node Name */
931 	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
932 	    sizeof(ct_req->req.rsnn_nn.sym_node_name));
933 	ct_req->req.rsnn_nn.name_len =
934 	    (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
935 
936 
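	/*
	 * RSNN_NN request: 16-byte CT header plus the 8-byte node name
	 * (24 bytes), then a one-byte symbolic-name length and the name.
	 */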
937 	sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
938 	sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
939 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
940 
941 	ql_dbg(ql_dbg_disc, vha, 0xffff,
942 	    "Async-%s - hdl=%x.\n",
943 	    sp->name, sp->handle);
944 
945 	rval = qla2x00_start_sp(sp);
946 	if (rval != QLA_SUCCESS) {
947 		ql_dbg(ql_dbg_disc, vha, 0x2043,
948 		    "RSNN_NN issue IOCB failed (%d).\n", rval);
949 		goto done_free_sp;
950 	}
951 
952 	return rval;
953 
954 done_free_sp:
955 	/* ref: INIT */
956 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
957 done:
958 	return rval;
959 }
960 
961 /**
962  * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
963  * @vha: HA context
964  * @cmd: GS command
965  * @scmd_len: Subcommand length
966  * @data_size: response size in bytes
967  *
968  * Returns a pointer to the @ha's sns_cmd.
969  */
970 static inline struct sns_cmd_pkt *
971 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
972     uint16_t data_size)
973 {
974 	uint16_t		wc;
975 	struct sns_cmd_pkt	*sns_cmd;
976 	struct qla_hw_data *ha = vha->hw;
977 
978 	sns_cmd = ha->sns_cmd;
979 	memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
980 	wc = data_size / 2;			/* Size in 16bit words. */
981 	sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
982 	put_unaligned_le64(ha->sns_cmd_dma, &sns_cmd->p.cmd.buffer_address);
983 	sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
984 	sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
985 	wc = (data_size - 16) / 4;		/* Size in 32bit words. */
986 	sns_cmd->p.cmd.size = cpu_to_le16(wc);
987 
988 	vha->qla_stats.control_requests++;
989 
990 	return (sns_cmd);
991 }
992 
993 /**
994  * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
995  * @vha: HA context
996  * @fcport: fcport entry to update
997  *
998  * This command uses the old Execute SNS Command mailbox routine.
999  *
1000  * Returns 0 on success.
1001  */
1002 static int
1003 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1004 {
1005 	int		rval = QLA_SUCCESS;
1006 	struct qla_hw_data *ha = vha->hw;
1007 	struct sns_cmd_pkt	*sns_cmd;
1008 
1009 	/* Issue GA_NXT. */
1010 	/* Prepare SNS command request. */
1011 	sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1012 	    GA_NXT_SNS_DATA_SIZE);
1013 
1014 	/* Prepare SNS command arguments -- port_id. */
1015 	sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1016 	sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1017 	sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1018 
1019 	/* Execute SNS command. */
1020 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1021 	    sizeof(struct sns_cmd_pkt));
1022 	if (rval != QLA_SUCCESS) {
1023 		/*EMPTY*/
1024 		ql_dbg(ql_dbg_disc, vha, 0x205f,
1025 		    "GA_NXT Send SNS failed (%d).\n", rval);
1026 	} else if (sns_cmd->p.gan_data[8] != 0x80 ||
1027 	    sns_cmd->p.gan_data[9] != 0x02) {
1028 		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1029 		    "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1030 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1031 		    sns_cmd->p.gan_data, 16);
1032 		rval = QLA_FUNCTION_FAILED;
1033 	} else {
1034 		/* Populate fc_port_t entry. */
1035 		fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1036 		fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1037 		fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1038 
1039 		memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1040 		memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
1041 
1042 		if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1043 		    sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1044 			fcport->d_id.b.domain = 0xf0;
1045 
1046 		ql_dbg(ql_dbg_disc, vha, 0x2061,
1047 		    "GA_NXT entry - nn %8phN pn %8phN "
1048 		    "port_id=%02x%02x%02x.\n",
1049 		    fcport->node_name, fcport->port_name,
1050 		    fcport->d_id.b.domain, fcport->d_id.b.area,
1051 		    fcport->d_id.b.al_pa);
1052 	}
1053 
1054 	return (rval);
1055 }
1056 
1057 /**
1058  * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1059  * @vha: HA context
1060  * @list: switch info entries to populate
1061  *
1062  * This command uses the old Execute SNS Command mailbox routine.
1063  *
1064  * NOTE: Non-Nx_Ports are not requested.
1065  *
1066  * Returns 0 on success.
1067  */
1068 static int
1069 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1070 {
1071 	int		rval;
1072 	struct qla_hw_data *ha = vha->hw;
1073 	uint16_t	i;
1074 	uint8_t		*entry;
1075 	struct sns_cmd_pkt	*sns_cmd;
1076 	uint16_t gid_pt_sns_data_size;
1077 
1078 	gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1079 
1080 	/* Issue GID_PT. */
1081 	/* Prepare SNS command request. */
1082 	sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1083 	    gid_pt_sns_data_size);
1084 
1085 	/* Prepare SNS command arguments -- port_type. */
1086 	sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1087 
1088 	/* Execute SNS command. */
1089 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1090 	    sizeof(struct sns_cmd_pkt));
1091 	if (rval != QLA_SUCCESS) {
1092 		/*EMPTY*/
1093 		ql_dbg(ql_dbg_disc, vha, 0x206d,
1094 		    "GID_PT Send SNS failed (%d).\n", rval);
1095 	} else if (sns_cmd->p.gid_data[8] != 0x80 ||
1096 	    sns_cmd->p.gid_data[9] != 0x02) {
1097 		ql_dbg(ql_dbg_disc, vha, 0x202f,
1098 		    "GID_PT failed, rejected request, gid_rsp:\n");
1099 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1100 		    sns_cmd->p.gid_data, 16);
1101 		rval = QLA_FUNCTION_FAILED;
1102 	} else {
1103 		/* Set port IDs in switch info list. */
1104 		for (i = 0; i < ha->max_fibre_devices; i++) {
1105 			entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1106 			list[i].d_id.b.domain = entry[1];
1107 			list[i].d_id.b.area = entry[2];
1108 			list[i].d_id.b.al_pa = entry[3];
1109 
1110 			/* Last one exit. */
1111 			if (entry[0] & BIT_7) {
1112 				list[i].d_id.b.rsvd_1 = entry[0];
1113 				break;
1114 			}
1115 		}
1116 
1117 		/*
1118 		 * If we've used all available slots, then the switch is
1119 		 * reporting back more devices than we can handle with this
1120 		 * single call.  Return a failed status, and let GA_NXT handle
1121 		 * the overload.
1122 		 */
1123 		if (i == ha->max_fibre_devices)
1124 			rval = QLA_FUNCTION_FAILED;
1125 	}
1126 
1127 	return (rval);
1128 }
1129 
1130 /**
1131  * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1132  * @vha: HA context
1133  * @list: switch info entries to populate
1134  *
1135  * This command uses the old Execute SNS Command mailbox routine.
1136  *
1137  * Returns 0 on success.
1138  */
1139 static int
1140 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1141 {
1142 	int		rval = QLA_SUCCESS;
1143 	struct qla_hw_data *ha = vha->hw;
1144 	uint16_t	i;
1145 	struct sns_cmd_pkt	*sns_cmd;
1146 
1147 	for (i = 0; i < ha->max_fibre_devices; i++) {
1148 		/* Issue GPN_ID */
1149 		/* Prepare SNS command request. */
1150 		sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1151 		    GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1152 
1153 		/* Prepare SNS command arguments -- port_id. */
1154 		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1155 		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1156 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1157 
1158 		/* Execute SNS command. */
1159 		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1160 		    GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1161 		if (rval != QLA_SUCCESS) {
1162 			/*EMPTY*/
1163 			ql_dbg(ql_dbg_disc, vha, 0x2032,
1164 			    "GPN_ID Send SNS failed (%d).\n", rval);
1165 		} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1166 		    sns_cmd->p.gpn_data[9] != 0x02) {
1167 			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1168 			    "GPN_ID failed, rejected request, gpn_rsp:\n");
1169 			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1170 			    sns_cmd->p.gpn_data, 16);
1171 			rval = QLA_FUNCTION_FAILED;
1172 		} else {
1173 			/* Save portname */
1174 			memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1175 			    WWN_SIZE);
1176 		}
1177 
1178 		/* Last device exit. */
1179 		if (list[i].d_id.b.rsvd_1 != 0)
1180 			break;
1181 	}
1182 
1183 	return (rval);
1184 }
1185 
1186 /**
1187  * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
1188  * @vha: HA context
1189  * @list: switch info entries to populate
1190  *
1191  * This command uses the old Execute SNS Command mailbox routine.
1192  *
1193  * Returns 0 on success.
1194  */
1195 static int
1196 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1197 {
1198 	int		rval = QLA_SUCCESS;
1199 	struct qla_hw_data *ha = vha->hw;
1200 	uint16_t	i;
1201 	struct sns_cmd_pkt	*sns_cmd;
1202 
1203 	for (i = 0; i < ha->max_fibre_devices; i++) {
1204 		/* Issue GNN_ID */
1205 		/* Prepare SNS command request. */
1206 		sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1207 		    GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1208 
1209 		/* Prepare SNS command arguments -- port_id. */
1210 		sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1211 		sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1212 		sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1213 
1214 		/* Execute SNS command. */
1215 		rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1216 		    GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1217 		if (rval != QLA_SUCCESS) {
1218 			/*EMPTY*/
1219 			ql_dbg(ql_dbg_disc, vha, 0x203f,
1220 			    "GNN_ID Send SNS failed (%d).\n", rval);
1221 		} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1222 		    sns_cmd->p.gnn_data[9] != 0x02) {
1223 			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1224 			    "GNN_ID failed, rejected request, gnn_rsp:\n");
1225 			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1226 			    sns_cmd->p.gnn_data, 16);
1227 			rval = QLA_FUNCTION_FAILED;
1228 		} else {
1229 			/* Save nodename */
1230 			memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1231 			    WWN_SIZE);
1232 
1233 			ql_dbg(ql_dbg_disc, vha, 0x206e,
1234 			    "GID_PT entry - nn %8phN pn %8phN "
1235 			    "port_id=%02x%02x%02x.\n",
1236 			    list[i].node_name, list[i].port_name,
1237 			    list[i].d_id.b.domain, list[i].d_id.b.area,
1238 			    list[i].d_id.b.al_pa);
1239 		}
1240 
1241 		/* Last device exit. */
1242 		if (list[i].d_id.b.rsvd_1 != 0)
1243 			break;
1244 	}
1245 
1246 	return (rval);
1247 }
1248 
1249 /**
1250  * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
1251  * @vha: HA context
1252  *
1253  * This command uses the old Execute SNS Command mailbox routine.
1254  *
1255  * Returns 0 on success.
1256  */
1257 static int
1258 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1259 {
1260 	int		rval;
1261 	struct qla_hw_data *ha = vha->hw;
1262 	struct sns_cmd_pkt	*sns_cmd;
1263 
1264 	/* Issue RFT_ID. */
1265 	/* Prepare SNS command request. */
1266 	sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1267 	    RFT_ID_SNS_DATA_SIZE);
1268 
1269 	/* Prepare SNS command arguments -- port_id, FC-4 types */
1270 	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1271 	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1272 	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1273 
1274 	sns_cmd->p.cmd.param[5] = 0x01;			/* FCP-3 */
1275 
1276 	/* Execute SNS command. */
1277 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1278 	    sizeof(struct sns_cmd_pkt));
1279 	if (rval != QLA_SUCCESS) {
1280 		/*EMPTY*/
1281 		ql_dbg(ql_dbg_disc, vha, 0x2060,
1282 		    "RFT_ID Send SNS failed (%d).\n", rval);
1283 	} else if (sns_cmd->p.rft_data[8] != 0x80 ||
1284 	    sns_cmd->p.rft_data[9] != 0x02) {
1285 		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1286 		    "RFT_ID failed, rejected request rft_rsp:\n");
1287 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1288 		    sns_cmd->p.rft_data, 16);
1289 		rval = QLA_FUNCTION_FAILED;
1290 	} else {
1291 		ql_dbg(ql_dbg_disc, vha, 0x2073,
1292 		    "RFT_ID exiting normally.\n");
1293 	}
1294 
1295 	return (rval);
1296 }
1297 
1298 /**
1299  * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
1300  * @vha: HA context
1301  *
1302  * This command uses the old Execute SNS Command mailbox routine.
1303  *
1304  * Returns 0 on success.
1305  */
1306 static int
1307 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1308 {
1309 	int		rval;
1310 	struct qla_hw_data *ha = vha->hw;
1311 	struct sns_cmd_pkt	*sns_cmd;
1312 
1313 	/* Issue RNN_ID. */
1314 	/* Prepare SNS command request. */
1315 	sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1316 	    RNN_ID_SNS_DATA_SIZE);
1317 
1318 	/* Prepare SNS command arguments -- port_id, nodename. */
1319 	sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1320 	sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1321 	sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
1322 
1323 	sns_cmd->p.cmd.param[4] = vha->node_name[7];
1324 	sns_cmd->p.cmd.param[5] = vha->node_name[6];
1325 	sns_cmd->p.cmd.param[6] = vha->node_name[5];
1326 	sns_cmd->p.cmd.param[7] = vha->node_name[4];
1327 	sns_cmd->p.cmd.param[8] = vha->node_name[3];
1328 	sns_cmd->p.cmd.param[9] = vha->node_name[2];
1329 	sns_cmd->p.cmd.param[10] = vha->node_name[1];
1330 	sns_cmd->p.cmd.param[11] = vha->node_name[0];
1331 
1332 	/* Execute SNS command. */
1333 	rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1334 	    sizeof(struct sns_cmd_pkt));
1335 	if (rval != QLA_SUCCESS) {
1336 		/*EMPTY*/
1337 		ql_dbg(ql_dbg_disc, vha, 0x204a,
1338 		    "RNN_ID Send SNS failed (%d).\n", rval);
1339 	} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1340 	    sns_cmd->p.rnn_data[9] != 0x02) {
1341 		ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1342 		    "RNN_ID failed, rejected request, rnn_rsp:\n");
1343 		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1344 		    sns_cmd->p.rnn_data, 16);
1345 		rval = QLA_FUNCTION_FAILED;
1346 	} else {
1347 		ql_dbg(ql_dbg_disc, vha, 0x204c,
1348 		    "RNN_ID exiting normally.\n");
1349 	}
1350 
1351 	return (rval);
1352 }
1353 
1354 /**
1355  * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
1356  * @vha: HA context
1357  *
1358  * Returns 0 on success.
1359  */
1360 int
1361 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1362 {
1363 	int ret, rval;
1364 	uint16_t mb[MAILBOX_REGISTER_COUNT];
1365 	struct qla_hw_data *ha = vha->hw;
1366 
1367 	ret = QLA_SUCCESS;
1368 	if (vha->flags.management_server_logged_in)
1369 		return ret;
1370 
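	/* Log in to the fabric management server at well-known address
	 * FFFFFAh. */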
1371 	rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1372 	    0xfa, mb, BIT_1);
1373 	if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1374 		if (rval == QLA_MEMORY_ALLOC_FAILED)
1375 			ql_dbg(ql_dbg_disc, vha, 0x2085,
1376 			    "Failed management_server login: loopid=%x "
1377 			    "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1378 		else
1379 			ql_dbg(ql_dbg_disc, vha, 0x2024,
1380 			    "Failed management_server login: loopid=%x "
1381 			    "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1382 			    vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1383 			    mb[7]);
1384 		ret = QLA_FUNCTION_FAILED;
1385 	} else
1386 		vha->flags.management_server_logged_in = 1;
1387 
1388 	return ret;
1389 }
1390 
1391 /**
1392  * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1393  * @vha: HA context
1394  * @req_size: request size in bytes
1395  * @rsp_size: response size in bytes
1396  *
1397  * Returns a pointer to the @ha's ms_iocb.
1398  */
1399 void *
1400 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1401     uint32_t rsp_size)
1402 {
1403 	ms_iocb_entry_t *ms_pkt;
1404 	struct qla_hw_data *ha = vha->hw;
1405 
1406 	ms_pkt = ha->ms_iocb;
1407 	memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1408 
1409 	ms_pkt->entry_type = MS_IOCB_TYPE;
1410 	ms_pkt->entry_count = 1;
1411 	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1412 	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1413 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1414 	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1415 	ms_pkt->total_dsd_count = cpu_to_le16(2);
1416 	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1417 	ms_pkt->req_bytecount = cpu_to_le32(req_size);
1418 
1419 	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->req_dsd.address);
1420 	ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1421 
1422 	put_unaligned_le64(ha->ct_sns_dma, &ms_pkt->rsp_dsd.address);
1423 	ms_pkt->rsp_dsd.length = ms_pkt->rsp_bytecount;
1424 
1425 	return ms_pkt;
1426 }
1427 
1428 /**
1429  * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
1430  * @vha: HA context
1431  * @req_size: request size in bytes
1432  * @rsp_size: response size in bytes
1433  *
1434  * Returns a pointer to the @ha's ms_iocb.
1435  */
1436 void *
1437 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1438     uint32_t rsp_size)
1439 {
1440 	struct ct_entry_24xx *ct_pkt;
1441 	struct qla_hw_data *ha = vha->hw;
1442 
1443 	ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1444 	memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1445 
1446 	ct_pkt->entry_type = CT_IOCB_TYPE;
1447 	ct_pkt->entry_count = 1;
1448 	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1449 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1450 	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1451 	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1452 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1453 	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1454 
1455 	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[0].address);
1456 	ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1457 
1458 	put_unaligned_le64(ha->ct_sns_dma, &ct_pkt->dsd[1].address);
1459 	ct_pkt->dsd[1].length = ct_pkt->rsp_byte_count;
1460 	ct_pkt->vp_index = vha->vp_idx;
1461 
1462 	return ct_pkt;
1463 }
1464 
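/*
 * Adjust the request byte count of an already-prepared FDMI MS/CT IOCB;
 * used once the actual FDMI attribute payload size is known.
 */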
1465 static void
1466 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1467 {
1468 	struct qla_hw_data *ha = vha->hw;
1469 	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1470 	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1471 
1472 	if (IS_FWI2_CAPABLE(ha)) {
1473 		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1474 		ct_pkt->dsd[0].length = ct_pkt->cmd_byte_count;
1475 	} else {
1476 		ms_pkt->req_bytecount = cpu_to_le32(req_size);
1477 		ms_pkt->req_dsd.length = ms_pkt->req_bytecount;
1478 	}
1479 }
1480 
1481 /**
1482  * qla2x00_prep_ct_fdmi_req() - Prepare common CT request fields for SNS query.
1483  * @p: CT request buffer
1484  * @cmd: GS command
1485  * @rsp_size: response size in bytes
1486  *
1487  * Returns a pointer to the initialized @ct_req.
1488  */
1489 static inline struct ct_sns_req *
1490 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1491     uint16_t rsp_size)
1492 {
1493 	memset(p, 0, sizeof(struct ct_sns_pkt));
1494 
1495 	p->p.req.header.revision = 0x01;
1496 	p->p.req.header.gs_type = 0xFA;
1497 	p->p.req.header.gs_subtype = 0x10;
1498 	p->p.req.command = cpu_to_be16(cmd);
1499 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1500 
1501 	return &p->p.req;
1502 }
1503 
1504 uint
1505 qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
1506 {
1507 	uint speeds = 0;
1508 
1509 	if (IS_CNA_CAPABLE(ha))
1510 		return FDMI_PORT_SPEED_10GB;
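	/*
	 * On ISP27xx/28xx, max_supported_speed encodes the top link rate
	 * (0 = 16Gb, 1 = 32Gb, 2 = 64Gb) and min_supported_speed the lowest
	 * (2 = 4Gb through 6 = 64Gb); every rate in that range is reported.
	 */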
1511 	if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
1512 		if (ha->max_supported_speed == 2) {
1513 			if (ha->min_supported_speed <= 6)
1514 				speeds |= FDMI_PORT_SPEED_64GB;
1515 		}
1516 		if (ha->max_supported_speed == 2 ||
1517 		    ha->max_supported_speed == 1) {
1518 			if (ha->min_supported_speed <= 5)
1519 				speeds |= FDMI_PORT_SPEED_32GB;
1520 		}
1521 		if (ha->max_supported_speed == 2 ||
1522 		    ha->max_supported_speed == 1 ||
1523 		    ha->max_supported_speed == 0) {
1524 			if (ha->min_supported_speed <= 4)
1525 				speeds |= FDMI_PORT_SPEED_16GB;
1526 		}
1527 		if (ha->max_supported_speed == 1 ||
1528 		    ha->max_supported_speed == 0) {
1529 			if (ha->min_supported_speed <= 3)
1530 				speeds |= FDMI_PORT_SPEED_8GB;
1531 		}
1532 		if (ha->max_supported_speed == 0) {
1533 			if (ha->min_supported_speed <= 2)
1534 				speeds |= FDMI_PORT_SPEED_4GB;
1535 		}
1536 		return speeds;
1537 	}
1538 	if (IS_QLA2031(ha)) {
1539 		if ((ha->pdev->subsystem_vendor == 0x103C) &&
1540 		    ((ha->pdev->subsystem_device == 0x8002) ||
1541 		    (ha->pdev->subsystem_device == 0x8086))) {
1542 			speeds = FDMI_PORT_SPEED_16GB;
1543 		} else {
1544 			speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB|
1545 				FDMI_PORT_SPEED_4GB;
1546 		}
1547 		return speeds;
1548 	}
1549 	if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
1550 		return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
1551 			FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1552 	if (IS_QLA24XX_TYPE(ha))
1553 		return FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_2GB|
1554 			FDMI_PORT_SPEED_1GB;
1555 	if (IS_QLA23XX(ha))
1556 		return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
1557 	return FDMI_PORT_SPEED_1GB;
1558 }
1559 
1560 uint
1561 qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
1562 {
1563 	switch (ha->link_data_rate) {
1564 	case PORT_SPEED_1GB:
1565 		return FDMI_PORT_SPEED_1GB;
1566 	case PORT_SPEED_2GB:
1567 		return FDMI_PORT_SPEED_2GB;
1568 	case PORT_SPEED_4GB:
1569 		return FDMI_PORT_SPEED_4GB;
1570 	case PORT_SPEED_8GB:
1571 		return FDMI_PORT_SPEED_8GB;
1572 	case PORT_SPEED_10GB:
1573 		return FDMI_PORT_SPEED_10GB;
1574 	case PORT_SPEED_16GB:
1575 		return FDMI_PORT_SPEED_16GB;
1576 	case PORT_SPEED_32GB:
1577 		return FDMI_PORT_SPEED_32GB;
1578 	case PORT_SPEED_64GB:
1579 		return FDMI_PORT_SPEED_64GB;
1580 	default:
1581 		return FDMI_PORT_SPEED_UNKNOWN;
1582 	}
1583 }
1584 
1585 /**
1586  * qla2x00_hba_attributes() - perform HBA attributes registration
1587  * @vha: HA context
1588  * @entries: number of entries to use
1589  * @callopt: Option to issue extended or standard FDMI
1590  *           command parameter
1591  *
1592  * Returns 0 on success.
1593  */
1594 static unsigned long
1595 qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries,
1596 	unsigned int callopt)
1597 {
1598 	struct qla_hw_data *ha = vha->hw;
1599 	struct new_utsname *p_sysid = utsname();
1600 	struct ct_fdmi_hba_attr *eiter;
1601 	uint16_t alen;
1602 	unsigned long size = 0;
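	/*
	 * Each FDMI attribute is emitted as a 2-byte type and 2-byte length
	 * (FDMI_ATTR_TYPELEN) followed by the value padded to a 4-byte
	 * boundary (FDMI_ATTR_ALIGNMENT); @size accumulates the number of
	 * attribute bytes written into @entries.
	 */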
1603 
1604 	/* Nodename. */
1605 	eiter = entries + size;
1606 	eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1607 	memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1608 	alen = sizeof(eiter->a.node_name);
1609 	alen += FDMI_ATTR_TYPELEN(eiter);
1610 	eiter->len = cpu_to_be16(alen);
1611 	size += alen;
1612 	ql_dbg(ql_dbg_disc, vha, 0x20a0,
1613 	    "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1614 	/* Manufacturer. */
1615 	eiter = entries + size;
1616 	eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1617 	alen = scnprintf(
1618 		eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1619 		"%s", QLA2XXX_MANUFACTURER);
1620 	alen += FDMI_ATTR_ALIGNMENT(alen);
1621 	alen += FDMI_ATTR_TYPELEN(eiter);
1622 	eiter->len = cpu_to_be16(alen);
1623 	size += alen;
1624 	ql_dbg(ql_dbg_disc, vha, 0x20a1,
1625 	    "MANUFACTURER = %s.\n", eiter->a.manufacturer);
1626 	/* Serial number. */
1627 	eiter = entries + size;
1628 	eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1629 	alen = 0;
1630 	if (IS_FWI2_CAPABLE(ha)) {
1631 		alen = qla2xxx_get_vpd_field(vha, "SN",
1632 		    eiter->a.serial_num, sizeof(eiter->a.serial_num));
1633 	}
1634 	if (!alen) {
1635 		uint32_t sn = ((ha->serial0 & 0x1f) << 16) |
1636 			(ha->serial2 << 8) | ha->serial1;
1637 		alen = scnprintf(
1638 			eiter->a.serial_num, sizeof(eiter->a.serial_num),
1639 			"%c%05d", 'A' + sn / 100000, sn % 100000);
1640 	}
1641 	alen += FDMI_ATTR_ALIGNMENT(alen);
1642 	alen += FDMI_ATTR_TYPELEN(eiter);
1643 	eiter->len = cpu_to_be16(alen);
1644 	size += alen;
1645 	ql_dbg(ql_dbg_disc, vha, 0x20a2,
1646 	    "SERIAL NUMBER = %s.\n", eiter->a.serial_num);
1647 	/* Model name. */
1648 	eiter = entries + size;
1649 	eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1650 	alen = scnprintf(
1651 		eiter->a.model, sizeof(eiter->a.model),
1652 		"%s", ha->model_number);
1653 	alen += FDMI_ATTR_ALIGNMENT(alen);
1654 	alen += FDMI_ATTR_TYPELEN(eiter);
1655 	eiter->len = cpu_to_be16(alen);
1656 	size += alen;
1657 	ql_dbg(ql_dbg_disc, vha, 0x20a3,
1658 	    "MODEL NAME = %s.\n", eiter->a.model);
1659 	/* Model description. */
1660 	eiter = entries + size;
1661 	eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1662 	alen = scnprintf(
1663 		eiter->a.model_desc, sizeof(eiter->a.model_desc),
1664 		"%s", ha->model_desc);
1665 	alen += FDMI_ATTR_ALIGNMENT(alen);
1666 	alen += FDMI_ATTR_TYPELEN(eiter);
1667 	eiter->len = cpu_to_be16(alen);
1668 	size += alen;
1669 	ql_dbg(ql_dbg_disc, vha, 0x20a4,
1670 	    "MODEL DESCRIPTION = %s.\n", eiter->a.model_desc);
1671 	/* Hardware version. */
1672 	eiter = entries + size;
1673 	eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1674 	alen = 0;
1675 	if (IS_FWI2_CAPABLE(ha)) {
1676 		if (!alen) {
1677 			alen = qla2xxx_get_vpd_field(vha, "MN",
1678 			    eiter->a.hw_version, sizeof(eiter->a.hw_version));
1679 		}
1680 		if (!alen) {
1681 			alen = qla2xxx_get_vpd_field(vha, "EC",
1682 			    eiter->a.hw_version, sizeof(eiter->a.hw_version));
1683 		}
1684 	}
1685 	if (!alen) {
1686 		alen = scnprintf(
1687 			eiter->a.hw_version, sizeof(eiter->a.hw_version),
1688 			"HW:%s", ha->adapter_id);
1689 	}
1690 	alen += FDMI_ATTR_ALIGNMENT(alen);
1691 	alen += FDMI_ATTR_TYPELEN(eiter);
1692 	eiter->len = cpu_to_be16(alen);
1693 	size += alen;
1694 	ql_dbg(ql_dbg_disc, vha, 0x20a5,
1695 	    "HARDWARE VERSION = %s.\n", eiter->a.hw_version);
1696 	/* Driver version. */
1697 	eiter = entries + size;
1698 	eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1699 	alen = scnprintf(
1700 		eiter->a.driver_version, sizeof(eiter->a.driver_version),
1701 		"%s", qla2x00_version_str);
1702 	alen += FDMI_ATTR_ALIGNMENT(alen);
1703 	alen += FDMI_ATTR_TYPELEN(eiter);
1704 	eiter->len = cpu_to_be16(alen);
1705 	size += alen;
1706 	ql_dbg(ql_dbg_disc, vha, 0x20a6,
1707 	    "DRIVER VERSION = %s.\n", eiter->a.driver_version);
1708 	/* Option ROM version. */
1709 	eiter = entries + size;
1710 	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1711 	alen = scnprintf(
1712 		eiter->a.orom_version, sizeof(eiter->a.orom_version),
1713 		"%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1714 	alen += FDMI_ATTR_ALIGNMENT(alen);
1715 	alen += FDMI_ATTR_TYPELEN(eiter);
1716 	eiter->len = cpu_to_be16(alen);
1717 	size += alen;
1718 
1719 	ql_dbg(ql_dbg_disc, vha, 0x20a7,
1720 	    "OPTROM VERSION = %d.%02d.\n",
1721 	    eiter->a.orom_version[1], eiter->a.orom_version[0]);
1722 	/* Firmware version */
1723 	eiter = entries + size;
1724 	eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1725 	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1726 	    sizeof(eiter->a.fw_version));
	alen = strlen(eiter->a.fw_version);
1727 	alen += FDMI_ATTR_ALIGNMENT(alen);
1728 	alen += FDMI_ATTR_TYPELEN(eiter);
1729 	eiter->len = cpu_to_be16(alen);
1730 	size += alen;
1731 	ql_dbg(ql_dbg_disc, vha, 0x20a8,
1732 	    "FIRMWARE VERSION = %s.\n", eiter->a.fw_version);
1733 	/* OS Name and Version */
1734 	eiter = entries + size;
1735 	eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
1736 	alen = 0;
1737 	if (p_sysid) {
1738 		alen = scnprintf(
1739 			eiter->a.os_version, sizeof(eiter->a.os_version),
1740 			"%s %s %s",
1741 			p_sysid->sysname, p_sysid->release, p_sysid->machine);
1742 	}
1743 	if (!alen) {
1744 		alen = scnprintf(
1745 			eiter->a.os_version, sizeof(eiter->a.os_version),
1746 			"%s %s",
1747 			"Linux", fc_host_system_hostname(vha->host));
1748 	}
1749 	alen += FDMI_ATTR_ALIGNMENT(alen);
1750 	alen += FDMI_ATTR_TYPELEN(eiter);
1751 	eiter->len = cpu_to_be16(alen);
1752 	size += alen;
1753 	ql_dbg(ql_dbg_disc, vha, 0x20a9,
1754 	    "OS VERSION = %s.\n", eiter->a.os_version);
1755 	if (callopt == CALLOPT_FDMI1)
1756 		goto done;
1757 	/* MAX CT Payload Length */
1758 	eiter = entries + size;
1759 	eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
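	/* The maximum CT payload is reported in 32-bit words (frame payload size / 4). */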
1760 	eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2);
1761 
1762 	alen = sizeof(eiter->a.max_ct_len);
1763 	alen += FDMI_ATTR_TYPELEN(eiter);
1764 	eiter->len = cpu_to_be16(alen);
1765 	size += alen;
1766 	ql_dbg(ql_dbg_disc, vha, 0x20aa,
1767 	    "CT PAYLOAD LENGTH = 0x%x.\n", be32_to_cpu(eiter->a.max_ct_len));
1768 	/* Node Symbolic Name */
1769 	eiter = entries + size;
1770 	eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
1771 	alen = qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
1772 	    sizeof(eiter->a.sym_name));
1773 	alen += FDMI_ATTR_ALIGNMENT(alen);
1774 	alen += FDMI_ATTR_TYPELEN(eiter);
1775 	eiter->len = cpu_to_be16(alen);
1776 	size += alen;
1777 	ql_dbg(ql_dbg_disc, vha, 0x20ab,
1778 	    "SYMBOLIC NAME = %s.\n", eiter->a.sym_name);
1779 	/* Vendor Specific information */
1780 	eiter = entries + size;
1781 	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_SPECIFIC_INFO);
1782 	eiter->a.vendor_specific_info = cpu_to_be32(PCI_VENDOR_ID_QLOGIC);
1783 	alen = sizeof(eiter->a.vendor_specific_info);
1784 	alen += FDMI_ATTR_TYPELEN(eiter);
1785 	eiter->len = cpu_to_be16(alen);
1786 	size += alen;
1787 	ql_dbg(ql_dbg_disc, vha, 0x20ac,
1788 	    "VENDOR SPECIFIC INFO = 0x%x.\n",
1789 	    be32_to_cpu(eiter->a.vendor_specific_info));
1790 	/* Num Ports */
1791 	eiter = entries + size;
1792 	eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
1793 	eiter->a.num_ports = cpu_to_be32(1);
1794 	alen = sizeof(eiter->a.num_ports);
1795 	alen += FDMI_ATTR_TYPELEN(eiter);
1796 	eiter->len = cpu_to_be16(alen);
1797 	size += alen;
1798 	ql_dbg(ql_dbg_disc, vha, 0x20ad,
1799 	    "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
1800 	/* Fabric Name */
1801 	eiter = entries + size;
1802 	eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
1803 	memcpy(eiter->a.fabric_name, vha->fabric_node_name,
1804 	    sizeof(eiter->a.fabric_name));
1805 	alen = sizeof(eiter->a.fabric_name);
1806 	alen += FDMI_ATTR_TYPELEN(eiter);
1807 	eiter->len = cpu_to_be16(alen);
1808 	size += alen;
1809 	ql_dbg(ql_dbg_disc, vha, 0x20ae,
1810 	    "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
1811 	/* BIOS Version */
1812 	eiter = entries + size;
1813 	eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
1814 	alen = scnprintf(
1815 		eiter->a.bios_name, sizeof(eiter->a.bios_name),
1816 		"BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1817 	alen += FDMI_ATTR_ALIGNMENT(alen);
1818 	alen += FDMI_ATTR_TYPELEN(eiter);
1819 	eiter->len = cpu_to_be16(alen);
1820 	size += alen;
1821 	ql_dbg(ql_dbg_disc, vha, 0x20af,
1822 	    "BIOS NAME = %s\n", eiter->a.bios_name);
1823 	/* Vendor Identifier */
1824 	eiter = entries + size;
1825 	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_IDENTIFIER);
1826 	alen = scnprintf(
1827 		eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
1828 		"%s", "QLGC");
1829 	alen += FDMI_ATTR_ALIGNMENT(alen);
1830 	alen += FDMI_ATTR_TYPELEN(eiter);
1831 	eiter->len = cpu_to_be16(alen);
1832 	size += alen;
1833 	ql_dbg(ql_dbg_disc, vha, 0x20b0,
1834 	    "VENDOR IDENTIFIER = %s.\n", eiter->a.vendor_identifier);
1835 done:
1836 	return size;
1837 }
1838 
1839 /**
1840  * qla2x00_port_attributes() - build the FDMI Port attribute block
1841  * @vha: HA context
1842  * @entries: buffer in which to build the port attribute entries
1843  * @callopt: option selecting standard (FDMI-1), extended (FDMI-2), or
1844  *           Smart SAN attribute registration
1845  *
1846  * Returns the number of bytes of attribute data placed in @entries.
1847  */
1848 static unsigned long
1849 qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries,
1850 	unsigned int callopt)
1851 {
1852 	struct qla_hw_data *ha = vha->hw;
1853 	struct new_utsname *p_sysid = utsname();
1854 	char *hostname = p_sysid ?
1855 		p_sysid->nodename : fc_host_system_hostname(vha->host);
1856 	struct ct_fdmi_port_attr *eiter;
1857 	uint16_t alen;
1858 	unsigned long size = 0;
1859 
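	/*
	 * Each attribute entry below carries a 2-byte type and a 2-byte
	 * length followed by its value; alen accumulates the value length,
	 * pads it to a 4-byte boundary (FDMI_ATTR_ALIGNMENT) and adds the
	 * type/length header (FDMI_ATTR_TYPELEN) before growing 'size'.
	 */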
1860 	/* FC4 types. */
1861 	eiter = entries + size;
1862 	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
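	/* FC-4 Types bitmask: byte 2, bit 0 corresponds to type 0x08 (FCP SCSI). */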
1863 	eiter->a.fc4_types[0] = 0x00;
1864 	eiter->a.fc4_types[1] = 0x00;
1865 	eiter->a.fc4_types[2] = 0x01;
1866 	eiter->a.fc4_types[3] = 0x00;
1867 	alen = sizeof(eiter->a.fc4_types);
1868 	alen += FDMI_ATTR_TYPELEN(eiter);
1869 	eiter->len = cpu_to_be16(alen);
1870 	size += alen;
1871 	ql_dbg(ql_dbg_disc, vha, 0x20c0,
1872 	    "FC4 TYPES = %016llx.\n", *(uint64_t *)eiter->a.fc4_types);
1873 	if (vha->flags.nvme_enabled) {
1874 		eiter->a.fc4_types[6] = 1;      /* NVMe type 28h */
1875 		ql_dbg(ql_dbg_disc, vha, 0x211f,
1876 		    "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
1877 		    eiter->a.fc4_types[6]);
1878 	}
1879 	/* Supported speed. */
1880 	eiter = entries + size;
1881 	eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1882 	eiter->a.sup_speed = cpu_to_be32(
1883 		qla25xx_fdmi_port_speed_capability(ha));
1884 	alen = sizeof(eiter->a.sup_speed);
1885 	alen += FDMI_ATTR_TYPELEN(eiter);
1886 	eiter->len = cpu_to_be16(alen);
1887 	size += alen;
1888 	ql_dbg(ql_dbg_disc, vha, 0x20c1,
1889 	    "SUPPORTED SPEED = %x.\n", be32_to_cpu(eiter->a.sup_speed));
1890 	/* Current speed. */
1891 	eiter = entries + size;
1892 	eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1893 	eiter->a.cur_speed = cpu_to_be32(
1894 		qla25xx_fdmi_port_speed_currently(ha));
1895 	alen = sizeof(eiter->a.cur_speed);
1896 	alen += FDMI_ATTR_TYPELEN(eiter);
1897 	eiter->len = cpu_to_be16(alen);
1898 	size += alen;
1899 	ql_dbg(ql_dbg_disc, vha, 0x20c2,
1900 	    "CURRENT SPEED = %x.\n", be32_to_cpu(eiter->a.cur_speed));
1901 	/* Max frame size. */
1902 	eiter = entries + size;
1903 	eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1904 	eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size);
1905 	alen = sizeof(eiter->a.max_frame_size);
1906 	alen += FDMI_ATTR_TYPELEN(eiter);
1907 	eiter->len = cpu_to_be16(alen);
1908 	size += alen;
1909 	ql_dbg(ql_dbg_disc, vha, 0x20c3,
1910 	    "MAX FRAME SIZE = %x.\n", be32_to_cpu(eiter->a.max_frame_size));
1911 	/* OS device name. */
1912 	eiter = entries + size;
1913 	eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1914 	alen = scnprintf(
1915 		eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1916 		"%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1917 	alen += FDMI_ATTR_ALIGNMENT(alen);
1918 	alen += FDMI_ATTR_TYPELEN(eiter);
1919 	eiter->len = cpu_to_be16(alen);
1920 	size += alen;
1921 	ql_dbg(ql_dbg_disc, vha, 0x20c4,
1922 	    "OS DEVICE NAME = %s.\n", eiter->a.os_dev_name);
1923 	/* Hostname. */
1924 	eiter = entries + size;
1925 	eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1926 	if (!*hostname || !strncmp(hostname, "(none)", 6))
1927 		hostname = "Linux-default";
1928 	alen = scnprintf(
1929 		eiter->a.host_name, sizeof(eiter->a.host_name),
1930 		"%s", hostname);
1931 	alen += FDMI_ATTR_ALIGNMENT(alen);
1932 	alen += FDMI_ATTR_TYPELEN(eiter);
1933 	eiter->len = cpu_to_be16(alen);
1934 	size += alen;
1935 	ql_dbg(ql_dbg_disc, vha, 0x20c5,
1936 	    "HOSTNAME = %s.\n", eiter->a.host_name);
1937 
1938 	if (callopt == CALLOPT_FDMI1)
1939 		goto done;
1940 
1941 	/* Node Name */
1942 	eiter = entries + size;
1943 	eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
1944 	memcpy(eiter->a.node_name, vha->node_name, sizeof(eiter->a.node_name));
1945 	alen = sizeof(eiter->a.node_name);
1946 	alen += FDMI_ATTR_TYPELEN(eiter);
1947 	eiter->len = cpu_to_be16(alen);
1948 	size += alen;
1949 	ql_dbg(ql_dbg_disc, vha, 0x20c6,
1950 	    "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1951 
1952 	/* Port Name */
1953 	eiter = entries + size;
1954 	eiter->type = cpu_to_be16(FDMI_PORT_NAME);
1955 	memcpy(eiter->a.port_name, vha->port_name, sizeof(eiter->a.port_name));
1956 	alen = sizeof(eiter->a.port_name);
1957 	alen += FDMI_ATTR_TYPELEN(eiter);
1958 	eiter->len = cpu_to_be16(alen);
1959 	size += alen;
1960 	ql_dbg(ql_dbg_disc, vha, 0x20c7,
1961 	    "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name));
1962 
1963 	/* Port Symbolic Name */
1964 	eiter = entries + size;
1965 	eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
1966 	alen = qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
1967 	    sizeof(eiter->a.port_sym_name));
1968 	alen += FDMI_ATTR_ALIGNMENT(alen);
1969 	alen += FDMI_ATTR_TYPELEN(eiter);
1970 	eiter->len = cpu_to_be16(alen);
1971 	size += alen;
1972 	ql_dbg(ql_dbg_disc, vha, 0x20c8,
1973 	    "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name);
1974 
1975 	/* Port Type */
1976 	eiter = entries + size;
1977 	eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
1978 	eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
1979 	alen = sizeof(eiter->a.port_type);
1980 	alen += FDMI_ATTR_TYPELEN(eiter);
1981 	eiter->len = cpu_to_be16(alen);
1982 	size += alen;
1983 	ql_dbg(ql_dbg_disc, vha, 0x20c9,
1984 	    "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type));
1985 
1986 	/* Supported Class of Service */
1987 	eiter = entries + size;
1988 	eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
1989 	eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
1990 	alen = sizeof(eiter->a.port_supported_cos);
1991 	alen += FDMI_ATTR_TYPELEN(eiter);
1992 	eiter->len = cpu_to_be16(alen);
1993 	size += alen;
1994 	ql_dbg(ql_dbg_disc, vha, 0x20ca,
1995 	    "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos));
1996 
1997 	/* Port Fabric Name */
1998 	eiter = entries + size;
1999 	eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2000 	memcpy(eiter->a.fabric_name, vha->fabric_node_name,
2001 	    sizeof(eiter->a.fabric_name));
2002 	alen = sizeof(eiter->a.fabric_name);
2003 	alen += FDMI_ATTR_TYPELEN(eiter);
2004 	eiter->len = cpu_to_be16(alen);
2005 	size += alen;
2006 	ql_dbg(ql_dbg_disc, vha, 0x20cb,
2007 	    "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2008 
2009 	/* FC4_type */
2010 	eiter = entries + size;
2011 	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
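	/* Active FC-4 type bitmask: report type 0x08 (FCP SCSI) as active. */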
2012 	eiter->a.port_fc4_type[0] = 0x00;
2013 	eiter->a.port_fc4_type[1] = 0x00;
2014 	eiter->a.port_fc4_type[2] = 0x01;
2015 	eiter->a.port_fc4_type[3] = 0x00;
2016 	alen = sizeof(eiter->a.port_fc4_type);
2017 	alen += FDMI_ATTR_TYPELEN(eiter);
2018 	eiter->len = cpu_to_be16(alen);
2019 	size += alen;
2020 	ql_dbg(ql_dbg_disc, vha, 0x20cc,
2021 	    "PORT ACTIVE FC4 TYPE = %016llx.\n",
2022 	    *(uint64_t *)eiter->a.port_fc4_type);
2023 
2024 	/* Port State */
2025 	eiter = entries + size;
2026 	eiter->type = cpu_to_be16(FDMI_PORT_STATE);
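	/* Port state 2 = online. */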
2027 	eiter->a.port_state = cpu_to_be32(2);
2028 	alen = sizeof(eiter->a.port_state);
2029 	alen += FDMI_ATTR_TYPELEN(eiter);
2030 	eiter->len = cpu_to_be16(alen);
2031 	size += alen;
2032 	ql_dbg(ql_dbg_disc, vha, 0x20cd,
2033 	    "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state));
2034 
2035 	/* Number of Ports */
2036 	eiter = entries + size;
2037 	eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2038 	eiter->a.num_ports = cpu_to_be32(1);
2039 	alen = sizeof(eiter->a.num_ports);
2040 	alen += FDMI_ATTR_TYPELEN(eiter);
2041 	eiter->len = cpu_to_be16(alen);
2042 	size += alen;
2043 	ql_dbg(ql_dbg_disc, vha, 0x20ce,
2044 	    "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports));
2045 
2046 	/* Port Identifier */
2047 	eiter = entries + size;
2048 	eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER);
2049 	eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2050 	alen = sizeof(eiter->a.port_id);
2051 	alen += FDMI_ATTR_TYPELEN(eiter);
2052 	eiter->len = cpu_to_be16(alen);
2053 	size += alen;
2054 	ql_dbg(ql_dbg_disc, vha, 0x20cf,
2055 	    "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id));
2056 
2057 	if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan)
2058 		goto done;
2059 
2060 	/* Smart SAN Service Category (populate "Smart SAN Initiator") */
2061 	eiter = entries + size;
2062 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE);
2063 	alen = scnprintf(
2064 		eiter->a.smartsan_service, sizeof(eiter->a.smartsan_service),
2065 		"%s", "Smart SAN Initiator");
2066 	alen += FDMI_ATTR_ALIGNMENT(alen);
2067 	alen += FDMI_ATTR_TYPELEN(eiter);
2068 	eiter->len = cpu_to_be16(alen);
2069 	size += alen;
2070 	ql_dbg(ql_dbg_disc, vha, 0x20d0,
2071 	    "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service);
2072 
2073 	/* Smart SAN GUID (NWWN+PWWN) */
2074 	eiter = entries + size;
2075 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID);
2076 	memcpy(eiter->a.smartsan_guid, vha->node_name, WWN_SIZE);
2077 	memcpy(eiter->a.smartsan_guid + WWN_SIZE, vha->port_name, WWN_SIZE);
2078 	alen = sizeof(eiter->a.smartsan_guid);
2079 	alen += FDMI_ATTR_TYPELEN(eiter);
2080 	eiter->len = cpu_to_be16(alen);
2081 	size += alen;
2082 	ql_dbg(ql_dbg_disc, vha, 0x20d1,
2083 	    "Smart SAN GUID = %016llx-%016llx\n",
2084 	    wwn_to_u64(eiter->a.smartsan_guid),
2085 	    wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE));
2086 
2087 	/* Smart SAN Version (populate "Smart SAN Version 2.0") */
2088 	eiter = entries + size;
2089 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION);
2090 	alen = scnprintf(
2091 		eiter->a.smartsan_version, sizeof(eiter->a.smartsan_version),
2092 		"%s", "Smart SAN Version 2.0");
2093 	alen += FDMI_ATTR_ALIGNMENT(alen);
2094 	alen += FDMI_ATTR_TYPELEN(eiter);
2095 	eiter->len = cpu_to_be16(alen);
2096 	size += alen;
2097 	ql_dbg(ql_dbg_disc, vha, 0x20d2,
2098 	    "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version);
2099 
2100 	/* Smart SAN Product Name (Specify Adapter Model No) */
2101 	eiter = entries + size;
2102 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME);
2103 	alen = scnprintf(eiter->a.smartsan_prod_name,
2104 		sizeof(eiter->a.smartsan_prod_name),
2105 		"ISP%04x", ha->pdev->device);
2106 	alen += FDMI_ATTR_ALIGNMENT(alen);
2107 	alen += FDMI_ATTR_TYPELEN(eiter);
2108 	eiter->len = cpu_to_be16(alen);
2109 	size += alen;
2110 	ql_dbg(ql_dbg_disc, vha, 0x20d3,
2111 	    "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name);
2112 
2113 	/* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */
2114 	eiter = entries + size;
2115 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO);
2116 	eiter->a.smartsan_port_info = cpu_to_be32(vha->vp_idx ? 2 : 1);
2117 	alen = sizeof(eiter->a.smartsan_port_info);
2118 	alen += FDMI_ATTR_TYPELEN(eiter);
2119 	eiter->len = cpu_to_be16(alen);
2120 	size += alen;
2121 	ql_dbg(ql_dbg_disc, vha, 0x20d4,
2122 	    "SMARTSAN PORT INFO = %x\n",
	    be32_to_cpu(eiter->a.smartsan_port_info));
2123 
2124 	/* Smart SAN Security Support */
2125 	eiter = entries + size;
2126 	eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT);
2127 	eiter->a.smartsan_security_support = cpu_to_be32(1);
2128 	alen = sizeof(eiter->a.smartsan_security_support);
2129 	alen += FDMI_ATTR_TYPELEN(eiter);
2130 	eiter->len = cpu_to_be16(alen);
2131 	size += alen;
2132 	ql_dbg(ql_dbg_disc, vha, 0x20d6,
2133 	    "SMARTSAN SECURITY SUPPORT = %d\n",
2134 	    be32_to_cpu(eiter->a.smartsan_security_support));
2135 
2136 done:
2137 	return size;
2138 }
2139 
2140 /**
2141  * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
2142  * @vha: HA context
2143  * @callopt: Option to issue FDMI registration
2144  *
2145  * Returns 0 on success.
2146  */
2147 static int
2148 qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt)
2149 {
2150 	struct qla_hw_data *ha = vha->hw;
2151 	unsigned long size = 0;
2152 	unsigned int rval, count;
2153 	ms_iocb_entry_t *ms_pkt;
2154 	struct ct_sns_req *ct_req;
2155 	struct ct_sns_rsp *ct_rsp;
2156 	void *entries;
2157 
2158 	count = callopt != CALLOPT_FDMI1 ?
2159 	    FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT;
2160 
2161 	size = RHBA_RSP_SIZE;
2162 
2163 	ql_dbg(ql_dbg_disc, vha, 0x20e0,
2164 	    "RHBA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2165 
2166 	/*   Request size adjusted after CT preparation */
2167 	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2168 
2169 	/* Prepare CT request */
2170 	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, size);
2171 	ct_rsp = &ha->ct_sns->p.rsp;
2172 
2173 	/* Prepare FDMI command entries */
2174 	memcpy(ct_req->req.rhba.hba_identifier, vha->port_name,
2175 	    sizeof(ct_req->req.rhba.hba_identifier));
2176 	size += sizeof(ct_req->req.rhba.hba_identifier);
2177 
2178 	ct_req->req.rhba.entry_count = cpu_to_be32(1);
2179 	size += sizeof(ct_req->req.rhba.entry_count);
2180 
2181 	memcpy(ct_req->req.rhba.port_name, vha->port_name,
2182 	    sizeof(ct_req->req.rhba.port_name));
2183 	size += sizeof(ct_req->req.rhba.port_name);
2184 
2185 	/* Attribute count */
2186 	ct_req->req.rhba.attrs.count = cpu_to_be32(count);
2187 	size += sizeof(ct_req->req.rhba.attrs.count);
2188 
2189 	/* Attribute block */
2190 	entries = &ct_req->req.rhba.attrs.entry;
2191 
2192 	size += qla2x00_hba_attributes(vha, entries, callopt);
2193 
2194 	/* Update MS request size. */
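	/* (attribute payload size plus the 16-byte CT_IU preamble) */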
2195 	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2196 
2197 	ql_dbg(ql_dbg_disc, vha, 0x20e1,
2198 	    "RHBA %016llx %016llx.\n",
2199 	    wwn_to_u64(ct_req->req.rhba.hba_identifier),
2200 	    wwn_to_u64(ct_req->req.rhba.port_name));
2201 
2202 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20e2,
2203 	    entries, size);
2204 
2205 	/* Execute MS IOCB */
2206 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2207 	    sizeof(*ha->ms_iocb));
2208 	if (rval) {
2209 		ql_dbg(ql_dbg_disc, vha, 0x20e3,
2210 		    "RHBA iocb failed (%d).\n", rval);
2211 		return rval;
2212 	}
2213 
2214 	rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA");
2215 	if (rval) {
2216 		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2217 		    ct_rsp->header.explanation_code ==
2218 		    CT_EXPL_ALREADY_REGISTERED) {
2219 			ql_dbg(ql_dbg_disc, vha, 0x20e4,
2220 			    "RHBA already registered.\n");
2221 			return QLA_ALREADY_REGISTERED;
2222 		}
2223 
2224 		ql_dbg(ql_dbg_disc, vha, 0x20e5,
2225 		    "RHBA failed, CT Reason %#x, CT Explanation %#x\n",
2226 		    ct_rsp->header.reason_code,
2227 		    ct_rsp->header.explanation_code);
2228 		return rval;
2229 	}
2230 
2231 	ql_dbg(ql_dbg_disc, vha, 0x20e6, "RHBA exiting normally.\n");
2232 	return rval;
2233 }
2234 
2235 
2236 static int
2237 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2238 {
2239 	int rval;
2240 	struct qla_hw_data *ha = vha->hw;
2241 	ms_iocb_entry_t *ms_pkt;
2242 	struct ct_sns_req *ct_req;
2243 	struct ct_sns_rsp *ct_rsp;
2244 	/* Issue DHBA */
2245 	/* Prepare common MS IOCB */
2246 	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2247 	    DHBA_RSP_SIZE);
2248 	/* Prepare CT request */
2249 	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2250 	ct_rsp = &ha->ct_sns->p.rsp;
2251 	/* Prepare FDMI command arguments -- portname. */
2252 	memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2253 	ql_dbg(ql_dbg_disc, vha, 0x2036,
2254 	    "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2255 	/* Execute MS IOCB */
2256 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2257 	    sizeof(ms_iocb_entry_t));
2258 	if (rval != QLA_SUCCESS) {
2259 		/*EMPTY*/
2260 		ql_dbg(ql_dbg_disc, vha, 0x2037,
2261 		    "DHBA issue IOCB failed (%d).\n", rval);
2262 	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2263 	    QLA_SUCCESS) {
2264 		rval = QLA_FUNCTION_FAILED;
2265 	} else {
2266 		ql_dbg(ql_dbg_disc, vha, 0x2038,
2267 		    "DHBA exiting normally.\n");
2268 	}
2269 	return rval;
2270 }
2271 
2272 /**
2273  * qla2x00_fdmi_rprt() - perform RPRT registration
2274  * @vha: HA context
2275  * @callopt: option selecting standard (FDMI-1), extended (FDMI-2), or
2276  *           Smart SAN attribute registration
2277  *
2278  * Returns 0 on success.
2279  */
2280 static int
2281 qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt)
2282 {
2283 	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
2284 	struct qla_hw_data *ha = vha->hw;
2285 	ulong size = 0;
2286 	uint rval, count;
2287 	ms_iocb_entry_t *ms_pkt;
2288 	struct ct_sns_req *ct_req;
2289 	struct ct_sns_rsp *ct_rsp;
2290 	void *entries;
2291 	count = callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2292 		FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2293 		callopt != CALLOPT_FDMI1 ?
2294 		FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2295 
2296 	size = RPRT_RSP_SIZE;
2297 	ql_dbg(ql_dbg_disc, vha, 0x20e8,
2298 	    "RPRT (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2299 	/* Request size adjusted after CT preparation */
2300 	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2301 	/* Prepare CT request */
2302 	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPRT_CMD, size);
2303 	ct_rsp = &ha->ct_sns->p.rsp;
2304 	/* Prepare FDMI command entries */
2305 	memcpy(ct_req->req.rprt.hba_identifier, base_vha->port_name,
2306 	    sizeof(ct_req->req.rprt.hba_identifier));
2307 	size += sizeof(ct_req->req.rprt.hba_identifier);
2308 	memcpy(ct_req->req.rprt.port_name, vha->port_name,
2309 	    sizeof(ct_req->req.rprt.port_name));
2310 	size += sizeof(ct_req->req.rprt.port_name);
2311 	/* Attribute count */
2312 	ct_req->req.rprt.attrs.count = cpu_to_be32(count);
2313 	size += sizeof(ct_req->req.rprt.attrs.count);
2314 	/* Attribute block */
2315 	entries = ct_req->req.rprt.attrs.entry;
2316 	size += qla2x00_port_attributes(vha, entries, callopt);
2317 	/* Update MS request size. */
2318 	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2319 	ql_dbg(ql_dbg_disc, vha, 0x20e9,
2320 	    "RPRT %016llx %016llx.\n",
2321 	    wwn_to_u64(ct_req->req.rprt.hba_identifier),
2322 	    wwn_to_u64(ct_req->req.rprt.port_name));
2323 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ea,
2324 	    entries, size);
2325 	/* Execute MS IOCB */
2326 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2327 	    sizeof(*ha->ms_iocb));
2328 	if (rval) {
2329 		ql_dbg(ql_dbg_disc, vha, 0x20eb,
2330 		    "RPRT iocb failed (%d).\n", rval);
2331 		return rval;
2332 	}
2333 	rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT");
2334 	if (rval) {
2335 		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2336 		    ct_rsp->header.explanation_code ==
2337 		    CT_EXPL_ALREADY_REGISTERED) {
2338 			ql_dbg(ql_dbg_disc, vha, 0x20ec,
2339 			    "RPRT already registered.\n");
2340 			return QLA_ALREADY_REGISTERED;
2341 		}
2342 
2343 		ql_dbg(ql_dbg_disc, vha, 0x20ed,
2344 		    "RPRT failed, CT Reason code: %#x, CT Explanation %#x\n",
2345 		    ct_rsp->header.reason_code,
2346 		    ct_rsp->header.explanation_code);
2347 		return rval;
2348 	}
2349 	ql_dbg(ql_dbg_disc, vha, 0x20ee, "RPRT exiting normally.\n");
2350 	return rval;
2351 }
2352 
2353 /**
2354  * qla2x00_fdmi_rpa() - perform RPA registration
2355  * @vha: HA context
2356  * @callopt: Option to issue FDMI registration
2357  *
2358  * Returns 0 on success.
2359  */
2360 static int
2361 qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt)
2362 {
2363 	struct qla_hw_data *ha = vha->hw;
2364 	ulong size = 0;
2365 	uint rval, count;
2366 	ms_iocb_entry_t *ms_pkt;
2367 	struct ct_sns_req *ct_req;
2368 	struct ct_sns_rsp *ct_rsp;
2369 	void *entries;
2370 
2371 	count =
2372 	    callopt == CALLOPT_FDMI2_SMARTSAN && ql2xsmartsan ?
2373 		FDMI2_SMARTSAN_PORT_ATTR_COUNT :
2374 	    callopt != CALLOPT_FDMI1 ?
2375 		FDMI2_PORT_ATTR_COUNT : FDMI1_PORT_ATTR_COUNT;
2376 
2377 	size =
2378 	    callopt != CALLOPT_FDMI1 ?
2379 		SMARTSAN_RPA_RSP_SIZE : RPA_RSP_SIZE;
2380 
2381 	ql_dbg(ql_dbg_disc, vha, 0x20f0,
2382 	    "RPA (callopt=%x count=%u size=%lu).\n", callopt, count, size);
2383 
2384 	/* Request size adjusted after CT preparation */
2385 	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, size);
2386 
2387 	/* Prepare CT request */
2388 	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, size);
2389 	ct_rsp = &ha->ct_sns->p.rsp;
2390 
2391 	/* Prepare FDMI command entries. */
2392 	memcpy(ct_req->req.rpa.port_name, vha->port_name,
2393 	    sizeof(ct_req->req.rpa.port_name));
2394 	size += sizeof(ct_req->req.rpa.port_name);
2395 
2396 	/* Attribute count */
2397 	ct_req->req.rpa.attrs.count = cpu_to_be32(count);
2398 	size += sizeof(ct_req->req.rpa.attrs.count);
2399 
2400 	/* Attribute block */
2401 	entries = ct_req->req.rpa.attrs.entry;
2402 
2403 	size += qla2x00_port_attributes(vha, entries, callopt);
2404 
2405 	/* Update MS request size. */
2406 	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2407 
2408 	ql_dbg(ql_dbg_disc, vha, 0x20f1,
2409 	    "RPA %016llx.\n", wwn_to_u64(ct_req->req.rpa.port_name));
2410 
2411 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20f2,
2412 	    entries, size);
2413 
2414 	/* Execute MS IOCB */
2415 	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2416 	    sizeof(*ha->ms_iocb));
2417 	if (rval) {
2418 		ql_dbg(ql_dbg_disc, vha, 0x20f3,
2419 		    "RPA iocb failed (%d).\n", rval);
2420 		return rval;
2421 	}
2422 
2423 	rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA");
2424 	if (rval) {
2425 		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2426 		    ct_rsp->header.explanation_code ==
2427 		    CT_EXPL_ALREADY_REGISTERED) {
2428 			ql_dbg(ql_dbg_disc, vha, 0x20f4,
2429 			    "RPA already registered.\n");
2430 			return QLA_ALREADY_REGISTERED;
2431 		}
2432 
2433 		ql_dbg(ql_dbg_disc, vha, 0x20f5,
2434 		    "RPA failed, CT Reason code: %#x, CT Explanation %#x\n",
2435 		    ct_rsp->header.reason_code,
2436 		    ct_rsp->header.explanation_code);
2437 		return rval;
2438 	}
2439 
2440 	ql_dbg(ql_dbg_disc, vha, 0x20f6, "RPA exiting normally.\n");
2441 	return rval;
2442 }
2443 
2444 /**
2445  * qla2x00_fdmi_register() - perform FDMI HBA and port registration
2446  * @vha: HA context
2447  *
2448  * Returns 0 on success.
2449  */
2450 int
2451 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2452 {
2453 	int rval = QLA_SUCCESS;
2454 	struct qla_hw_data *ha = vha->hw;
2455 
2456 	if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2457 	    IS_QLAFX00(ha))
2458 		return rval;
2459 
2460 	rval = qla2x00_mgmt_svr_login(vha);
2461 	if (rval)
2462 		return rval;
2463 
2464 	/* For npiv/vport send rprt only */
2465 	if (vha->vp_idx) {
2466 		if (ql2xsmartsan)
2467 			rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2_SMARTSAN);
2468 		if (rval || !ql2xsmartsan)
2469 			rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI2);
2470 		if (rval)
2471 			rval = qla2x00_fdmi_rprt(vha, CALLOPT_FDMI1);
2472 
2473 		return rval;
2474 	}
2475 
2476 	/* Try fdmi2 first, if fails then try fdmi1 */
2477 	rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2478 	if (rval) {
2479 		if (rval != QLA_ALREADY_REGISTERED)
2480 			goto try_fdmi;
2481 
2482 		rval = qla2x00_fdmi_dhba(vha);
2483 		if (rval)
2484 			goto try_fdmi;
2485 
2486 		rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI2);
2487 		if (rval)
2488 			goto try_fdmi;
2489 	}
2490 
2491 	if (ql2xsmartsan)
2492 		rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2_SMARTSAN);
2493 	if (rval || !ql2xsmartsan)
2494 		rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI2);
2495 	if (rval)
2496 		goto try_fdmi;
2497 
2498 	return rval;
2499 
2500 try_fdmi:
2501 	rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2502 	if (rval) {
2503 		if (rval != QLA_ALREADY_REGISTERED)
2504 			return rval;
2505 
2506 		rval = qla2x00_fdmi_dhba(vha);
2507 		if (rval)
2508 			return rval;
2509 
2510 		rval = qla2x00_fdmi_rhba(vha, CALLOPT_FDMI1);
2511 		if (rval)
2512 			return rval;
2513 	}
2514 
2515 	rval = qla2x00_fdmi_rpa(vha, CALLOPT_FDMI1);
2516 
2517 	return rval;
2518 }
2519 
2520 /**
2521  * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2522  * @vha: HA context
2523  * @list: switch info entries to populate
2524  *
2525  * Returns 0 on success.
2526  */
2527 int
2528 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2529 {
2530 	int		rval = QLA_SUCCESS;
2531 	uint16_t	i;
2532 	struct qla_hw_data *ha = vha->hw;
2533 	ms_iocb_entry_t	*ms_pkt;
2534 	struct ct_sns_req	*ct_req;
2535 	struct ct_sns_rsp	*ct_rsp;
2536 	struct ct_arg arg;
2537 
2538 	if (!IS_IIDMA_CAPABLE(ha))
2539 		return QLA_FUNCTION_FAILED;
2540 
2541 	arg.iocb = ha->ms_iocb;
2542 	arg.req_dma = ha->ct_sns_dma;
2543 	arg.rsp_dma = ha->ct_sns_dma;
2544 	arg.req_size = GFPN_ID_REQ_SIZE;
2545 	arg.rsp_size = GFPN_ID_RSP_SIZE;
2546 	arg.nport_handle = NPH_SNS;
2547 
2548 	for (i = 0; i < ha->max_fibre_devices; i++) {
2549 		/* Issue GFPN_ID */
2550 		/* Prepare common MS IOCB */
2551 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2552 
2553 		/* Prepare CT request */
2554 		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2555 		    GFPN_ID_RSP_SIZE);
2556 		ct_rsp = &ha->ct_sns->p.rsp;
2557 
2558 		/* Prepare CT arguments -- port_id */
2559 		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2560 
2561 		/* Execute MS IOCB */
2562 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2563 		    sizeof(ms_iocb_entry_t));
2564 		if (rval != QLA_SUCCESS) {
2565 			/*EMPTY*/
2566 			ql_dbg(ql_dbg_disc, vha, 0x2023,
2567 			    "GFPN_ID issue IOCB failed (%d).\n", rval);
2568 			break;
2569 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2570 		    "GFPN_ID") != QLA_SUCCESS) {
2571 			rval = QLA_FUNCTION_FAILED;
2572 			break;
2573 		} else {
2574 			/* Save fabric portname */
2575 			memcpy(list[i].fabric_port_name,
2576 			    ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2577 		}
2578 
2579 		/* Last device exit. */
2580 		if (list[i].d_id.b.rsvd_1 != 0)
2581 			break;
2582 	}
2583 
2584 	return (rval);
2585 }
2586 
2587 
2588 static inline struct ct_sns_req *
2589 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2590     uint16_t rsp_size)
2591 {
2592 	memset(p, 0, sizeof(struct ct_sns_pkt));
2593 
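	/*
	 * CT_IU preamble for the Fabric Management service (GS type 0xFA,
	 * subtype 0x01 = Fabric Configuration Server, which services GPSC).
	 * The maximum residual response size is expressed in 4-byte words,
	 * excluding the 16-byte preamble.
	 */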
2594 	p->p.req.header.revision = 0x01;
2595 	p->p.req.header.gs_type = 0xFA;
2596 	p->p.req.header.gs_subtype = 0x01;
2597 	p->p.req.command = cpu_to_be16(cmd);
2598 	p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
2599 
2600 	return &p->p.req;
2601 }
2602 
2603 static uint16_t
2604 qla2x00_port_speed_capability(uint16_t speed)
2605 {
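	/* Translate a GPSC speed-capability bit into the driver's PORT_SPEED_* value. */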
2606 	switch (speed) {
2607 	case BIT_15:
2608 		return PORT_SPEED_1GB;
2609 	case BIT_14:
2610 		return PORT_SPEED_2GB;
2611 	case BIT_13:
2612 		return PORT_SPEED_4GB;
2613 	case BIT_12:
2614 		return PORT_SPEED_10GB;
2615 	case BIT_11:
2616 		return PORT_SPEED_8GB;
2617 	case BIT_10:
2618 		return PORT_SPEED_16GB;
2619 	case BIT_8:
2620 		return PORT_SPEED_32GB;
2621 	case BIT_7:
2622 		return PORT_SPEED_64GB;
2623 	default:
2624 		return PORT_SPEED_UNKNOWN;
2625 	}
2626 }
2627 
2628 /**
2629  * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2630  * @vha: HA context
2631  * @list: switch info entries to populate
2632  *
2633  * Returns 0 on success.
2634  */
2635 int
2636 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2637 {
2638 	int		rval;
2639 	uint16_t	i;
2640 	struct qla_hw_data *ha = vha->hw;
2641 	ms_iocb_entry_t *ms_pkt;
2642 	struct ct_sns_req	*ct_req;
2643 	struct ct_sns_rsp	*ct_rsp;
2644 	struct ct_arg arg;
2645 
2646 	if (!IS_IIDMA_CAPABLE(ha))
2647 		return QLA_FUNCTION_FAILED;
2648 	if (!ha->flags.gpsc_supported)
2649 		return QLA_FUNCTION_FAILED;
2650 
2651 	rval = qla2x00_mgmt_svr_login(vha);
2652 	if (rval)
2653 		return rval;
2654 
2655 	arg.iocb = ha->ms_iocb;
2656 	arg.req_dma = ha->ct_sns_dma;
2657 	arg.rsp_dma = ha->ct_sns_dma;
2658 	arg.req_size = GPSC_REQ_SIZE;
2659 	arg.rsp_size = GPSC_RSP_SIZE;
2660 	arg.nport_handle = vha->mgmt_svr_loop_id;
2661 
2662 	for (i = 0; i < ha->max_fibre_devices; i++) {
2663 		/* Issue GPSC */
2664 		/* Prepare common MS IOCB */
2665 		ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2666 
2667 		/* Prepare CT request */
2668 		ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2669 		    GPSC_RSP_SIZE);
2670 		ct_rsp = &ha->ct_sns->p.rsp;
2671 
2672 		/* Prepare CT arguments -- port_name */
2673 		memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2674 		    WWN_SIZE);
2675 
2676 		/* Execute MS IOCB */
2677 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2678 		    sizeof(ms_iocb_entry_t));
2679 		if (rval != QLA_SUCCESS) {
2680 			/*EMPTY*/
2681 			ql_dbg(ql_dbg_disc, vha, 0x2059,
2682 			    "GPSC issue IOCB failed (%d).\n", rval);
2683 		} else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2684 		    "GPSC")) != QLA_SUCCESS) {
2685 			/* FM command unsupported? */
2686 			if (rval == QLA_INVALID_COMMAND &&
2687 			    (ct_rsp->header.reason_code ==
2688 				CT_REASON_INVALID_COMMAND_CODE ||
2689 			     ct_rsp->header.reason_code ==
2690 				CT_REASON_COMMAND_UNSUPPORTED)) {
2691 				ql_dbg(ql_dbg_disc, vha, 0x205a,
2692 				    "GPSC command unsupported, disabling "
2693 				    "query.\n");
2694 				ha->flags.gpsc_supported = 0;
2695 				rval = QLA_FUNCTION_FAILED;
2696 				break;
2697 			}
2698 			rval = QLA_FUNCTION_FAILED;
2699 		} else {
2700 			list[i].fp_speed = qla2x00_port_speed_capability(
2701 			    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2702 			ql_dbg(ql_dbg_disc, vha, 0x205b,
2703 			    "GPSC ext entry - fpn "
2704 			    "%8phN speeds=%04x speed=%04x.\n",
2705 			    list[i].fabric_port_name,
2706 			    be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2707 			    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2708 		}
2709 
2710 		/* Last device exit. */
2711 		if (list[i].d_id.b.rsvd_1 != 0)
2712 			break;
2713 	}
2714 
2715 	return (rval);
2716 }
2717 
2718 /**
2719  * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2720  *
2721  * @vha: HA context
2722  * @list: switch info entries to populate
2723  *
2724  */
2725 void
2726 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2727 {
2728 	int		rval;
2729 	uint16_t	i;
2730 
2731 	ms_iocb_entry_t	*ms_pkt;
2732 	struct ct_sns_req	*ct_req;
2733 	struct ct_sns_rsp	*ct_rsp;
2734 	struct qla_hw_data *ha = vha->hw;
2735 	uint8_t fcp_scsi_features = 0, nvme_features = 0;
2736 	struct ct_arg arg;
2737 
2738 	for (i = 0; i < ha->max_fibre_devices; i++) {
2739 		/* Set the default FC4 type to UNKNOWN so that, by default,
2740 		 * this port is processed. */
2741 		list[i].fc4_type = 0;
2742 
2743 		/* Do not attempt GFF_ID if we are not FWI_2 capable */
2744 		if (!IS_FWI2_CAPABLE(ha))
2745 			continue;
2746 
2747 		arg.iocb = ha->ms_iocb;
2748 		arg.req_dma = ha->ct_sns_dma;
2749 		arg.rsp_dma = ha->ct_sns_dma;
2750 		arg.req_size = GFF_ID_REQ_SIZE;
2751 		arg.rsp_size = GFF_ID_RSP_SIZE;
2752 		arg.nport_handle = NPH_SNS;
2753 
2754 		/* Prepare common MS IOCB */
2755 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2756 
2757 		/* Prepare CT request */
2758 		ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2759 		    GFF_ID_RSP_SIZE);
2760 		ct_rsp = &ha->ct_sns->p.rsp;
2761 
2762 		/* Prepare CT arguments -- port_id */
2763 		ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id);
2764 
2765 		/* Execute MS IOCB */
2766 		rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2767 		   sizeof(ms_iocb_entry_t));
2768 
2769 		if (rval != QLA_SUCCESS) {
2770 			ql_dbg(ql_dbg_disc, vha, 0x205c,
2771 			    "GFF_ID issue IOCB failed (%d).\n", rval);
2772 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2773 			       "GFF_ID") != QLA_SUCCESS) {
2774 			ql_dbg(ql_dbg_disc, vha, 0x205d,
2775 			    "GFF_ID IOCB status had a failure status code.\n");
2776 		} else {
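			/*
			 * GFF_ID returns a 4-bit FC-4 Features field per
			 * FC-4 type; a non-zero nibble indicates the port
			 * supports that FC-4 protocol.
			 */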
2777 			fcp_scsi_features =
2778 			   ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2779 			fcp_scsi_features &= 0x0f;
2780 
2781 			if (fcp_scsi_features) {
2782 				list[i].fc4_type = FS_FC4TYPE_FCP;
2783 				list[i].fc4_features = fcp_scsi_features;
2784 			}
2785 
2786 			nvme_features =
2787 			    ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2788 			nvme_features &= 0xf;
2789 
2790 			if (nvme_features) {
2791 				list[i].fc4_type |= FS_FC4TYPE_NVME;
2792 				list[i].fc4_features = nvme_features;
2793 			}
2794 		}
2795 
2796 		/* Last device exit. */
2797 		if (list[i].d_id.b.rsvd_1 != 0)
2798 			break;
2799 	}
2800 }
2801 
2802 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
2803 {
2804 	struct qla_work_evt *e;
2805 
2806 	e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
2807 	if (!e)
2808 		return QLA_FUNCTION_FAILED;
2809 
2810 	e->u.fcport.fcport = fcport;
2811 	return qla2x00_post_work(vha, e);
2812 }
2813 
2814 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
2815 {
2816 	struct fc_port *fcport = ea->fcport;
2817 
2818 	ql_dbg(ql_dbg_disc, vha, 0x20d8,
2819 	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2820 	    __func__, fcport->port_name, fcport->disc_state,
2821 	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
2822 	    ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
2823 
2824 	if (fcport->disc_state == DSC_DELETE_PEND)
2825 		return;
2826 
2827 	/* We will figure out what happened after AUTH completes */
2828 	if (fcport->disc_state == DSC_LOGIN_AUTH_PEND)
2829 		return;
2830 
2831 	if (ea->sp->gen2 != fcport->login_gen) {
2832 		/* target side must have changed it. */
2833 		ql_dbg(ql_dbg_disc, vha, 0x20d3,
2834 		    "%s %8phC generation changed\n",
2835 		    __func__, fcport->port_name);
2836 		return;
2837 	} else if (ea->sp->gen1 != fcport->rscn_gen) {
2838 		return;
2839 	}
2840 
2841 	qla_post_iidma_work(vha, fcport);
2842 }
2843 
2844 static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res)
2845 {
2846 	struct scsi_qla_host *vha = sp->vha;
2847 	struct qla_hw_data *ha = vha->hw;
2848 	fc_port_t *fcport = sp->fcport;
2849 	struct ct_sns_rsp       *ct_rsp;
2850 	struct event_arg ea;
2851 
2852 	ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
2853 
2854 	ql_dbg(ql_dbg_disc, vha, 0x2053,
2855 	    "Async done-%s res %x, WWPN %8phC \n",
2856 	    sp->name, res, fcport->port_name);
2857 
2858 	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
2859 
2860 	if (res == QLA_FUNCTION_TIMEOUT)
2861 		goto done;
2862 
2863 	if (res == (DID_ERROR << 16)) {
2864 		/* entry status error */
2865 		goto done;
2866 	} else if (res) {
2867 		if ((ct_rsp->header.reason_code ==
2868 			 CT_REASON_INVALID_COMMAND_CODE) ||
2869 			(ct_rsp->header.reason_code ==
2870 			CT_REASON_COMMAND_UNSUPPORTED)) {
2871 			ql_dbg(ql_dbg_disc, vha, 0x2019,
2872 			    "GPSC command unsupported, disabling query.\n");
2873 			ha->flags.gpsc_supported = 0;
2874 			goto done;
2875 		}
2876 	} else {
2877 		fcport->fp_speed = qla2x00_port_speed_capability(
2878 		    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2879 
2880 		ql_dbg(ql_dbg_disc, vha, 0x2054,
2881 		    "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
2882 		    sp->name, fcport->fabric_port_name,
2883 		    be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2884 		    be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2885 	}
2886 	memset(&ea, 0, sizeof(ea));
2887 	ea.rc = res;
2888 	ea.fcport = fcport;
2889 	ea.sp = sp;
2890 	qla24xx_handle_gpsc_event(vha, &ea);
2891 
2892 done:
2893 	/* ref: INIT */
2894 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
2895 }
2896 
2897 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
2898 {
2899 	int rval = QLA_FUNCTION_FAILED;
2900 	struct ct_sns_req       *ct_req;
2901 	srb_t *sp;
2902 
2903 	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
2904 		return rval;
2905 
2906 	/* ref: INIT */
2907 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2908 	if (!sp)
2909 		goto done;
2910 
2911 	sp->type = SRB_CT_PTHRU_CMD;
2912 	sp->name = "gpsc";
2913 	sp->gen1 = fcport->rscn_gen;
2914 	sp->gen2 = fcport->login_gen;
2915 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
2916 			      qla24xx_async_gpsc_sp_done);
2917 
2918 	/* CT_IU preamble  */
2919 	ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
2920 		GPSC_RSP_SIZE);
2921 
2922 	/* GPSC req */
2923 	memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
2924 		WWN_SIZE);
2925 
2926 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
2927 	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
2928 	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
2929 	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
2930 	sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
2931 	sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
2932 	sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
2933 
2934 	ql_dbg(ql_dbg_disc, vha, 0x205e,
2935 	    "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
2936 	    sp->name, fcport->port_name, sp->handle,
2937 	    fcport->loop_id, fcport->d_id.b.domain,
2938 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2939 
2940 	rval = qla2x00_start_sp(sp);
2941 	if (rval != QLA_SUCCESS)
2942 		goto done_free_sp;
2943 	return rval;
2944 
2945 done_free_sp:
2946 	/* ref: INIT */
2947 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
2948 done:
2949 	return rval;
2950 }
2951 
2952 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
2953 {
2954 	struct srb_iocb *c = &sp->u.iocb_cmd;
2955 
2956 	switch (sp->type) {
2957 	case SRB_ELS_DCMD:
2958 		qla2x00_els_dcmd2_free(vha, &c->u.els_plogi);
2959 		break;
2960 	case SRB_CT_PTHRU_CMD:
2961 	default:
2962 		if (sp->u.iocb_cmd.u.ctarg.req) {
2963 			dma_free_coherent(&vha->hw->pdev->dev,
2964 			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
2965 			    sp->u.iocb_cmd.u.ctarg.req,
2966 			    sp->u.iocb_cmd.u.ctarg.req_dma);
2967 			sp->u.iocb_cmd.u.ctarg.req = NULL;
2968 		}
2969 
2970 		if (sp->u.iocb_cmd.u.ctarg.rsp) {
2971 			dma_free_coherent(&vha->hw->pdev->dev,
2972 			    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
2973 			    sp->u.iocb_cmd.u.ctarg.rsp,
2974 			    sp->u.iocb_cmd.u.ctarg.rsp_dma);
2975 			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
2976 		}
2977 		break;
2978 	}
2979 
2980 	/* ref: INIT */
2981 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
2982 }
2983 
2984 void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
2985 {
2986 	struct scsi_qla_host *vha = sp->vha;
2987 	fc_port_t *fcport = sp->fcport;
2988 	struct ct_sns_rsp *ct_rsp;
2989 	uint8_t fc4_scsi_feat;
2990 	uint8_t fc4_nvme_feat;
2991 
2992 	ql_dbg(ql_dbg_disc, vha, 0x2133,
2993 	       "Async done-%s res %x ID %x. %8phC\n",
2994 	       sp->name, res, fcport->d_id.b24, fcport->port_name);
2995 
2996 	ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp;
2997 	fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2998 	fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2999 	sp->rc = res;
3000 
3001 	/*
3002 	 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3003 	 * The format of the FC-4 Features object, as defined by the FC-4,
3004 	 * Shall be an array of 4-bit values, one for each type code value
3005 	 */
3006 	if (!res) {
3007 		if (fc4_scsi_feat & 0xf) {
3008 			/* w1 b00:03 */
3009 			fcport->fc4_type = FS_FC4TYPE_FCP;
3010 			fcport->fc4_features = fc4_scsi_feat & 0xf;
3011 		}
3012 
3013 		if (fc4_nvme_feat & 0xf) {
3014 			/* w5 [00:03]/28h */
3015 			fcport->fc4_type |= FS_FC4TYPE_NVME;
3016 			fcport->fc4_features = fc4_nvme_feat & 0xf;
3017 		}
3018 	}
3019 
3020 	if (sp->flags & SRB_WAKEUP_ON_COMP) {
3021 		complete(sp->comp);
3022 	} else  {
3023 		if (sp->u.iocb_cmd.u.ctarg.req) {
3024 			dma_free_coherent(&vha->hw->pdev->dev,
3025 				sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3026 				sp->u.iocb_cmd.u.ctarg.req,
3027 				sp->u.iocb_cmd.u.ctarg.req_dma);
3028 			sp->u.iocb_cmd.u.ctarg.req = NULL;
3029 		}
3030 
3031 		if (sp->u.iocb_cmd.u.ctarg.rsp) {
3032 			dma_free_coherent(&vha->hw->pdev->dev,
3033 				sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3034 				sp->u.iocb_cmd.u.ctarg.rsp,
3035 				sp->u.iocb_cmd.u.ctarg.rsp_dma);
3036 			sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3037 		}
3038 
3039 		/* ref: INIT */
3040 		kref_put(&sp->cmd_kref, qla2x00_sp_release);
3041 		/* we should not be here */
3042 		dump_stack();
3043 	}
3044 }
3045 
3046 /* Get FC4 Feature with Nport ID. */
3047 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait)
3048 {
3049 	int rval = QLA_FUNCTION_FAILED;
3050 	struct ct_sns_req       *ct_req;
3051 	srb_t *sp;
3052 	DECLARE_COMPLETION_ONSTACK(comp);
3053 
3054 	/* This routine does not handle the no-wait case */
3055 	if (!vha->flags.online || !wait)
3056 		return rval;
3057 
3058 	/* ref: INIT */
3059 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3060 	if (!sp)
3061 		return rval;
3062 
3063 	sp->type = SRB_CT_PTHRU_CMD;
3064 	sp->name = "gffid";
3065 	sp->gen1 = fcport->rscn_gen;
3066 	sp->gen2 = fcport->login_gen;
3067 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
3068 			      qla24xx_async_gffid_sp_done);
3069 	sp->comp = &comp;
3070 	sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
3071 
3072 	if (wait)
3073 		sp->flags = SRB_WAKEUP_ON_COMP;
3074 
3075 	sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3076 	sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3077 				sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3078 				&sp->u.iocb_cmd.u.ctarg.req_dma,
3079 				GFP_KERNEL);
3080 	if (!sp->u.iocb_cmd.u.ctarg.req) {
3081 		ql_log(ql_log_warn, vha, 0xd041,
3082 		       "%s: Failed to allocate ct_sns request.\n",
3083 		       __func__);
3084 		goto done_free_sp;
3085 	}
3086 
3087 	sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3088 	sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3089 				sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3090 				&sp->u.iocb_cmd.u.ctarg.rsp_dma,
3091 				GFP_KERNEL);
3092 	if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3093 		ql_log(ql_log_warn, vha, 0xd041,
3094 		       "%s: Failed to allocate ct_sns response.\n",
3095 		       __func__);
3096 		goto done_free_sp;
3097 	}
3098 
3099 	/* CT_IU preamble  */
3100 	ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE);
3101 
3102 	ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3103 	ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3104 	ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
3105 
3106 	sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3107 	sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3108 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3109 
3110 	rval = qla2x00_start_sp(sp);
3111 
3112 	if (rval != QLA_SUCCESS) {
3113 		rval = QLA_FUNCTION_FAILED;
3114 		goto done_free_sp;
3115 	} else {
3116 		ql_dbg(ql_dbg_disc, vha, 0x3074,
3117 		       "Async-%s hdl=%x portid %06x\n",
3118 		       sp->name, sp->handle, fcport->d_id.b24);
3119 	}
3120 
3121 	wait_for_completion(sp->comp);
3122 	rval = sp->rc;
3123 
3124 done_free_sp:
3125 	if (sp->u.iocb_cmd.u.ctarg.req) {
3126 		dma_free_coherent(&vha->hw->pdev->dev,
3127 				  sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3128 				  sp->u.iocb_cmd.u.ctarg.req,
3129 				  sp->u.iocb_cmd.u.ctarg.req_dma);
3130 		sp->u.iocb_cmd.u.ctarg.req = NULL;
3131 	}
3132 
3133 	if (sp->u.iocb_cmd.u.ctarg.rsp) {
3134 		dma_free_coherent(&vha->hw->pdev->dev,
3135 				  sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3136 				  sp->u.iocb_cmd.u.ctarg.rsp,
3137 				  sp->u.iocb_cmd.u.ctarg.rsp_dma);
3138 		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3139 	}
3140 
3141 	/* ref: INIT */
3142 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
3143 	return rval;
3144 }
3145 
3146 /* GPN_FT + GNN_FT */
3147 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3148 {
3149 	struct qla_hw_data *ha = vha->hw;
3150 	scsi_qla_host_t *vp;
3151 	unsigned long flags;
3152 	u64 twwn;
3153 	int rc = 0;
3154 
3155 	if (!ha->num_vhosts)
3156 		return 0;
3157 
3158 	spin_lock_irqsave(&ha->vport_slock, flags);
3159 	list_for_each_entry(vp, &ha->vp_list, list) {
3160 		twwn = wwn_to_u64(vp->port_name);
3161 		if (wwn == twwn) {
3162 			rc = 1;
3163 			break;
3164 		}
3165 	}
3166 	spin_unlock_irqrestore(&ha->vport_slock, flags);
3167 
3168 	return rc;
3169 }
3170 
3171 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3172 {
3173 	fc_port_t *fcport;
3174 	u32 i, rc;
3175 	bool found;
3176 	struct fab_scan_rp *rp, *trp;
3177 	unsigned long flags;
3178 	u8 recheck = 0;
3179 	u16 dup = 0, dup_cnt = 0;
3180 
3181 	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3182 	    "%s enter\n", __func__);
3183 
3184 	if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3185 		ql_dbg(ql_dbg_disc, vha, 0xffff,
3186 		    "%s scan stop due to chip reset %x/%x\n",
3187 		    sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
3188 		goto out;
3189 	}
3190 
3191 	rc = sp->rc;
3192 	if (rc) {
3193 		vha->scan.scan_retry++;
3194 		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3195 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3196 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3197 			goto out;
3198 		} else {
3199 			ql_dbg(ql_dbg_disc, vha, 0xffff,
3200 			    "%s: Fabric scan failed for %d retries.\n",
3201 			    __func__, vha->scan.scan_retry);
3202 			/*
3203 			 * Unable to scan any rports; the logout loop below
3204 			 * will unregister all sessions.
3205 			 */
3206 			list_for_each_entry(fcport, &vha->vp_fcports, list) {
3207 				if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) {
3208 					fcport->scan_state = QLA_FCPORT_SCAN;
3209 					if (fcport->loop_id == FC_NO_LOOP_ID)
3210 						fcport->logout_on_delete = 0;
3211 					else
3212 						fcport->logout_on_delete = 1;
3213 				}
3214 			}
3215 			goto login_logout;
3216 		}
3217 	}
3218 	vha->scan.scan_retry = 0;
3219 
3220 	list_for_each_entry(fcport, &vha->vp_fcports, list)
3221 		fcport->scan_state = QLA_FCPORT_SCAN;
3222 
3223 	for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3224 		u64 wwn;
3225 		int k;
3226 
3227 		rp = &vha->scan.l[i];
3228 		found = false;
3229 
3230 		wwn = wwn_to_u64(rp->port_name);
3231 		if (wwn == 0)
3232 			continue;
3233 
3234 		/* Remove duplicate NPORT ID entries from switch data base */
3235 		for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3236 			trp = &vha->scan.l[k];
3237 			if (rp->id.b24 == trp->id.b24) {
3238 				dup = 1;
3239 				dup_cnt++;
3240 				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3241 				    vha, 0xffff,
3242 				    "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3243 				    rp->id.b24, rp->port_name, trp->port_name);
3244 				memset(trp, 0, sizeof(*trp));
3245 			}
3246 		}
3247 
3248 		if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3249 			continue;
3250 
3251 		/* Bypass reserved domain fields. */
3252 		if ((rp->id.b.domain & 0xf0) == 0xf0)
3253 			continue;
3254 
3255 		/* Bypass virtual ports of the same host. */
3256 		if (qla2x00_is_a_vp(vha, wwn))
3257 			continue;
3258 
3259 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3260 			if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3261 				continue;
3262 			fcport->scan_state = QLA_FCPORT_FOUND;
3263 			fcport->last_rscn_gen = fcport->rscn_gen;
3264 			fcport->fc4_type = rp->fc4type;
3265 			found = true;
3266 
3267 			if (fcport->scan_needed) {
3268 				if (NVME_PRIORITY(vha->hw, fcport))
3269 					fcport->do_prli_nvme = 1;
3270 				else
3271 					fcport->do_prli_nvme = 0;
3272 			}
3273 
3274 			/*
3275 			 * If device was not a fabric device before.
3276 			 */
3277 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3278 				qla2x00_clear_loop_id(fcport);
3279 				fcport->flags |= FCF_FABRIC_DEVICE;
3280 			} else if (fcport->d_id.b24 != rp->id.b24 ||
3281 				   (fcport->scan_needed &&
3282 				    fcport->port_type != FCT_INITIATOR &&
3283 				    fcport->port_type != FCT_NVME_INITIATOR)) {
3284 				qlt_schedule_sess_for_deletion(fcport);
3285 			}
3286 			fcport->d_id.b24 = rp->id.b24;
3287 			fcport->scan_needed = 0;
3288 			break;
3289 		}
3290 
3291 		if (!found) {
3292 			ql_dbg(ql_dbg_disc, vha, 0xffff,
3293 			    "%s %d %8phC post new sess\n",
3294 			    __func__, __LINE__, rp->port_name);
3295 			qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3296 			    rp->node_name, NULL, rp->fc4type);
3297 		}
3298 	}
3299 
3300 	if (dup) {
3301 		ql_log(ql_log_warn, vha, 0xffff,
3302 		    "Detected %d duplicate NPORT ID(s) from switch data base\n",
3303 		    dup_cnt);
3304 	}
3305 
3306 login_logout:
3307 	/*
3308 	 * Logout all previous fabric dev marked lost, except FCP2 devices.
3309 	 */
3310 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
3311 		if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3312 			fcport->scan_needed = 0;
3313 			continue;
3314 		}
3315 
3316 		if (fcport->scan_state != QLA_FCPORT_FOUND) {
3317 			bool do_delete = false;
3318 
3319 			if (fcport->scan_needed &&
3320 			    fcport->disc_state == DSC_LOGIN_PEND) {
3321 				/* The cable was disconnected after we sent
3322 				 * a login. Delete the session to prevent a timeout.
3323 				 */
3324 				fcport->logout_on_delete = 1;
3325 				do_delete = true;
3326 			}
3327 
3328 			fcport->scan_needed = 0;
3329 			if (((qla_dual_mode_enabled(vha) ||
3330 			      qla_ini_mode_enabled(vha)) &&
3331 			    atomic_read(&fcport->state) == FCS_ONLINE) ||
3332 				do_delete) {
3333 				if (fcport->loop_id != FC_NO_LOOP_ID) {
3334 					if (fcport->flags & FCF_FCP2_DEVICE)
3335 						continue;
3336 
3337 					ql_log(ql_log_warn, vha, 0x20f0,
3338 					       "%s %d %8phC post del sess\n",
3339 					       __func__, __LINE__,
3340 					       fcport->port_name);
3341 
3342 					fcport->tgt_link_down_time = 0;
3343 					qlt_schedule_sess_for_deletion(fcport);
3344 					continue;
3345 				}
3346 			}
3347 		} else {
3348 			if (fcport->scan_needed ||
3349 			    fcport->disc_state != DSC_LOGIN_COMPLETE) {
3350 				if (fcport->login_retry == 0) {
3351 					fcport->login_retry =
3352 						vha->hw->login_retry_count;
3353 					ql_dbg(ql_dbg_disc, vha, 0x20a3,
3354 					    "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
3355 					    fcport->port_name, fcport->loop_id,
3356 					    fcport->login_retry);
3357 				}
3358 				fcport->scan_needed = 0;
3359 				qla24xx_fcport_handle_login(vha, fcport);
3360 			}
3361 		}
3362 	}
3363 
3364 	recheck = 1;
3365 out:
3366 	qla24xx_sp_unmap(vha, sp);
3367 	spin_lock_irqsave(&vha->work_lock, flags);
3368 	vha->scan.scan_flags &= ~SF_SCANNING;
3369 	spin_unlock_irqrestore(&vha->work_lock, flags);
3370 
3371 	if (recheck) {
3372 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
3373 			if (fcport->scan_needed) {
3374 				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3375 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3376 				break;
3377 			}
3378 		}
3379 	}
3380 }
3381 
3382 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
3383     srb_t *sp, int cmd)
3384 {
3385 	struct qla_work_evt *e;
3386 
3387 	if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
3388 		return QLA_PARAMETER_ERROR;
3389 
3390 	e = qla2x00_alloc_work(vha, cmd);
3391 	if (!e)
3392 		return QLA_FUNCTION_FAILED;
3393 
3394 	e->u.iosb.sp = sp;
3395 
3396 	return qla2x00_post_work(vha, e);
3397 }
3398 
3399 static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
3400     srb_t *sp, int cmd)
3401 {
3402 	struct qla_work_evt *e;
3403 
3404 	if (cmd != QLA_EVT_GPNFT)
3405 		return QLA_PARAMETER_ERROR;
3406 
3407 	e = qla2x00_alloc_work(vha, cmd);
3408 	if (!e)
3409 		return QLA_FUNCTION_FAILED;
3410 
3411 	e->u.gpnft.fc4_type = FC4_TYPE_NVME;
3412 	e->u.gpnft.sp = sp;
3413 
3414 	return qla2x00_post_work(vha, e);
3415 }
3416 
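/*
 * Populate the fabric scan list from a GPN_FT or GNN_FT response.  GPN_FT
 * entries create or merge scan-list slots keyed by port ID and tag each slot
 * with the FC4 type being scanned (FCP, NVMe, or both); for GNN_FT responses
 * the same entry layout carries node names, which are copied into the
 * matching slots.
 */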
3417 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
3418 	struct srb *sp)
3419 {
3420 	struct qla_hw_data *ha = vha->hw;
3421 	int num_fibre_dev = ha->max_fibre_devices;
3422 	struct ct_sns_req *ct_req =
3423 		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3424 	struct ct_sns_gpnft_rsp *ct_rsp =
3425 		(struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3426 	struct ct_sns_gpn_ft_data *d;
3427 	struct fab_scan_rp *rp;
3428 	u16 cmd = be16_to_cpu(ct_req->command);
3429 	u8 fc4_type = sp->gen2;
3430 	int i, j, k;
3431 	port_id_t id;
3432 	u8 found;
3433 	u64 wwn;
3434 
3435 	j = 0;
3436 	for (i = 0; i < num_fibre_dev; i++) {
3437 		d  = &ct_rsp->entries[i];
3438 
3439 		id.b.rsvd_1 = 0;
3440 		id.b.domain = d->port_id[0];
3441 		id.b.area   = d->port_id[1];
3442 		id.b.al_pa  = d->port_id[2];
3443 		wwn = wwn_to_u64(d->port_name);
3444 
3445 		if (id.b24 == 0 || wwn == 0)
3446 			continue;
3447 
3448 		if (fc4_type == FC4_TYPE_FCP_SCSI) {
3449 			if (cmd == GPN_FT_CMD) {
3450 				rp = &vha->scan.l[j];
3451 				rp->id = id;
3452 				memcpy(rp->port_name, d->port_name, 8);
3453 				j++;
3454 				rp->fc4type = FS_FC4TYPE_FCP;
3455 			} else {
3456 				for (k = 0; k < num_fibre_dev; k++) {
3457 					rp = &vha->scan.l[k];
3458 					if (id.b24 == rp->id.b24) {
3459 						memcpy(rp->node_name,
3460 						    d->port_name, 8);
3461 						break;
3462 					}
3463 				}
3464 			}
3465 		} else {
3466 			/* NVMe pass: check whether this port also supports FC4_TYPE_NVME */
3467 			if (cmd == GPN_FT_CMD) {
3468 				found = 0;
3469 
3470 				for (k = 0; k < num_fibre_dev; k++) {
3471 					rp = &vha->scan.l[k];
3472 					if (!memcmp(rp->port_name,
3473 					    d->port_name, 8)) {
3474 						/*
3475 						 * Supports FC-NVMe & FCP
3476 						 */
3477 						rp->fc4type |= FS_FC4TYPE_NVME;
3478 						found = 1;
3479 						break;
3480 					}
3481 				}
3482 
3483 				/* Found a new FC-NVMe-only port; add it to a free slot */
3484 				if (!found) {
3485 					for (k = 0; k < num_fibre_dev; k++) {
3486 						rp = &vha->scan.l[k];
3487 						if (wwn_to_u64(rp->port_name)) {
3488 							continue;
3489 						} else {
3490 							rp->id = id;
3491 							memcpy(rp->port_name,
3492 							    d->port_name, 8);
3493 							rp->fc4type =
3494 							    FS_FC4TYPE_NVME;
3495 							break;
3496 						}
3497 					}
3498 				}
3499 			} else {
3500 				for (k = 0; k < num_fibre_dev; k++) {
3501 					rp = &vha->scan.l[k];
3502 					if (id.b24 == rp->id.b24) {
3503 						memcpy(rp->node_name,
3504 						    d->port_name, 8);
3505 						break;
3506 					}
3507 				}
3508 			}
3509 		}
3510 	}
3511 }
3512 
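/*
 * Completion callback for the async GPN_FT/GNN_FT CT pass-through.
 * On error the srb is handed to the GNNFT_DONE work queue for cleanup;
 * on success the response is merged into the scan list and the next
 * scan step (NVMe GPN_FT, GPNFT_DONE or GNNFT_DONE) is queued.
 */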
3513 static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res)
3514 {
3515 	struct scsi_qla_host *vha = sp->vha;
3516 	struct ct_sns_req *ct_req =
3517 		(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3518 	u16 cmd = be16_to_cpu(ct_req->command);
3519 	u8 fc4_type = sp->gen2;
3520 	unsigned long flags;
3521 	int rc;
3522 
3523 	/* The gen2 field holds the FC4 type for this scan */
3524 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3525 	    "Async done-%s res %x FC4Type %x\n",
3526 	    sp->name, res, sp->gen2);
3527 
3528 	sp->rc = res;
3529 	if (res) {
3530 		unsigned long flags;
3531 		const char *name = sp->name;
3532 
3533 		if (res == QLA_OS_TIMER_EXPIRED) {
3534 			/*
3535 			 * The switch is ignoring all commands; this may be
3536 			 * zone-disable behavior.  We have hit the overall
3537 			 * command timeout (roughly 22s GPN_FT + 44s abort).
3538 			 */
3539 			ql_dbg(ql_dbg_disc, vha, 0xffff,
3540 			       "%s: switch is not responding, check switch zoning.\n",
3541 			       name);
3542 			qla2x00_mark_all_devices_lost(vha);
3543 		}
3544 
3545 		/*
3546 		 * We are in interrupt context; queue this sp for GNNFT_DONE
3547 		 * work so that all of its resources can be freed from
3548 		 * process context.
3549 		 */
3550 		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3551 		    QLA_EVT_GNNFT_DONE);
3552 		if (rc) {
3553 			/* Cleanup here to prevent memory leak */
3554 			qla24xx_sp_unmap(vha, sp);
3555 
3556 			spin_lock_irqsave(&vha->work_lock, flags);
3557 			vha->scan.scan_flags &= ~SF_SCANNING;
3558 			vha->scan.scan_retry++;
3559 			spin_unlock_irqrestore(&vha->work_lock, flags);
3560 
3561 			if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3562 				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3563 				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3564 				qla2xxx_wake_dpc(vha);
3565 			} else {
3566 				ql_dbg(ql_dbg_disc, vha, 0xffff,
3567 				    "Async done-%s rescan failed on all retries.\n",
3568 				    name);
3569 			}
3570 		}
3571 		return;
3572 	}
3573 
3574 	qla2x00_find_free_fcp_nvme_slot(vha, sp);
3575 
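	/*
	 * When the FCP GNN_FT completes and NVMe is enabled, chain an
	 * NVMe GPN_FT scan that reuses this srb.
	 */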
3576 	if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
3577 	    cmd == GNN_FT_CMD) {
3578 		spin_lock_irqsave(&vha->work_lock, flags);
3579 		vha->scan.scan_flags &= ~SF_SCANNING;
3580 		spin_unlock_irqrestore(&vha->work_lock, flags);
3581 
3582 		sp->rc = res;
3583 		rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
3584 		if (rc) {
3585 			qla24xx_sp_unmap(vha, sp);
3586 			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3587 			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3588 		}
3589 		return;
3590 	}
3591 
3592 	if (cmd == GPN_FT_CMD) {
3593 		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3594 		    QLA_EVT_GPNFT_DONE);
3595 	} else {
3596 		rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
3597 		    QLA_EVT_GNNFT_DONE);
3598 	}
3599 
3600 	if (rc) {
3601 		qla24xx_sp_unmap(vha, sp);
3602 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3603 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3604 		return;
3605 	}
3606 }
3607 
3608 /*
3609  * Get the WWNN list for fc4_type via GNN_FT.
3610  *
3611  * The same SRB (and its DMA buffers) is reused from the preceding
3612  * GPN_FT to avoid freeing and re-allocating memory.
3613  */
3614 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
3615     u8 fc4_type)
3616 {
3617 	int rval = QLA_FUNCTION_FAILED;
3618 	struct ct_sns_req *ct_req;
3619 	struct ct_sns_pkt *ct_sns;
3620 	unsigned long flags;
3621 
3622 	if (!vha->flags.online) {
3623 		spin_lock_irqsave(&vha->work_lock, flags);
3624 		vha->scan.scan_flags &= ~SF_SCANNING;
3625 		spin_unlock_irqrestore(&vha->work_lock, flags);
3626 		goto done_free_sp;
3627 	}
3628 
3629 	if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
3630 		ql_log(ql_log_warn, vha, 0xffff,
3631 		    "%s: req %p rsp %p are not set up\n",
3632 		    __func__, sp->u.iocb_cmd.u.ctarg.req,
3633 		    sp->u.iocb_cmd.u.ctarg.rsp);
3634 		spin_lock_irqsave(&vha->work_lock, flags);
3635 		vha->scan.scan_flags &= ~SF_SCANNING;
3636 		spin_unlock_irqrestore(&vha->work_lock, flags);
3637 		WARN_ON(1);
3638 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3639 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3640 		goto done_free_sp;
3641 	}
3642 
3643 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3644 	    "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
3645 	    __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
3646 	     sp->u.iocb_cmd.u.ctarg.req_size);
3647 
3648 	sp->type = SRB_CT_PTHRU_CMD;
3649 	sp->name = "gnnft";
3650 	sp->gen1 = vha->hw->base_qpair->chip_reset;
3651 	sp->gen2 = fc4_type;
3652 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
3653 			      qla2x00_async_gpnft_gnnft_sp_done);
3654 
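	/* Reuse the GPN_FT DMA buffers; clear them for the GNN_FT exchange */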
3655 	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
3656 	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
3657 
3658 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3659 	/* CT_IU preamble  */
3660 	ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
3661 	    sp->u.iocb_cmd.u.ctarg.rsp_size);
3662 
3663 	/* GNN_FT request (shares the gpn_ft request layout) */
3664 	ct_req->req.gpn_ft.port_type = fc4_type;
3665 
3666 	sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
3667 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3668 
3669 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3670 	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
3671 	    sp->handle, ct_req->req.gpn_ft.port_type);
3672 
3673 	rval = qla2x00_start_sp(sp);
3674 	if (rval != QLA_SUCCESS)
3675 		goto done_free_sp;
3677 
3678 	return rval;
3679 
3680 done_free_sp:
3681 	if (sp->u.iocb_cmd.u.ctarg.req) {
3682 		dma_free_coherent(&vha->hw->pdev->dev,
3683 		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3684 		    sp->u.iocb_cmd.u.ctarg.req,
3685 		    sp->u.iocb_cmd.u.ctarg.req_dma);
3686 		sp->u.iocb_cmd.u.ctarg.req = NULL;
3687 	}
3688 	if (sp->u.iocb_cmd.u.ctarg.rsp) {
3689 		dma_free_coherent(&vha->hw->pdev->dev,
3690 		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3691 		    sp->u.iocb_cmd.u.ctarg.rsp,
3692 		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
3693 		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3694 	}
3695 	/* ref: INIT */
3696 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
3697 
3698 	spin_lock_irqsave(&vha->work_lock, flags);
3699 	vha->scan.scan_flags &= ~SF_SCANNING;
3700 	if (vha->scan.scan_flags == 0) {
3701 		ql_dbg(ql_dbg_disc, vha, 0xffff,
3702 		    "%s: schedule\n", __func__);
3703 		vha->scan.scan_flags |= SF_QUEUED;
3704 		schedule_delayed_work(&vha->scan.scan_work, 5);
3705 	}
3706 	spin_unlock_irqrestore(&vha->work_lock, flags);
3707 
3709 	return rval;
3710 } /* GNNFT */
3711 
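/*
 * GPN_FT has completed: start the follow-up GNN_FT, reusing the same srb
 * and FC4 type.
 */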
3712 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
3713 {
3714 	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3715 	    "%s enter\n", __func__);
3716 	qla24xx_async_gnnft(vha, sp, sp->gen2);
3717 }
3718 
3719 /* Get the WWPN list for a given fc4_type via GPN_FT */
3720 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
3721 {
3722 	int rval = QLA_FUNCTION_FAILED;
3723 	struct ct_sns_req       *ct_req;
3724 	struct ct_sns_pkt *ct_sns;
3725 	u32 rspsz;
3726 	unsigned long flags;
3727 
3728 	ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3729 	    "%s enter\n", __func__);
3730 
3731 	if (!vha->flags.online)
3732 		return rval;
3733 
3734 	spin_lock_irqsave(&vha->work_lock, flags);
3735 	if (vha->scan.scan_flags & SF_SCANNING) {
3736 		spin_unlock_irqrestore(&vha->work_lock, flags);
3737 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3738 		    "%s: scan active\n", __func__);
3739 		return rval;
3740 	}
3741 	vha->scan.scan_flags |= SF_SCANNING;
3742 	spin_unlock_irqrestore(&vha->work_lock, flags);
3743 
3744 	if (fc4_type == FC4_TYPE_FCP_SCSI) {
3745 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3746 		    "%s: Performing FCP Scan\n", __func__);
3747 
3748 		if (sp) {
3749 			/* ref: INIT */
3750 			kref_put(&sp->cmd_kref, qla2x00_sp_release);
3751 		}
3752 
3753 		/* ref: INIT */
3754 		sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3755 		if (!sp) {
3756 			spin_lock_irqsave(&vha->work_lock, flags);
3757 			vha->scan.scan_flags &= ~SF_SCANNING;
3758 			spin_unlock_irqrestore(&vha->work_lock, flags);
3759 			return rval;
3760 		}
3761 
3762 		sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3763 								sizeof(struct ct_sns_pkt),
3764 								&sp->u.iocb_cmd.u.ctarg.req_dma,
3765 								GFP_KERNEL);
3766 		sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3767 		if (!sp->u.iocb_cmd.u.ctarg.req) {
3768 			ql_log(ql_log_warn, vha, 0xffff,
3769 			    "Failed to allocate ct_sns request.\n");
3770 			spin_lock_irqsave(&vha->work_lock, flags);
3771 			vha->scan.scan_flags &= ~SF_SCANNING;
3772 			spin_unlock_irqrestore(&vha->work_lock, flags);
3773 			qla2x00_rel_sp(sp);
3774 			return rval;
3775 		}
3776 		sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
3777 
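		/*
		 * Size the response for one ct_sns_gpn_ft_data entry per
		 * possible fabric device.
		 */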
3778 		rspsz = sizeof(struct ct_sns_gpnft_rsp) +
3779 			vha->hw->max_fibre_devices *
3780 			    sizeof(struct ct_sns_gpn_ft_data);
3781 
3782 		sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3783 								rspsz,
3784 								&sp->u.iocb_cmd.u.ctarg.rsp_dma,
3785 								GFP_KERNEL);
3786 		sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz;
3787 		if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3788 			ql_log(ql_log_warn, vha, 0xffff,
3789 			    "Failed to allocate ct_sns response.\n");
3790 			spin_lock_irqsave(&vha->work_lock, flags);
3791 			vha->scan.scan_flags &= ~SF_SCANNING;
3792 			spin_unlock_irqrestore(&vha->work_lock, flags);
3793 			dma_free_coherent(&vha->hw->pdev->dev,
3794 			    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3795 			    sp->u.iocb_cmd.u.ctarg.req,
3796 			    sp->u.iocb_cmd.u.ctarg.req_dma);
3797 			sp->u.iocb_cmd.u.ctarg.req = NULL;
3798 			/* ref: INIT */
3799 			qla2x00_rel_sp(sp);
3800 			return rval;
3801 		}
3802 		sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
3803 
3804 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3805 		    "%s scan list size %d\n", __func__, vha->scan.size);
3806 
3807 		memset(vha->scan.l, 0, vha->scan.size);
3808 	} else if (!sp) {
3809 		ql_dbg(ql_dbg_disc, vha, 0xffff,
3810 		    "NVME scan did not provide SP\n");
3811 		return rval;
3812 	}
3813 
3814 	sp->type = SRB_CT_PTHRU_CMD;
3815 	sp->name = "gpnft";
3816 	sp->gen1 = vha->hw->base_qpair->chip_reset;
3817 	sp->gen2 = fc4_type;
3818 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
3819 			      qla2x00_async_gpnft_gnnft_sp_done);
3820 
3821 	rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
3822 	memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
3823 	memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
3824 
3825 	ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3826 	/* CT_IU preamble  */
3827 	ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
3828 
3829 	/* GPN_FT req */
3830 	ct_req->req.gpn_ft.port_type = fc4_type;
3831 
3832 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3833 
3834 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3835 	    "Async-%s hdl=%x FC4Type %x.\n", sp->name,
3836 	    sp->handle, ct_req->req.gpn_ft.port_type);
3837 
3838 	rval = qla2x00_start_sp(sp);
3839 	if (rval != QLA_SUCCESS)
3840 		goto done_free_sp;
3842 
3843 	return rval;
3844 
3845 done_free_sp:
3846 	if (sp->u.iocb_cmd.u.ctarg.req) {
3847 		dma_free_coherent(&vha->hw->pdev->dev,
3848 		    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3849 		    sp->u.iocb_cmd.u.ctarg.req,
3850 		    sp->u.iocb_cmd.u.ctarg.req_dma);
3851 		sp->u.iocb_cmd.u.ctarg.req = NULL;
3852 	}
3853 	if (sp->u.iocb_cmd.u.ctarg.rsp) {
3854 		dma_free_coherent(&vha->hw->pdev->dev,
3855 		    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3856 		    sp->u.iocb_cmd.u.ctarg.rsp,
3857 		    sp->u.iocb_cmd.u.ctarg.rsp_dma);
3858 		sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3859 	}
3860 
3861 	/* ref: INIT */
3862 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
3863 
3864 	spin_lock_irqsave(&vha->work_lock, flags);
3865 	vha->scan.scan_flags &= ~SF_SCANNING;
3866 	if (vha->scan.scan_flags == 0) {
3867 		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
3868 		    "%s: Scan scheduled.\n", __func__);
3869 		vha->scan.scan_flags |= SF_QUEUED;
3870 		schedule_delayed_work(&vha->scan.scan_work, 5);
3871 	}
3872 	spin_unlock_irqrestore(&vha->work_lock, flags);
3873 
3875 	return rval;
3876 }
3877 
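/*
 * Delayed work scheduled when a fabric scan must be retried: flag a loop
 * resync for the DPC thread and clear SF_QUEUED.
 */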
3878 void qla_scan_work_fn(struct work_struct *work)
3879 {
3880 	struct fab_scan *s = container_of(to_delayed_work(work),
3881 	    struct fab_scan, scan_work);
3882 	struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
3883 	    scan);
3884 	unsigned long flags;
3885 
3886 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3887 	    "%s: schedule loop resync\n", __func__);
3888 	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3889 	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3890 	qla2xxx_wake_dpc(vha);
3891 	spin_lock_irqsave(&vha->work_lock, flags);
3892 	vha->scan.scan_flags &= ~SF_QUEUED;
3893 	spin_unlock_irqrestore(&vha->work_lock, flags);
3894 }
3895 
3896 /* GFPN_ID - Get Fabric Port Name by port ID */
3897 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3898 {
3899 	fc_port_t *fcport = ea->fcport;
3900 
3901 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3902 	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
3903 	    __func__, fcport->port_name, fcport->disc_state,
3904 	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
3905 	    fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
3906 
3907 	if (fcport->disc_state == DSC_DELETE_PEND)
3908 		return;
3909 
3910 	if (ea->sp->gen2 != fcport->login_gen) {
3911 		/* target side must have changed it. */
3912 		ql_dbg(ql_dbg_disc, vha, 0x20d3,
3913 		    "%s %8phC generation changed\n",
3914 		    __func__, fcport->port_name);
3915 		return;
3916 	} else if (ea->sp->gen1 != fcport->rscn_gen) {
3917 		return;
3918 	}
3919 
3920 	qla24xx_post_gpsc_work(vha, fcport);
3921 }
3922 
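/*
 * GFPN_ID completion: save the returned fabric port name and let
 * qla24xx_handle_gfpnid_event() decide the next discovery step.
 */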
3923 static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res)
3924 {
3925 	struct scsi_qla_host *vha = sp->vha;
3926 	fc_port_t *fcport = sp->fcport;
3927 	u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
3928 	struct event_arg ea;
3929 	u64 wwn;
3930 
3931 	wwn = wwn_to_u64(fpn);
3932 	if (wwn)
3933 		memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
3934 
3935 	memset(&ea, 0, sizeof(ea));
3936 	ea.fcport = fcport;
3937 	ea.sp = sp;
3938 	ea.rc = res;
3939 
3940 	ql_dbg(ql_dbg_disc, vha, 0x204f,
3941 	    "Async done-%s res %x, WWPN %8phC %8phC\n",
3942 	    sp->name, res, fcport->port_name, fcport->fabric_port_name);
3943 
3944 	qla24xx_handle_gfpnid_event(vha, &ea);
3945 
3946 	/* ref: INIT */
3947 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
3948 }
3949 
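/*
 * Issue an async GFPN_ID (Get Fabric Port Name by port ID) CT command for
 * fcport; the fcport's ct_sns buffer is used for both request and response.
 */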
3950 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
3951 {
3952 	int rval = QLA_FUNCTION_FAILED;
3953 	struct ct_sns_req       *ct_req;
3954 	srb_t *sp;
3955 
3956 	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3957 		return rval;
3958 
3959 	/* ref: INIT */
3960 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
3961 	if (!sp)
3962 		goto done;
3963 
3964 	sp->type = SRB_CT_PTHRU_CMD;
3965 	sp->name = "gfpnid";
3966 	sp->gen1 = fcport->rscn_gen;
3967 	sp->gen2 = fcport->login_gen;
3968 	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
3969 			      qla2x00_async_gfpnid_sp_done);
3970 
3971 	/* CT_IU preamble  */
3972 	ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
3973 	    GFPN_ID_RSP_SIZE);
3974 
3975 	/* GFPN_ID req */
3976 	ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
3977 
3979 	/* req & rsp use the same buffer */
3980 	sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3981 	sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3982 	sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3983 	sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3984 	sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
3985 	sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
3986 	sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3987 
3988 	ql_dbg(ql_dbg_disc, vha, 0xffff,
3989 	    "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
3990 	    sp->name, fcport->port_name,
3991 	    sp->handle, fcport->loop_id, fcport->d_id.b24);
3992 
3993 	rval = qla2x00_start_sp(sp);
3994 	if (rval != QLA_SUCCESS)
3995 		goto done_free_sp;
3996 
3997 	return rval;
3998 
3999 done_free_sp:
4000 	/* ref: INIT */
4001 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
4002 done:
4003 	return rval;
4004 }
4005 
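/*
 * Queue a GFPNID work event unless the loop is down or the driver is
 * unloading.
 */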
4006 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4007 {
4008 	struct qla_work_evt *e;
4009 	int ls;
4010 
4011 	ls = atomic_read(&vha->loop_state);
4012 	if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4013 		test_bit(UNLOADING, &vha->dpc_flags))
4014 		return 0;
4015 
4016 	e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4017 	if (!e)
4018 		return QLA_FUNCTION_FAILED;
4019 
4020 	e->u.fcport.fcport = fcport;
4021 	return qla2x00_post_work(vha, e);
4022 }
4023