xref: /openbmc/linux/drivers/scsi/bfa/bfad_bsg.c (revision e2c75e76)
1 /*
2  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
3  * Copyright (c) 2014- QLogic Corporation.
4  * All rights reserved
5  * www.qlogic.com
6  *
7  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License (GPL) Version 2 as
11  * published by the Free Software Foundation
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include <linux/uaccess.h>
20 #include "bfad_drv.h"
21 #include "bfad_im.h"
22 #include "bfad_bsg.h"
23 
24 BFA_TRC_FILE(LDRV, BSG);
25 
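/*
 * bfad_iocmd_ioc_enable - re-enable the IOC on behalf of a bsg request.
 * Returns immediately if the IOC is not currently disabled; otherwise the
 * enable is started under bfad_lock and the handler sleeps on enable_comp
 * with the lock released until it completes.
 */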
26 int
27 bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
28 {
29 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
30 	unsigned long	flags;
31 
32 	spin_lock_irqsave(&bfad->bfad_lock, flags);
33 	/* If IOC is not in disabled state - return */
34 	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
35 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36 		iocmd->status = BFA_STATUS_OK;
37 		return 0;
38 	}
39 
40 	init_completion(&bfad->enable_comp);
41 	bfa_iocfc_enable(&bfad->bfa);
42 	iocmd->status = BFA_STATUS_OK;
43 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
44 	wait_for_completion(&bfad->enable_comp);
45 
46 	return 0;
47 }
48 
49 int
50 bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
51 {
52 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
53 	unsigned long	flags;
54 
55 	spin_lock_irqsave(&bfad->bfad_lock, flags);
56 	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
57 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
58 		iocmd->status = BFA_STATUS_OK;
59 		return 0;
60 	}
61 
62 	if (bfad->disable_active) {
63 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
64 		return -EBUSY;
65 	}
66 
67 	bfad->disable_active = BFA_TRUE;
68 	init_completion(&bfad->disable_comp);
69 	bfa_iocfc_disable(&bfad->bfa);
70 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
71 
72 	wait_for_completion(&bfad->disable_comp);
73 	bfad->disable_active = BFA_FALSE;
74 	iocmd->status = BFA_STATUS_OK;
75 
76 	return 0;
77 }
78 
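/*
 * bfad_iocmd_ioc_get_info - report WWNs, MACs, serial number and path
 * names for this IOC.  The adapter hw path is derived from the PCI name
 * by truncating it at the second ':' separator.
 */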
79 static int
80 bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
81 {
82 	int	i;
83 	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
84 	struct bfad_im_port_s	*im_port;
85 	struct bfa_port_attr_s	pattr;
86 	unsigned long	flags;
87 
88 	spin_lock_irqsave(&bfad->bfad_lock, flags);
89 	bfa_fcport_get_attr(&bfad->bfa, &pattr);
90 	iocmd->nwwn = pattr.nwwn;
91 	iocmd->pwwn = pattr.pwwn;
92 	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
93 	iocmd->mac = bfa_get_mac(&bfad->bfa);
94 	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
95 	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
96 	iocmd->factorynwwn = pattr.factorynwwn;
97 	iocmd->factorypwwn = pattr.factorypwwn;
98 	iocmd->bfad_num = bfad->inst_no;
99 	im_port = bfad->pport.im_port;
100 	iocmd->host = im_port->shost->host_no;
101 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
102 
103 	strcpy(iocmd->name, bfad->adapter_name);
104 	strcpy(iocmd->port_name, bfad->port_name);
105 	strcpy(iocmd->hwpath, bfad->pci_name);
106 
107 	/* set adapter hw path */
108 	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
109 	for (i = 0; i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; i++)
110 		;
111 	for (; ++i < BFA_STRING_32 && iocmd->adapter_hwpath[i] != ':'; )
112 		;
113 	iocmd->adapter_hwpath[i] = '\0';
114 	iocmd->status = BFA_STATUS_OK;
115 	return 0;
116 }
117 
118 static int
119 bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
120 {
121 	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
122 	unsigned long	flags;
123 
124 	spin_lock_irqsave(&bfad->bfad_lock, flags);
125 	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
126 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
127 
128 	/* fill in driver attr info */
129 	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
130 	strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
131 		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
132 	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
133 		iocmd->ioc_attr.adapter_attr.fw_ver);
134 	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
135 		iocmd->ioc_attr.adapter_attr.optrom_ver);
136 
137 	/* copy chip rev info first, otherwise it will be overwritten */
138 	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
139 		sizeof(bfad->pci_attr.chip_rev));
140 	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
141 		sizeof(struct bfa_ioc_pci_attr_s));
142 
143 	iocmd->status = BFA_STATUS_OK;
144 	return 0;
145 }
146 
147 int
148 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
149 {
150 	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
151 
152 	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
153 	iocmd->status = BFA_STATUS_OK;
154 	return 0;
155 }
156 
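/*
 * bfad_iocmd_ioc_get_fwstats - fetch firmware statistics into the
 * variable-length area that follows the fixed bsg header.  The payload
 * size is validated with bfad_chk_iocmd_sz() before anything is copied.
 */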
157 int
158 bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
159 			unsigned int payload_len)
160 {
161 	struct bfa_bsg_ioc_fwstats_s *iocmd =
162 			(struct bfa_bsg_ioc_fwstats_s *)cmd;
163 	void	*iocmd_bufptr;
164 	unsigned long	flags;
165 
166 	if (bfad_chk_iocmd_sz(payload_len,
167 			sizeof(struct bfa_bsg_ioc_fwstats_s),
168 			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
169 		iocmd->status = BFA_STATUS_VERSION_FAIL;
170 		goto out;
171 	}
172 
173 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
174 	spin_lock_irqsave(&bfad->bfad_lock, flags);
175 	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
176 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
177 
178 	if (iocmd->status != BFA_STATUS_OK) {
179 		bfa_trc(bfad, iocmd->status);
180 		goto out;
181 	}
182 out:
183 	bfa_trc(bfad, 0x6666);
184 	return 0;
185 }
186 
187 int
188 bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
189 {
190 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
191 	unsigned long	flags;
192 
193 	if (v_cmd == IOCMD_IOC_RESET_STATS) {
194 		bfa_ioc_clear_stats(&bfad->bfa);
195 		iocmd->status = BFA_STATUS_OK;
196 	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
197 		spin_lock_irqsave(&bfad->bfad_lock, flags);
198 		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
199 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
200 	}
201 
202 	return 0;
203 }
204 
205 int
206 bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
207 {
208 	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
209 
210 	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
211 		strcpy(bfad->adapter_name, iocmd->name);
212 	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
213 		strcpy(bfad->port_name, iocmd->name);
214 
215 	iocmd->status = BFA_STATUS_OK;
216 	return 0;
217 }
218 
219 int
220 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
221 {
222 	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
223 
224 	iocmd->status = BFA_STATUS_OK;
225 	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
226 
227 	return 0;
228 }
229 
230 int
231 bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
232 {
233 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
234 	unsigned long flags;
235 
236 	spin_lock_irqsave(&bfad->bfad_lock, flags);
237 	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
238 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
239 	return 0;
240 }
241 
242 int
243 bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
244 {
245 	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
246 	unsigned long	flags;
247 
248 	spin_lock_irqsave(&bfad->bfad_lock, flags);
249 	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
250 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
251 
252 	return 0;
253 }
254 
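/*
 * bfad_iocmd_port_enable - enable the base port.  The request is issued
 * under bfad_lock with bfad_hcb_comp() as the completion callback and the
 * handler waits on fcomp outside the lock for the firmware response.
 */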
255 int
256 bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
257 {
258 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
259 	struct bfad_hal_comp fcomp;
260 	unsigned long flags;
261 
262 	init_completion(&fcomp.comp);
263 	spin_lock_irqsave(&bfad->bfad_lock, flags);
264 	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
265 					bfad_hcb_comp, &fcomp);
266 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
267 	if (iocmd->status != BFA_STATUS_OK) {
268 		bfa_trc(bfad, iocmd->status);
269 		return 0;
270 	}
271 	wait_for_completion(&fcomp.comp);
272 	iocmd->status = fcomp.status;
273 	return 0;
274 }
275 
276 int
277 bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
278 {
279 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
280 	struct bfad_hal_comp fcomp;
281 	unsigned long flags;
282 
283 	init_completion(&fcomp.comp);
284 	spin_lock_irqsave(&bfad->bfad_lock, flags);
285 	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
286 				bfad_hcb_comp, &fcomp);
287 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
288 
289 	if (iocmd->status != BFA_STATUS_OK) {
290 		bfa_trc(bfad, iocmd->status);
291 		return 0;
292 	}
293 	wait_for_completion(&fcomp.comp);
294 	iocmd->status = fcomp.status;
295 	return 0;
296 }
297 
298 static int
299 bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
300 {
301 	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
302 	struct bfa_lport_attr_s	port_attr;
303 	unsigned long	flags;
304 
305 	spin_lock_irqsave(&bfad->bfad_lock, flags);
306 	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
307 	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
308 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
309 
310 	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
311 		iocmd->attr.pid = port_attr.pid;
312 	else
313 		iocmd->attr.pid = 0;
314 
315 	iocmd->attr.port_type = port_attr.port_type;
316 	iocmd->attr.loopback = port_attr.loopback;
317 	iocmd->attr.authfail = port_attr.authfail;
318 	strlcpy(iocmd->attr.port_symname.symname,
319 		port_attr.port_cfg.sym_name.symname,
320 		sizeof(iocmd->attr.port_symname.symname));
321 
322 	iocmd->status = BFA_STATUS_OK;
323 	return 0;
324 }
325 
326 int
327 bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
328 			unsigned int payload_len)
329 {
330 	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
331 	struct bfad_hal_comp fcomp;
332 	void	*iocmd_bufptr;
333 	unsigned long	flags;
334 
335 	if (bfad_chk_iocmd_sz(payload_len,
336 			sizeof(struct bfa_bsg_port_stats_s),
337 			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
338 		iocmd->status = BFA_STATUS_VERSION_FAIL;
339 		return 0;
340 	}
341 
342 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
343 
344 	init_completion(&fcomp.comp);
345 	spin_lock_irqsave(&bfad->bfad_lock, flags);
346 	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
347 				iocmd_bufptr, bfad_hcb_comp, &fcomp);
348 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
349 	if (iocmd->status != BFA_STATUS_OK) {
350 		bfa_trc(bfad, iocmd->status);
351 		goto out;
352 	}
353 
354 	wait_for_completion(&fcomp.comp);
355 	iocmd->status = fcomp.status;
356 out:
357 	return 0;
358 }
359 
360 int
361 bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
362 {
363 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
364 	struct bfad_hal_comp fcomp;
365 	unsigned long	flags;
366 
367 	init_completion(&fcomp.comp);
368 	spin_lock_irqsave(&bfad->bfad_lock, flags);
369 	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
370 					bfad_hcb_comp, &fcomp);
371 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
372 	if (iocmd->status != BFA_STATUS_OK) {
373 		bfa_trc(bfad, iocmd->status);
374 		return 0;
375 	}
376 	wait_for_completion(&fcomp.comp);
377 	iocmd->status = fcomp.status;
378 	return 0;
379 }
380 
381 int
382 bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
383 {
384 	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
385 	unsigned long	flags;
386 
387 	spin_lock_irqsave(&bfad->bfad_lock, flags);
388 	if (v_cmd == IOCMD_PORT_CFG_TOPO)
389 		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
390 	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
391 		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
392 	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
393 		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
394 	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
395 		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
396 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
397 
398 	return 0;
399 }
400 
401 int
402 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
403 {
404 	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
405 				(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
406 	unsigned long	flags;
407 
408 	spin_lock_irqsave(&bfad->bfad_lock, flags);
409 	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
410 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
411 
412 	return 0;
413 }
414 
415 int
416 bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
417 {
418 	struct bfa_bsg_bbcr_enable_s *iocmd =
419 			(struct bfa_bsg_bbcr_enable_s *)pcmd;
420 	unsigned long flags;
421 	int rc;
422 
423 	spin_lock_irqsave(&bfad->bfad_lock, flags);
424 	if (cmd == IOCMD_PORT_BBCR_ENABLE)
425 		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
426 	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
427 		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
428 	else {
429 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
430 		return -EINVAL;
431 	}
432 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
433 
434 	iocmd->status = rc;
435 	return 0;
436 }
437 
438 int
439 bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
440 {
441 	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
442 	unsigned long flags;
443 
444 	spin_lock_irqsave(&bfad->bfad_lock, flags);
445 	iocmd->status =
446 		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
447 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
448 
449 	return 0;
450 }
451 
452 
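/*
 * The logical-port handlers below look up the lport by vf_id/pwwn under
 * bfad_lock and report BFA_STATUS_UNKNOWN_LWWN when no match is found.
 */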
453 static int
454 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
455 {
456 	struct bfa_fcs_lport_s	*fcs_port;
457 	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
458 	unsigned long	flags;
459 
460 	spin_lock_irqsave(&bfad->bfad_lock, flags);
461 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
462 				iocmd->vf_id, iocmd->pwwn);
463 	if (fcs_port == NULL) {
464 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
465 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
466 		goto out;
467 	}
468 
469 	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
470 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
471 	iocmd->status = BFA_STATUS_OK;
472 out:
473 	return 0;
474 }
475 
476 int
477 bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
478 {
479 	struct bfa_fcs_lport_s *fcs_port;
480 	struct bfa_bsg_lport_stats_s *iocmd =
481 			(struct bfa_bsg_lport_stats_s *)cmd;
482 	unsigned long	flags;
483 
484 	spin_lock_irqsave(&bfad->bfad_lock, flags);
485 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
486 				iocmd->vf_id, iocmd->pwwn);
487 	if (fcs_port == NULL) {
488 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
489 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
490 		goto out;
491 	}
492 
493 	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
494 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
495 	iocmd->status = BFA_STATUS_OK;
496 out:
497 	return 0;
498 }
499 
500 int
501 bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
502 {
503 	struct bfa_fcs_lport_s *fcs_port;
504 	struct bfa_bsg_reset_stats_s *iocmd =
505 			(struct bfa_bsg_reset_stats_s *)cmd;
506 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
507 	struct list_head *qe, *qen;
508 	struct bfa_itnim_s *itnim;
509 	unsigned long	flags;
510 
511 	spin_lock_irqsave(&bfad->bfad_lock, flags);
512 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
513 				iocmd->vf_id, iocmd->vpwwn);
514 	if (fcs_port == NULL) {
515 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
516 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
517 		goto out;
518 	}
519 
520 	bfa_fcs_lport_clear_stats(fcs_port);
521 	/* clear IO stats from all active itnims */
522 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
523 		itnim = (struct bfa_itnim_s *) qe;
524 		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
525 			continue;
526 		bfa_itnim_clear_stats(itnim);
527 	}
528 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
529 	iocmd->status = BFA_STATUS_OK;
530 out:
531 	return 0;
532 }
533 
534 int
535 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
536 {
537 	struct bfa_fcs_lport_s *fcs_port;
538 	struct bfa_bsg_lport_iostats_s *iocmd =
539 			(struct bfa_bsg_lport_iostats_s *)cmd;
540 	unsigned long	flags;
541 
542 	spin_lock_irqsave(&bfad->bfad_lock, flags);
543 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
544 				iocmd->vf_id, iocmd->pwwn);
545 	if (fcs_port == NULL) {
546 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
547 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
548 		goto out;
549 	}
550 
551 	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
552 			fcs_port->lp_tag);
553 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
554 	iocmd->status = BFA_STATUS_OK;
555 out:
556 	return 0;
557 }
558 
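/*
 * bfad_iocmd_lport_get_rports - return rport qualifiers in the buffer
 * that follows the bsg header.  The caller supplies nrports as the buffer
 * capacity; it is updated with the number of entries actually written.
 */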
559 int
560 bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
561 			unsigned int payload_len)
562 {
563 	struct bfa_bsg_lport_get_rports_s *iocmd =
564 			(struct bfa_bsg_lport_get_rports_s *)cmd;
565 	struct bfa_fcs_lport_s *fcs_port;
566 	unsigned long	flags;
567 	void	*iocmd_bufptr;
568 
569 	if (iocmd->nrports == 0)
570 		return -EINVAL;
571 
572 	if (bfad_chk_iocmd_sz(payload_len,
573 			sizeof(struct bfa_bsg_lport_get_rports_s),
574 			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
575 			!= BFA_STATUS_OK) {
576 		iocmd->status = BFA_STATUS_VERSION_FAIL;
577 		return 0;
578 	}
579 
580 	iocmd_bufptr = (char *)iocmd +
581 			sizeof(struct bfa_bsg_lport_get_rports_s);
582 	spin_lock_irqsave(&bfad->bfad_lock, flags);
583 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
584 				iocmd->vf_id, iocmd->pwwn);
585 	if (fcs_port == NULL) {
586 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
587 		bfa_trc(bfad, 0);
588 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
589 		goto out;
590 	}
591 
592 	bfa_fcs_lport_get_rport_quals(fcs_port,
593 			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
594 			&iocmd->nrports);
595 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
596 	iocmd->status = BFA_STATUS_OK;
597 out:
598 	return 0;
599 }
600 
601 int
602 bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
603 {
604 	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
605 	struct bfa_fcs_lport_s *fcs_port;
606 	struct bfa_fcs_rport_s *fcs_rport;
607 	unsigned long	flags;
608 
609 	spin_lock_irqsave(&bfad->bfad_lock, flags);
610 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
611 				iocmd->vf_id, iocmd->pwwn);
612 	if (fcs_port == NULL) {
613 		bfa_trc(bfad, 0);
614 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
615 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
616 		goto out;
617 	}
618 
619 	if (iocmd->pid)
620 		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
621 						iocmd->rpwwn, iocmd->pid);
622 	else
623 		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
624 	if (fcs_rport == NULL) {
625 		bfa_trc(bfad, 0);
626 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
627 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
628 		goto out;
629 	}
630 
631 	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
632 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
633 	iocmd->status = BFA_STATUS_OK;
634 out:
635 	return 0;
636 }
637 
638 static int
639 bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
640 {
641 	struct bfa_bsg_rport_scsi_addr_s *iocmd =
642 			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
643 	struct bfa_fcs_lport_s	*fcs_port;
644 	struct bfa_fcs_itnim_s	*fcs_itnim;
645 	struct bfad_itnim_s	*drv_itnim;
646 	unsigned long	flags;
647 
648 	spin_lock_irqsave(&bfad->bfad_lock, flags);
649 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
650 				iocmd->vf_id, iocmd->pwwn);
651 	if (fcs_port == NULL) {
652 		bfa_trc(bfad, 0);
653 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
654 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
655 		goto out;
656 	}
657 
658 	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
659 	if (fcs_itnim == NULL) {
660 		bfa_trc(bfad, 0);
661 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
662 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
663 		goto out;
664 	}
665 
666 	drv_itnim = fcs_itnim->itnim_drv;
667 
668 	if (drv_itnim && drv_itnim->im_port)
669 		iocmd->host = drv_itnim->im_port->shost->host_no;
670 	else {
671 		bfa_trc(bfad, 0);
672 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
673 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
674 		goto out;
675 	}
676 
677 	iocmd->target = drv_itnim->scsi_tgt_id;
678 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
679 
680 	iocmd->bus = 0;
681 	iocmd->lun = 0;
682 	iocmd->status = BFA_STATUS_OK;
683 out:
684 	return 0;
685 }
686 
687 int
688 bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
689 {
690 	struct bfa_bsg_rport_stats_s *iocmd =
691 			(struct bfa_bsg_rport_stats_s *)cmd;
692 	struct bfa_fcs_lport_s *fcs_port;
693 	struct bfa_fcs_rport_s *fcs_rport;
694 	unsigned long	flags;
695 
696 	spin_lock_irqsave(&bfad->bfad_lock, flags);
697 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
698 				iocmd->vf_id, iocmd->pwwn);
699 	if (fcs_port == NULL) {
700 		bfa_trc(bfad, 0);
701 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
702 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
703 		goto out;
704 	}
705 
706 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
707 	if (fcs_rport == NULL) {
708 		bfa_trc(bfad, 0);
709 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
710 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
711 		goto out;
712 	}
713 
714 	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
715 		sizeof(struct bfa_rport_stats_s));
716 	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
717 		memcpy((void *)&iocmd->stats.hal_stats,
718 		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
719 			sizeof(struct bfa_rport_hal_stats_s));
720 	}
721 
722 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
723 	iocmd->status = BFA_STATUS_OK;
724 out:
725 	return 0;
726 }
727 
728 int
729 bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
730 {
731 	struct bfa_bsg_rport_reset_stats_s *iocmd =
732 				(struct bfa_bsg_rport_reset_stats_s *)cmd;
733 	struct bfa_fcs_lport_s *fcs_port;
734 	struct bfa_fcs_rport_s *fcs_rport;
735 	struct bfa_rport_s *rport;
736 	unsigned long	flags;
737 
738 	spin_lock_irqsave(&bfad->bfad_lock, flags);
739 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
740 				iocmd->vf_id, iocmd->pwwn);
741 	if (fcs_port == NULL) {
742 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
743 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
744 		goto out;
745 	}
746 
747 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
748 	if (fcs_rport == NULL) {
749 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
750 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
751 		goto out;
752 	}
753 
754 	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
755 	rport = bfa_fcs_rport_get_halrport(fcs_rport);
756 	if (rport)
757 		memset(&rport->stats, 0, sizeof(rport->stats));
758 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
759 	iocmd->status = BFA_STATUS_OK;
760 out:
761 	return 0;
762 }
763 
764 int
765 bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
766 {
767 	struct bfa_bsg_rport_set_speed_s *iocmd =
768 				(struct bfa_bsg_rport_set_speed_s *)cmd;
769 	struct bfa_fcs_lport_s *fcs_port;
770 	struct bfa_fcs_rport_s *fcs_rport;
771 	unsigned long	flags;
772 
773 	spin_lock_irqsave(&bfad->bfad_lock, flags);
774 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
775 				iocmd->vf_id, iocmd->pwwn);
776 	if (fcs_port == NULL) {
777 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
778 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
779 		goto out;
780 	}
781 
782 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
783 	if (fcs_rport == NULL) {
784 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
785 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
786 		goto out;
787 	}
788 
789 	fcs_rport->rpf.assigned_speed  = iocmd->speed;
790 	/* Set this speed in f/w only if the RPSC speed is not available */
791 	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
792 		if (fcs_rport->bfa_rport)
793 			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
794 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
795 	iocmd->status = BFA_STATUS_OK;
796 out:
797 	return 0;
798 }
799 
800 int
801 bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
802 {
803 	struct bfa_fcs_vport_s *fcs_vport;
804 	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
805 	unsigned long	flags;
806 
807 	spin_lock_irqsave(&bfad->bfad_lock, flags);
808 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
809 				iocmd->vf_id, iocmd->vpwwn);
810 	if (fcs_vport == NULL) {
811 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
812 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
813 		goto out;
814 	}
815 
816 	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
817 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
818 	iocmd->status = BFA_STATUS_OK;
819 out:
820 	return 0;
821 }
822 
823 int
824 bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
825 {
826 	struct bfa_fcs_vport_s *fcs_vport;
827 	struct bfa_bsg_vport_stats_s *iocmd =
828 				(struct bfa_bsg_vport_stats_s *)cmd;
829 	unsigned long	flags;
830 
831 	spin_lock_irqsave(&bfad->bfad_lock, flags);
832 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
833 				iocmd->vf_id, iocmd->vpwwn);
834 	if (fcs_vport == NULL) {
835 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
836 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
837 		goto out;
838 	}
839 
840 	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
841 		sizeof(struct bfa_vport_stats_s));
842 	memcpy((void *)&iocmd->vport_stats.port_stats,
843 	       (void *)&fcs_vport->lport.stats,
844 		sizeof(struct bfa_lport_stats_s));
845 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
846 	iocmd->status = BFA_STATUS_OK;
847 out:
848 	return 0;
849 }
850 
851 int
852 bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
853 {
854 	struct bfa_fcs_vport_s *fcs_vport;
855 	struct bfa_bsg_reset_stats_s *iocmd =
856 				(struct bfa_bsg_reset_stats_s *)cmd;
857 	unsigned long	flags;
858 
859 	spin_lock_irqsave(&bfad->bfad_lock, flags);
860 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
861 				iocmd->vf_id, iocmd->vpwwn);
862 	if (fcs_vport == NULL) {
863 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
864 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
865 		goto out;
866 	}
867 
868 	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
869 	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
870 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
871 	iocmd->status = BFA_STATUS_OK;
872 out:
873 	return 0;
874 }
875 
876 static int
877 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
878 			unsigned int payload_len)
879 {
880 	struct bfa_bsg_fabric_get_lports_s *iocmd =
881 			(struct bfa_bsg_fabric_get_lports_s *)cmd;
882 	bfa_fcs_vf_t	*fcs_vf;
883 	uint32_t	nports = iocmd->nports;
884 	unsigned long	flags;
885 	void	*iocmd_bufptr;
886 
887 	if (nports == 0) {
888 		iocmd->status = BFA_STATUS_EINVAL;
889 		goto out;
890 	}
891 
892 	if (bfad_chk_iocmd_sz(payload_len,
893 		sizeof(struct bfa_bsg_fabric_get_lports_s),
894 		sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
895 		iocmd->status = BFA_STATUS_VERSION_FAIL;
896 		goto out;
897 	}
898 
899 	iocmd_bufptr = (char *)iocmd +
900 			sizeof(struct bfa_bsg_fabric_get_lports_s);
901 
902 	spin_lock_irqsave(&bfad->bfad_lock, flags);
903 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
904 	if (fcs_vf == NULL) {
905 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
906 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
907 		goto out;
908 	}
909 	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
910 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
911 
912 	iocmd->nports = nports;
913 	iocmd->status = BFA_STATUS_OK;
914 out:
915 	return 0;
916 }
917 
918 int
919 bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
920 {
921 	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
922 	unsigned long	flags;
923 
924 	spin_lock_irqsave(&bfad->bfad_lock, flags);
925 	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
926 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
927 
928 	return 0;
929 }
930 
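/*
 * bfad_iocmd_ratelim - enable or disable target rate limiting.  The
 * request is rejected with BFA_STATUS_TOPOLOGY_LOOP on loop topologies;
 * otherwise the default rate-limit speed is initialized to 1 Gbps if it
 * is still unknown.
 */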
931 int
932 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
933 {
934 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
935 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
936 	unsigned long	flags;
937 
938 	spin_lock_irqsave(&bfad->bfad_lock, flags);
939 
940 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
941 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
942 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
943 	else {
944 		if (cmd == IOCMD_RATELIM_ENABLE)
945 			fcport->cfg.ratelimit = BFA_TRUE;
946 		else if (cmd == IOCMD_RATELIM_DISABLE)
947 			fcport->cfg.ratelimit = BFA_FALSE;
948 
949 		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
950 			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
951 
952 		iocmd->status = BFA_STATUS_OK;
953 	}
954 
955 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
956 
957 	return 0;
958 }
959 
960 int
961 bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
962 {
963 	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
964 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
965 	unsigned long	flags;
966 
967 	spin_lock_irqsave(&bfad->bfad_lock, flags);
968 
969 	/* Auto and speeds greater than the supported speed are invalid */
970 	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
971 	    (iocmd->speed > fcport->speed_sup)) {
972 		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
973 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
974 		return 0;
975 	}
976 
977 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
978 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
979 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
980 	else {
981 		fcport->cfg.trl_def_speed = iocmd->speed;
982 		iocmd->status = BFA_STATUS_OK;
983 	}
984 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
985 
986 	return 0;
987 }
988 
989 int
990 bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
991 {
992 	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
993 	unsigned long	flags;
994 
995 	spin_lock_irqsave(&bfad->bfad_lock, flags);
996 	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
997 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
998 	iocmd->status = BFA_STATUS_OK;
999 	return 0;
1000 }
1001 
1002 int
1003 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
1004 {
1005 	struct bfa_bsg_fcpim_modstats_s *iocmd =
1006 			(struct bfa_bsg_fcpim_modstats_s *)cmd;
1007 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1008 	struct list_head *qe, *qen;
1009 	struct bfa_itnim_s *itnim;
1010 	unsigned long	flags;
1011 
1012 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1013 	/* accumulate IO stats from itnim */
1014 	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
1015 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1016 		itnim = (struct bfa_itnim_s *) qe;
1017 		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
1018 	}
1019 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1020 	iocmd->status = BFA_STATUS_OK;
1021 	return 0;
1022 }
1023 
1024 int
1025 bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
1026 {
1027 	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
1028 				(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
1029 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1030 	struct list_head *qe, *qen;
1031 	struct bfa_itnim_s *itnim;
1032 	unsigned long	flags;
1033 
1034 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1035 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1036 		itnim = (struct bfa_itnim_s *) qe;
1037 		bfa_itnim_clear_stats(itnim);
1038 	}
1039 	memset(&fcpim->del_itn_stats, 0,
1040 		sizeof(struct bfa_fcpim_del_itn_stats_s));
1041 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1042 	iocmd->status = BFA_STATUS_OK;
1043 	return 0;
1044 }
1045 
1046 int
1047 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
1048 {
1049 	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
1050 			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
1051 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1052 	unsigned long	flags;
1053 
1054 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1055 	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
1056 		sizeof(struct bfa_fcpim_del_itn_stats_s));
1057 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1058 
1059 	iocmd->status = BFA_STATUS_OK;
1060 	return 0;
1061 }
1062 
1063 static int
1064 bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
1065 {
1066 	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
1067 	struct bfa_fcs_lport_s	*fcs_port;
1068 	unsigned long	flags;
1069 
1070 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1071 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1072 				iocmd->vf_id, iocmd->lpwwn);
1073 	if (!fcs_port)
1074 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1075 	else
1076 		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
1077 					iocmd->rpwwn, &iocmd->attr);
1078 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1079 	return 0;
1080 }
1081 
1082 static int
1083 bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
1084 {
1085 	struct bfa_bsg_itnim_iostats_s *iocmd =
1086 			(struct bfa_bsg_itnim_iostats_s *)cmd;
1087 	struct bfa_fcs_lport_s *fcs_port;
1088 	struct bfa_fcs_itnim_s *itnim;
1089 	unsigned long	flags;
1090 
1091 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1092 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1093 				iocmd->vf_id, iocmd->lpwwn);
1094 	if (!fcs_port) {
1095 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1096 		bfa_trc(bfad, 0);
1097 	} else {
1098 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1099 		if (itnim == NULL)
1100 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1101 		else {
1102 			iocmd->status = BFA_STATUS_OK;
1103 			if (bfa_fcs_itnim_get_halitn(itnim))
1104 				memcpy((void *)&iocmd->iostats, (void *)
1105 				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
1106 				       sizeof(struct bfa_itnim_iostats_s));
1107 		}
1108 	}
1109 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1110 	return 0;
1111 }
1112 
1113 static int
1114 bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1115 {
1116 	struct bfa_bsg_rport_reset_stats_s *iocmd =
1117 			(struct bfa_bsg_rport_reset_stats_s *)cmd;
1118 	struct bfa_fcs_lport_s	*fcs_port;
1119 	struct bfa_fcs_itnim_s	*itnim;
1120 	unsigned long	flags;
1121 
1122 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1123 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1124 				iocmd->vf_id, iocmd->pwwn);
1125 	if (!fcs_port)
1126 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1127 	else {
1128 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1129 		if (itnim == NULL)
1130 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1131 		else {
1132 			iocmd->status = BFA_STATUS_OK;
1133 			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1134 			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
1135 		}
1136 	}
1137 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1138 
1139 	return 0;
1140 }
1141 
1142 static int
1143 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
1144 {
1145 	struct bfa_bsg_itnim_itnstats_s *iocmd =
1146 			(struct bfa_bsg_itnim_itnstats_s *)cmd;
1147 	struct bfa_fcs_lport_s *fcs_port;
1148 	struct bfa_fcs_itnim_s *itnim;
1149 	unsigned long	flags;
1150 
1151 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1152 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1153 				iocmd->vf_id, iocmd->lpwwn);
1154 	if (!fcs_port) {
1155 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1156 		bfa_trc(bfad, 0);
1157 	} else {
1158 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1159 		if (itnim == NULL)
1160 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1161 		else {
1162 			iocmd->status = BFA_STATUS_OK;
1163 			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
1164 					&iocmd->itnstats);
1165 		}
1166 	}
1167 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1168 	return 0;
1169 }
1170 
1171 int
1172 bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
1173 {
1174 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1175 	unsigned long flags;
1176 
1177 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1178 	iocmd->status = bfa_fcport_enable(&bfad->bfa);
1179 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1180 
1181 	return 0;
1182 }
1183 
1184 int
1185 bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
1186 {
1187 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1188 	unsigned long flags;
1189 
1190 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1191 	iocmd->status = bfa_fcport_disable(&bfad->bfa);
1192 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1193 
1194 	return 0;
1195 }
1196 
1197 int
1198 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
1199 {
1200 	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
1201 	struct bfad_hal_comp fcomp;
1202 	unsigned long flags;
1203 
1204 	init_completion(&fcomp.comp);
1205 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1206 	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
1207 				&iocmd->pcifn_cfg,
1208 				bfad_hcb_comp, &fcomp);
1209 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1210 	if (iocmd->status != BFA_STATUS_OK)
1211 		goto out;
1212 
1213 	wait_for_completion(&fcomp.comp);
1214 	iocmd->status = fcomp.status;
1215 out:
1216 	return 0;
1217 }
1218 
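/*
 * bfad_iocmd_pcifn_create - create a PCI function via bfa_ablk_pf_create()
 * with the requested class and min/max bandwidth, then wait for the
 * firmware completion.
 */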
1219 int
1220 bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1221 {
1222 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1223 	struct bfad_hal_comp fcomp;
1224 	unsigned long flags;
1225 
1226 	init_completion(&fcomp.comp);
1227 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1228 	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1229 				&iocmd->pcifn_id, iocmd->port,
1230 				iocmd->pcifn_class, iocmd->bw_min,
1231 				iocmd->bw_max, bfad_hcb_comp, &fcomp);
1232 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1233 	if (iocmd->status != BFA_STATUS_OK)
1234 		goto out;
1235 
1236 	wait_for_completion(&fcomp.comp);
1237 	iocmd->status = fcomp.status;
1238 out:
1239 	return 0;
1240 }
1241 
1242 int
1243 bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
1244 {
1245 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1246 	struct bfad_hal_comp fcomp;
1247 	unsigned long flags;
1248 
1249 	init_completion(&fcomp.comp);
1250 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1251 	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
1252 				iocmd->pcifn_id,
1253 				bfad_hcb_comp, &fcomp);
1254 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1255 	if (iocmd->status != BFA_STATUS_OK)
1256 		goto out;
1257 
1258 	wait_for_completion(&fcomp.comp);
1259 	iocmd->status = fcomp.status;
1260 out:
1261 	return 0;
1262 }
1263 
1264 int
1265 bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1266 {
1267 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1268 	struct bfad_hal_comp fcomp;
1269 	unsigned long flags;
1270 
1271 	init_completion(&fcomp.comp);
1272 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1273 	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1274 				iocmd->pcifn_id, iocmd->bw_min,
1275 				iocmd->bw_max, bfad_hcb_comp, &fcomp);
1276 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1277 	bfa_trc(bfad, iocmd->status);
1278 	if (iocmd->status != BFA_STATUS_OK)
1279 		goto out;
1280 
1281 	wait_for_completion(&fcomp.comp);
1282 	iocmd->status = fcomp.status;
1283 	bfa_trc(bfad, iocmd->status);
1284 out:
1285 	return 0;
1286 }
1287 
1288 int
1289 bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
1290 {
1291 	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
1292 			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
1293 	struct bfad_hal_comp fcomp;
1294 	unsigned long flags = 0;
1295 
1296 	init_completion(&fcomp.comp);
1297 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1298 	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
1299 				iocmd->cfg.mode, iocmd->cfg.max_pf,
1300 				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
1301 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1302 	if (iocmd->status != BFA_STATUS_OK)
1303 		goto out;
1304 
1305 	wait_for_completion(&fcomp.comp);
1306 	iocmd->status = fcomp.status;
1307 out:
1308 	return 0;
1309 }
1310 
1311 int
1312 bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
1313 {
1314 	struct bfa_bsg_port_cfg_mode_s *iocmd =
1315 			(struct bfa_bsg_port_cfg_mode_s *)cmd;
1316 	struct bfad_hal_comp fcomp;
1317 	unsigned long flags = 0;
1318 
1319 	init_completion(&fcomp.comp);
1320 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1321 	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
1322 				iocmd->instance, iocmd->cfg.mode,
1323 				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
1324 				bfad_hcb_comp, &fcomp);
1325 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1326 	if (iocmd->status != BFA_STATUS_OK)
1327 		goto out;
1328 
1329 	wait_for_completion(&fcomp.comp);
1330 	iocmd->status = fcomp.status;
1331 out:
1332 	return 0;
1333 }
1334 
1335 int
1336 bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1337 {
1338 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1339 	struct bfad_hal_comp fcomp;
1340 	unsigned long   flags;
1341 
1342 	init_completion(&fcomp.comp);
1343 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1344 	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
1345 		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
1346 					bfad_hcb_comp, &fcomp);
1347 	else
1348 		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
1349 					bfad_hcb_comp, &fcomp);
1350 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1351 
1352 	if (iocmd->status != BFA_STATUS_OK)
1353 		goto out;
1354 
1355 	wait_for_completion(&fcomp.comp);
1356 	iocmd->status = fcomp.status;
1357 out:
1358 	return 0;
1359 }
1360 
1361 int
1362 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
1363 {
1364 	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
1365 	struct bfad_hal_comp    fcomp;
1366 	unsigned long   flags;
1367 
1368 	init_completion(&fcomp.comp);
1369 	iocmd->status = BFA_STATUS_OK;
1370 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1371 	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
1372 				bfad_hcb_comp, &fcomp);
1373 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1374 
1375 	if (iocmd->status != BFA_STATUS_OK)
1376 		goto out;
1377 
1378 	wait_for_completion(&fcomp.comp);
1379 	iocmd->status = fcomp.status;
1380 out:
1381 	return 0;
1382 }
1383 
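/*
 * CEE attribute and statistics queries are serialized with bfad_mutex;
 * the handler waits on cee_comp before the mutex is released.
 */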
1384 int
1385 bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1386 {
1387 	struct bfa_bsg_cee_attr_s *iocmd =
1388 				(struct bfa_bsg_cee_attr_s *)cmd;
1389 	void	*iocmd_bufptr;
1390 	struct bfad_hal_comp	cee_comp;
1391 	unsigned long	flags;
1392 
1393 	if (bfad_chk_iocmd_sz(payload_len,
1394 			sizeof(struct bfa_bsg_cee_attr_s),
1395 			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
1396 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1397 		return 0;
1398 	}
1399 
1400 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
1401 
1402 	cee_comp.status = 0;
1403 	init_completion(&cee_comp.comp);
1404 	mutex_lock(&bfad_mutex);
1405 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1406 	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
1407 					 bfad_hcb_comp, &cee_comp);
1408 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1409 	if (iocmd->status != BFA_STATUS_OK) {
1410 		mutex_unlock(&bfad_mutex);
1411 		bfa_trc(bfad, 0x5555);
1412 		goto out;
1413 	}
1414 	wait_for_completion(&cee_comp.comp);
1415 	mutex_unlock(&bfad_mutex);
1416 out:
1417 	return 0;
1418 }
1419 
1420 int
1421 bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
1422 			unsigned int payload_len)
1423 {
1424 	struct bfa_bsg_cee_stats_s *iocmd =
1425 				(struct bfa_bsg_cee_stats_s *)cmd;
1426 	void	*iocmd_bufptr;
1427 	struct bfad_hal_comp	cee_comp;
1428 	unsigned long	flags;
1429 
1430 	if (bfad_chk_iocmd_sz(payload_len,
1431 			sizeof(struct bfa_bsg_cee_stats_s),
1432 			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1433 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1434 		return 0;
1435 	}
1436 
1437 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1438 
1439 	cee_comp.status = 0;
1440 	init_completion(&cee_comp.comp);
1441 	mutex_lock(&bfad_mutex);
1442 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1443 	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1444 					bfad_hcb_comp, &cee_comp);
1445 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1446 	if (iocmd->status != BFA_STATUS_OK) {
1447 		mutex_unlock(&bfad_mutex);
1448 		bfa_trc(bfad, 0x5555);
1449 		goto out;
1450 	}
1451 	wait_for_completion(&cee_comp.comp);
1452 	mutex_unlock(&bfad_mutex);
1453 out:
1454 	return 0;
1455 }
1456 
1457 int
1458 bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1459 {
1460 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1461 	unsigned long	flags;
1462 
1463 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1464 	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1465 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1466 	if (iocmd->status != BFA_STATUS_OK)
1467 		bfa_trc(bfad, 0x5555);
1468 	return 0;
1469 }
1470 
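/*
 * Note: bfad_iocmd_sfp_media() and bfad_iocmd_sfp_speed() only wait on
 * fcomp when the SFP module returns BFA_STATUS_SFP_NOT_READY; any other
 * status is passed straight back to the caller.
 */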
1471 int
1472 bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1473 {
1474 	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1475 	struct bfad_hal_comp	fcomp;
1476 	unsigned long	flags;
1477 
1478 	init_completion(&fcomp.comp);
1479 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1480 	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1481 				bfad_hcb_comp, &fcomp);
1482 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1483 	bfa_trc(bfad, iocmd->status);
1484 	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1485 		goto out;
1486 
1487 	wait_for_completion(&fcomp.comp);
1488 	iocmd->status = fcomp.status;
1489 out:
1490 	return 0;
1491 }
1492 
1493 int
1494 bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1495 {
1496 	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1497 	struct bfad_hal_comp	fcomp;
1498 	unsigned long	flags;
1499 
1500 	init_completion(&fcomp.comp);
1501 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1502 	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1503 				bfad_hcb_comp, &fcomp);
1504 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1505 	bfa_trc(bfad, iocmd->status);
1506 	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1507 		goto out;
1508 	wait_for_completion(&fcomp.comp);
1509 	iocmd->status = fcomp.status;
1510 out:
1511 	return 0;
1512 }
1513 
1514 int
1515 bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1516 {
1517 	struct bfa_bsg_flash_attr_s *iocmd =
1518 			(struct bfa_bsg_flash_attr_s *)cmd;
1519 	struct bfad_hal_comp fcomp;
1520 	unsigned long	flags;
1521 
1522 	init_completion(&fcomp.comp);
1523 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1524 	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1525 				bfad_hcb_comp, &fcomp);
1526 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1527 	if (iocmd->status != BFA_STATUS_OK)
1528 		goto out;
1529 	wait_for_completion(&fcomp.comp);
1530 	iocmd->status = fcomp.status;
1531 out:
1532 	return 0;
1533 }
1534 
1535 int
1536 bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1537 {
1538 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1539 	struct bfad_hal_comp fcomp;
1540 	unsigned long	flags;
1541 
1542 	init_completion(&fcomp.comp);
1543 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1544 	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1545 				iocmd->instance, bfad_hcb_comp, &fcomp);
1546 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1547 	if (iocmd->status != BFA_STATUS_OK)
1548 		goto out;
1549 	wait_for_completion(&fcomp.comp);
1550 	iocmd->status = fcomp.status;
1551 out:
1552 	return 0;
1553 }
1554 
1555 int
1556 bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1557 			unsigned int payload_len)
1558 {
1559 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1560 	void	*iocmd_bufptr;
1561 	struct bfad_hal_comp fcomp;
1562 	unsigned long	flags;
1563 
1564 	if (bfad_chk_iocmd_sz(payload_len,
1565 			sizeof(struct bfa_bsg_flash_s),
1566 			iocmd->bufsz) != BFA_STATUS_OK) {
1567 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1568 		return 0;
1569 	}
1570 
1571 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1572 
1573 	init_completion(&fcomp.comp);
1574 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1575 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1576 				iocmd->type, iocmd->instance, iocmd_bufptr,
1577 				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1578 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1579 	if (iocmd->status != BFA_STATUS_OK)
1580 		goto out;
1581 	wait_for_completion(&fcomp.comp);
1582 	iocmd->status = fcomp.status;
1583 out:
1584 	return 0;
1585 }
1586 
1587 int
1588 bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1589 			unsigned int payload_len)
1590 {
1591 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1592 	struct bfad_hal_comp fcomp;
1593 	void	*iocmd_bufptr;
1594 	unsigned long	flags;
1595 
1596 	if (bfad_chk_iocmd_sz(payload_len,
1597 			sizeof(struct bfa_bsg_flash_s),
1598 			iocmd->bufsz) != BFA_STATUS_OK) {
1599 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1600 		return 0;
1601 	}
1602 
1603 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1604 
1605 	init_completion(&fcomp.comp);
1606 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1607 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1608 				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1609 				bfad_hcb_comp, &fcomp);
1610 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1611 	if (iocmd->status != BFA_STATUS_OK)
1612 		goto out;
1613 	wait_for_completion(&fcomp.comp);
1614 	iocmd->status = fcomp.status;
1615 out:
1616 	return 0;
1617 }
1618 
1619 int
1620 bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1621 {
1622 	struct bfa_bsg_diag_get_temp_s *iocmd =
1623 			(struct bfa_bsg_diag_get_temp_s *)cmd;
1624 	struct bfad_hal_comp fcomp;
1625 	unsigned long	flags;
1626 
1627 	init_completion(&fcomp.comp);
1628 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1629 	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1630 				&iocmd->result, bfad_hcb_comp, &fcomp);
1631 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1632 	bfa_trc(bfad, iocmd->status);
1633 	if (iocmd->status != BFA_STATUS_OK)
1634 		goto out;
1635 	wait_for_completion(&fcomp.comp);
1636 	iocmd->status = fcomp.status;
1637 out:
1638 	return 0;
1639 }
1640 
1641 int
1642 bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1643 {
1644 	struct bfa_bsg_diag_memtest_s *iocmd =
1645 			(struct bfa_bsg_diag_memtest_s *)cmd;
1646 	struct bfad_hal_comp fcomp;
1647 	unsigned long   flags;
1648 
1649 	init_completion(&fcomp.comp);
1650 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1651 	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1652 				&iocmd->memtest, iocmd->pat,
1653 				&iocmd->result, bfad_hcb_comp, &fcomp);
1654 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1655 	bfa_trc(bfad, iocmd->status);
1656 	if (iocmd->status != BFA_STATUS_OK)
1657 		goto out;
1658 	wait_for_completion(&fcomp.comp);
1659 	iocmd->status = fcomp.status;
1660 out:
1661 	return 0;
1662 }
1663 
1664 int
1665 bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1666 {
1667 	struct bfa_bsg_diag_loopback_s *iocmd =
1668 			(struct bfa_bsg_diag_loopback_s *)cmd;
1669 	struct bfad_hal_comp fcomp;
1670 	unsigned long   flags;
1671 
1672 	init_completion(&fcomp.comp);
1673 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1674 	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1675 				iocmd->speed, iocmd->lpcnt, iocmd->pat,
1676 				&iocmd->result, bfad_hcb_comp, &fcomp);
1677 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1678 	bfa_trc(bfad, iocmd->status);
1679 	if (iocmd->status != BFA_STATUS_OK)
1680 		goto out;
1681 	wait_for_completion(&fcomp.comp);
1682 	iocmd->status = fcomp.status;
1683 out:
1684 	return 0;
1685 }
1686 
1687 int
1688 bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1689 {
1690 	struct bfa_bsg_diag_fwping_s *iocmd =
1691 			(struct bfa_bsg_diag_fwping_s *)cmd;
1692 	struct bfad_hal_comp fcomp;
1693 	unsigned long   flags;
1694 
1695 	init_completion(&fcomp.comp);
1696 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1697 	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1698 				iocmd->pattern, &iocmd->result,
1699 				bfad_hcb_comp, &fcomp);
1700 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1701 	bfa_trc(bfad, iocmd->status);
1702 	if (iocmd->status != BFA_STATUS_OK)
1703 		goto out;
1704 	bfa_trc(bfad, 0x77771);
1705 	wait_for_completion(&fcomp.comp);
1706 	iocmd->status = fcomp.status;
1707 out:
1708 	return 0;
1709 }
1710 
1711 int
1712 bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1713 {
1714 	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1715 	struct bfad_hal_comp fcomp;
1716 	unsigned long   flags;
1717 
1718 	init_completion(&fcomp.comp);
1719 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1720 	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1721 				iocmd->queue, &iocmd->result,
1722 				bfad_hcb_comp, &fcomp);
1723 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1724 	if (iocmd->status != BFA_STATUS_OK)
1725 		goto out;
1726 	wait_for_completion(&fcomp.comp);
1727 	iocmd->status = fcomp.status;
1728 out:
1729 	return 0;
1730 }
1731 
1732 int
1733 bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1734 {
1735 	struct bfa_bsg_sfp_show_s *iocmd =
1736 			(struct bfa_bsg_sfp_show_s *)cmd;
1737 	struct bfad_hal_comp fcomp;
1738 	unsigned long   flags;
1739 
1740 	init_completion(&fcomp.comp);
1741 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1742 	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1743 				bfad_hcb_comp, &fcomp);
1744 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1745 	bfa_trc(bfad, iocmd->status);
1746 	if (iocmd->status != BFA_STATUS_OK)
1747 		goto out;
1748 	wait_for_completion(&fcomp.comp);
1749 	iocmd->status = fcomp.status;
1750 	bfa_trc(bfad, iocmd->status);
1751 out:
1752 	return 0;
1753 }
1754 
1755 int
1756 bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1757 {
1758 	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1759 	unsigned long   flags;
1760 
1761 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1762 	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1763 				&iocmd->ledtest);
1764 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1765 	return 0;
1766 }
1767 
1768 int
1769 bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1770 {
1771 	struct bfa_bsg_diag_beacon_s *iocmd =
1772 			(struct bfa_bsg_diag_beacon_s *)cmd;
1773 	unsigned long	flags;
1774 
1775 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1776 	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1777 				iocmd->beacon, iocmd->link_e2e_beacon,
1778 				iocmd->second);
1779 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1780 	return 0;
1781 }
1782 
1783 int
1784 bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1785 {
1786 	struct bfa_bsg_diag_lb_stat_s *iocmd =
1787 			(struct bfa_bsg_diag_lb_stat_s *)cmd;
1788 	unsigned long	flags;
1789 
1790 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1791 	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1792 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1793 	bfa_trc(bfad, iocmd->status);
1794 
1795 	return 0;
1796 }
1797 
1798 int
1799 bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
1800 {
1801 	struct bfa_bsg_dport_enable_s *iocmd =
1802 				(struct bfa_bsg_dport_enable_s *)pcmd;
1803 	unsigned long	flags;
1804 	struct bfad_hal_comp fcomp;
1805 
1806 	init_completion(&fcomp.comp);
1807 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1808 	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
1809 					iocmd->pat, bfad_hcb_comp, &fcomp);
1810 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1811 	if (iocmd->status != BFA_STATUS_OK) {
1812 		bfa_trc(bfad, iocmd->status);
1813 	} else {
1814 		wait_for_completion(&fcomp.comp);
1815 		iocmd->status = fcomp.status;
1816 	}
1817 	return 0;
1818 }
1819 
1820 int
1821 bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
1822 {
1823 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1824 	unsigned long	flags;
1825 	struct bfad_hal_comp fcomp;
1826 
1827 	init_completion(&fcomp.comp);
1828 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1829 	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
1830 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1831 	if (iocmd->status != BFA_STATUS_OK) {
1832 		bfa_trc(bfad, iocmd->status);
1833 	} else {
1834 		wait_for_completion(&fcomp.comp);
1835 		iocmd->status = fcomp.status;
1836 	}
1837 	return 0;
1838 }
1839 
1840 int
1841 bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
1842 {
1843 	struct bfa_bsg_dport_enable_s *iocmd =
1844 				(struct bfa_bsg_dport_enable_s *)pcmd;
1845 	unsigned long   flags;
1846 	struct bfad_hal_comp fcomp;
1847 
1848 	init_completion(&fcomp.comp);
1849 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1850 	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
1851 					iocmd->pat, bfad_hcb_comp,
1852 					&fcomp);
1853 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1854 
1855 	if (iocmd->status != BFA_STATUS_OK) {
1856 		bfa_trc(bfad, iocmd->status);
1857 	} else {
1858 		wait_for_completion(&fcomp.comp);
1859 		iocmd->status = fcomp.status;
1860 	}
1861 
1862 	return 0;
1863 }
1864 
1865 int
1866 bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
1867 {
1868 	struct bfa_bsg_diag_dport_show_s *iocmd =
1869 				(struct bfa_bsg_diag_dport_show_s *)pcmd;
1870 	unsigned long   flags;
1871 
1872 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1873 	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
1874 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1875 
1876 	return 0;
1877 }
1878 
1879 
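/* Query the attributes of the requested PHY instance. */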
1880 int
1881 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1882 {
1883 	struct bfa_bsg_phy_attr_s *iocmd =
1884 			(struct bfa_bsg_phy_attr_s *)cmd;
1885 	struct bfad_hal_comp fcomp;
1886 	unsigned long	flags;
1887 
1888 	init_completion(&fcomp.comp);
1889 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1890 	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1891 				&iocmd->attr, bfad_hcb_comp, &fcomp);
1892 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1893 	if (iocmd->status != BFA_STATUS_OK)
1894 		goto out;
1895 	wait_for_completion(&fcomp.comp);
1896 	iocmd->status = fcomp.status;
1897 out:
1898 	return 0;
1899 }
1900 
1901 int
1902 bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1903 {
1904 	struct bfa_bsg_phy_stats_s *iocmd =
1905 			(struct bfa_bsg_phy_stats_s *)cmd;
1906 	struct bfad_hal_comp fcomp;
1907 	unsigned long	flags;
1908 
1909 	init_completion(&fcomp.comp);
1910 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1911 	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1912 				&iocmd->stats, bfad_hcb_comp, &fcomp);
1913 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1914 	if (iocmd->status != BFA_STATUS_OK)
1915 		goto out;
1916 	wait_for_completion(&fcomp.comp);
1917 	iocmd->status = fcomp.status;
1918 out:
1919 	return 0;
1920 }
1921 
1922 int
1923 bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1924 {
1925 	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1926 	struct bfad_hal_comp fcomp;
1927 	void	*iocmd_bufptr;
1928 	unsigned long	flags;
1929 
1930 	if (bfad_chk_iocmd_sz(payload_len,
1931 			sizeof(struct bfa_bsg_phy_s),
1932 			iocmd->bufsz) != BFA_STATUS_OK) {
1933 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1934 		return 0;
1935 	}
1936 
1937 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1938 	init_completion(&fcomp.comp);
1939 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1940 	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1941 				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1942 				0, bfad_hcb_comp, &fcomp);
1943 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1944 	if (iocmd->status != BFA_STATUS_OK)
1945 		goto out;
1946 	wait_for_completion(&fcomp.comp);
1947 	iocmd->status = fcomp.status;
1950 out:
1951 	return 0;
1952 }
1953 
1954 int
1955 bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
1956 {
1957 	struct bfa_bsg_vhba_attr_s *iocmd =
1958 			(struct bfa_bsg_vhba_attr_s *)cmd;
1959 	struct bfa_vhba_attr_s *attr = &iocmd->attr;
1960 	unsigned long flags;
1961 
1962 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1963 	attr->pwwn =  bfad->bfa.ioc.attr->pwwn;
1964 	attr->nwwn =  bfad->bfa.ioc.attr->nwwn;
1965 	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
1966 	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
1967 	attr->path_tov  = bfa_fcpim_path_tov_get(&bfad->bfa);
1968 	iocmd->status = BFA_STATUS_OK;
1969 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1970 	return 0;
1971 }
1972 
1973 int
1974 bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1975 {
1976 	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1977 	void	*iocmd_bufptr;
1978 	struct bfad_hal_comp fcomp;
1979 	unsigned long	flags;
1980 
1981 	if (bfad_chk_iocmd_sz(payload_len,
1982 			sizeof(struct bfa_bsg_phy_s),
1983 			iocmd->bufsz) != BFA_STATUS_OK) {
1984 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1985 		return 0;
1986 	}
1987 
1988 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1989 	init_completion(&fcomp.comp);
1990 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1991 	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1992 				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1993 				0, bfad_hcb_comp, &fcomp);
1994 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1995 	if (iocmd->status != BFA_STATUS_OK)
1996 		goto out;
1997 	wait_for_completion(&fcomp.comp);
1998 	iocmd->status = fcomp.status;
1999 out:
2000 	return 0;
2001 }
2002 
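/*
 * Copy the driver port log (plog) buffer to the caller, provided the
 * supplied buffer is large enough to hold it.
 */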
2003 int
2004 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
2005 {
2006 	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
2007 	void *iocmd_bufptr;
2008 
2009 	if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
2010 		bfa_trc(bfad, sizeof(struct bfa_plog_s));
2011 		iocmd->status = BFA_STATUS_EINVAL;
2012 		goto out;
2013 	}
2014 
2015 	iocmd->status = BFA_STATUS_OK;
2016 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2017 	memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
2018 out:
2019 	return 0;
2020 }
2021 
2022 #define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
2023 int
2024 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
2025 			unsigned int payload_len)
2026 {
2027 	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
2028 	void	*iocmd_bufptr;
2029 	unsigned long	flags;
2030 	u32 offset;
2031 
2032 	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
2033 			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
2034 		iocmd->status = BFA_STATUS_VERSION_FAIL;
2035 		return 0;
2036 	}
2037 
2038 	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
2039 			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
2040 			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
2041 		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
2042 		iocmd->status = BFA_STATUS_EINVAL;
2043 		goto out;
2044 	}
2045 
2046 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2047 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2048 	offset = iocmd->offset;
2049 	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
2050 				&offset, &iocmd->bufsz);
2051 	iocmd->offset = offset;
2052 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2053 out:
2054 	return 0;
2055 }
2056 
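/*
 * Debug controls: re-arm the one-shot firmware state save, clear the
 * port log, or start/stop the driver trace module.
 */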
2057 int
2058 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2059 {
2060 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2061 	unsigned long	flags;
2062 
2063 	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
2064 		spin_lock_irqsave(&bfad->bfad_lock, flags);
2065 		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
2066 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2067 	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
2068 		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
2069 	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
2070 		bfa_trc_init(bfad->trcmod);
2071 	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
2072 		bfa_trc_stop(bfad->trcmod);
2073 
2074 	iocmd->status = BFA_STATUS_OK;
2075 	return 0;
2076 }
2077 
2078 int
2079 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
2080 {
2081 	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
2082 
2083 	if (iocmd->ctl == BFA_TRUE)
2084 		bfad->plog_buf.plog_enabled = 1;
2085 	else
2086 		bfad->plog_buf.plog_enabled = 0;
2087 
2088 	iocmd->status = BFA_STATUS_OK;
2089 	return 0;
2090 }
2091 
2092 int
2093 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2094 {
2095 	struct bfa_bsg_fcpim_profile_s *iocmd =
2096 				(struct bfa_bsg_fcpim_profile_s *)cmd;
2097 	unsigned long	flags;
2098 
2099 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2100 	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
2101 		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, ktime_get_real_seconds());
2102 	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
2103 		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
2104 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2105 
2106 	return 0;
2107 }
2108 
2109 static int
2110 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
2111 {
2112 	struct bfa_bsg_itnim_ioprofile_s *iocmd =
2113 				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
2114 	struct bfa_fcs_lport_s *fcs_port;
2115 	struct bfa_fcs_itnim_s *itnim;
2116 	unsigned long   flags;
2117 
2118 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2119 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
2120 				iocmd->vf_id, iocmd->lpwwn);
2121 	if (!fcs_port)
2122 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
2123 	else {
2124 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
2125 		if (itnim == NULL)
2126 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
2127 		else
2128 			iocmd->status = bfa_itnim_get_ioprofile(
2129 						bfa_fcs_itnim_get_halitn(itnim),
2130 						&iocmd->ioprofile);
2131 	}
2132 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2133 	return 0;
2134 }
2135 
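/*
 * Fetch the FC port statistics; the data is returned in iocmd->stats
 * through the pending-queue completion callback.
 */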
2136 int
2137 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
2138 {
2139 	struct bfa_bsg_fcport_stats_s *iocmd =
2140 				(struct bfa_bsg_fcport_stats_s *)cmd;
2141 	struct bfad_hal_comp fcomp;
2142 	unsigned long	flags;
2143 	struct bfa_cb_pending_q_s cb_qe;
2144 
2145 	init_completion(&fcomp.comp);
2146 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2147 			   &fcomp, &iocmd->stats);
2148 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2149 	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2150 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2151 	if (iocmd->status != BFA_STATUS_OK) {
2152 		bfa_trc(bfad, iocmd->status);
2153 		goto out;
2154 	}
2155 	wait_for_completion(&fcomp.comp);
2156 	iocmd->status = fcomp.status;
2157 out:
2158 	return 0;
2159 }
2160 
2161 int
2162 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
2163 {
2164 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2165 	struct bfad_hal_comp fcomp;
2166 	unsigned long	flags;
2167 	struct bfa_cb_pending_q_s cb_qe;
2168 
2169 	init_completion(&fcomp.comp);
2170 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
2171 
2172 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2173 	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2174 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2175 	if (iocmd->status != BFA_STATUS_OK) {
2176 		bfa_trc(bfad, iocmd->status);
2177 		goto out;
2178 	}
2179 	wait_for_completion(&fcomp.comp);
2180 	iocmd->status = fcomp.status;
2181 out:
2182 	return 0;
2183 }
2184 
2185 int
2186 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2187 {
2188 	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2189 	struct bfad_hal_comp fcomp;
2190 	unsigned long	flags;
2191 
2192 	init_completion(&fcomp.comp);
2193 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2194 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2195 			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2196 			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2197 			bfad_hcb_comp, &fcomp);
2198 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2199 	if (iocmd->status != BFA_STATUS_OK)
2200 		goto out;
2201 	wait_for_completion(&fcomp.comp);
2202 	iocmd->status = fcomp.status;
2203 out:
2204 	return 0;
2205 }
2206 
2207 int
2208 bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2209 {
2210 	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2211 	struct bfad_hal_comp fcomp;
2212 	unsigned long	flags;
2213 
2214 	init_completion(&fcomp.comp);
2215 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2216 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2217 			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2218 			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2219 			bfad_hcb_comp, &fcomp);
2220 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2221 	if (iocmd->status != BFA_STATUS_OK)
2222 		goto out;
2223 	wait_for_completion(&fcomp.comp);
2224 	iocmd->status = fcomp.status;
2225 out:
2226 	return 0;
2227 }
2228 
2229 int
2230 bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2231 {
2232 	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2233 	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2234 	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2235 	unsigned long	flags;
2236 
2237 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2238 	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2239 	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2240 	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2241 	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2242 	iocmd->status = BFA_STATUS_OK;
2243 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2244 
2245 	return 0;
2246 }
2247 
2248 int
2249 bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
2250 {
2251 	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2252 	struct bfad_hal_comp fcomp;
2253 	unsigned long	flags;
2254 
2255 	init_completion(&fcomp.comp);
2256 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2257 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2258 				BFA_FLASH_PART_PXECFG,
2259 				bfad->bfa.ioc.port_id, &iocmd->cfg,
2260 				sizeof(struct bfa_ethboot_cfg_s), 0,
2261 				bfad_hcb_comp, &fcomp);
2262 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2263 	if (iocmd->status != BFA_STATUS_OK)
2264 		goto out;
2265 	wait_for_completion(&fcomp.comp);
2266 	iocmd->status = fcomp.status;
2267 out:
2268 	return 0;
2269 }
2270 
2271 int
2272 bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
2273 {
2274 	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2275 	struct bfad_hal_comp fcomp;
2276 	unsigned long	flags;
2277 
2278 	init_completion(&fcomp.comp);
2279 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2280 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2281 				BFA_FLASH_PART_PXECFG,
2282 				bfad->bfa.ioc.port_id, &iocmd->cfg,
2283 				sizeof(struct bfa_ethboot_cfg_s), 0,
2284 				bfad_hcb_comp, &fcomp);
2285 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2286 	if (iocmd->status != BFA_STATUS_OK)
2287 		goto out;
2288 	wait_for_completion(&fcomp.comp);
2289 	iocmd->status = fcomp.status;
2290 out:
2291 	return 0;
2292 }
2293 
2294 int
2295 bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2296 {
2297 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2298 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2299 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2300 	unsigned long	flags;
2301 
2302 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2303 
2304 	if (bfa_fcport_is_dport(&bfad->bfa)) {
2305 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2306 		return BFA_STATUS_DPORT_ERR;
2307 	}
2308 
2309 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2310 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2311 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2312 	else {
2313 		if (v_cmd == IOCMD_TRUNK_ENABLE) {
2314 			trunk->attr.state = BFA_TRUNK_OFFLINE;
2315 			bfa_fcport_disable(&bfad->bfa);
2316 			fcport->cfg.trunked = BFA_TRUE;
2317 		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2318 			trunk->attr.state = BFA_TRUNK_DISABLED;
2319 			bfa_fcport_disable(&bfad->bfa);
2320 			fcport->cfg.trunked = BFA_FALSE;
2321 		}
2322 
2323 		if (!bfa_fcport_is_disabled(&bfad->bfa))
2324 			bfa_fcport_enable(&bfad->bfa);
2325 
2326 		iocmd->status = BFA_STATUS_OK;
2327 	}
2328 
2329 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2330 
2331 	return 0;
2332 }
2333 
2334 int
2335 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2336 {
2337 	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2338 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2339 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2340 	unsigned long	flags;
2341 
2342 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2343 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2344 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2345 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2346 	else {
2347 		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2348 			sizeof(struct bfa_trunk_attr_s));
2349 		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2350 		iocmd->status = BFA_STATUS_OK;
2351 	}
2352 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2353 
2354 	return 0;
2355 }
2356 
2357 int
2358 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2359 {
2360 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2361 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2362 	unsigned long	flags;
2363 
2364 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2365 	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2366 		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2367 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2368 			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2369 		else {
2370 			if (v_cmd == IOCMD_QOS_ENABLE)
2371 				fcport->cfg.qos_enabled = BFA_TRUE;
2372 			else if (v_cmd == IOCMD_QOS_DISABLE) {
2373 				fcport->cfg.qos_enabled = BFA_FALSE;
2374 				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
2375 				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
2376 				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
2377 			}
2378 		}
2379 	}
2380 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2381 
2382 	return 0;
2383 }
2384 
2385 int
2386 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2387 {
2388 	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2389 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2390 	unsigned long	flags;
2391 
2392 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2393 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2394 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2395 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2396 	else {
2397 		iocmd->attr.state = fcport->qos_attr.state;
2398 		iocmd->attr.total_bb_cr =
2399 			be32_to_cpu(fcport->qos_attr.total_bb_cr);
2400 		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
2401 		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
2402 		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
2403 		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
2404 		iocmd->status = BFA_STATUS_OK;
2405 	}
2406 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2407 
2408 	return 0;
2409 }
2410 
2411 int
2412 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2413 {
2414 	struct bfa_bsg_qos_vc_attr_s *iocmd =
2415 				(struct bfa_bsg_qos_vc_attr_s *)cmd;
2416 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2417 	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2418 	unsigned long	flags;
2419 	u32	i = 0;
2420 
2421 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2422 	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2423 	iocmd->attr.shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
2424 	iocmd->attr.elp_opmode_flags  =
2425 				be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2426 
2427 	/* Individual VC info */
2428 	while (i < iocmd->attr.total_vc_count) {
2429 		iocmd->attr.vc_info[i].vc_credit =
2430 				bfa_vc_attr->vc_info[i].vc_credit;
2431 		iocmd->attr.vc_info[i].borrow_credit =
2432 				bfa_vc_attr->vc_info[i].borrow_credit;
2433 		iocmd->attr.vc_info[i].priority =
2434 				bfa_vc_attr->vc_info[i].priority;
2435 		i++;
2436 	}
2437 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2438 
2439 	iocmd->status = BFA_STATUS_OK;
2440 	return 0;
2441 }
2442 
2443 int
2444 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2445 {
2446 	struct bfa_bsg_fcport_stats_s *iocmd =
2447 				(struct bfa_bsg_fcport_stats_s *)cmd;
2448 	struct bfad_hal_comp fcomp;
2449 	unsigned long	flags;
2450 	struct bfa_cb_pending_q_s cb_qe;
2451 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2452 
2453 	init_completion(&fcomp.comp);
2454 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2455 			   &fcomp, &iocmd->stats);
2456 
2457 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2458 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2459 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2460 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2461 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2462 	else
2463 		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2464 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2465 	if (iocmd->status != BFA_STATUS_OK) {
2466 		bfa_trc(bfad, iocmd->status);
2467 		goto out;
2468 	}
2469 	wait_for_completion(&fcomp.comp);
2470 	iocmd->status = fcomp.status;
2471 out:
2472 	return 0;
2473 }
2474 
2475 int
2476 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2477 {
2478 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2479 	struct bfad_hal_comp fcomp;
2480 	unsigned long	flags;
2481 	struct bfa_cb_pending_q_s cb_qe;
2482 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2483 
2484 	init_completion(&fcomp.comp);
2485 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2486 			   &fcomp, NULL);
2487 
2488 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2489 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2490 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2491 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2492 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2493 	else
2494 		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2495 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2496 	if (iocmd->status != BFA_STATUS_OK) {
2497 		bfa_trc(bfad, iocmd->status);
2498 		goto out;
2499 	}
2500 	wait_for_completion(&fcomp.comp);
2501 	iocmd->status = fcomp.status;
2502 out:
2503 	return 0;
2504 }
2505 
2506 int
2507 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2508 {
2509 	struct bfa_bsg_vf_stats_s *iocmd =
2510 			(struct bfa_bsg_vf_stats_s *)cmd;
2511 	struct bfa_fcs_fabric_s	*fcs_vf;
2512 	unsigned long	flags;
2513 
2514 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2515 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2516 	if (fcs_vf == NULL) {
2517 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2518 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2519 		goto out;
2520 	}
2521 	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2522 		sizeof(struct bfa_vf_stats_s));
2523 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2524 	iocmd->status = BFA_STATUS_OK;
2525 out:
2526 	return 0;
2527 }
2528 
2529 int
2530 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2531 {
2532 	struct bfa_bsg_vf_reset_stats_s *iocmd =
2533 			(struct bfa_bsg_vf_reset_stats_s *)cmd;
2534 	struct bfa_fcs_fabric_s	*fcs_vf;
2535 	unsigned long	flags;
2536 
2537 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2538 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2539 	if (fcs_vf == NULL) {
2540 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2541 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2542 		goto out;
2543 	}
2544 	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2545 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2546 	iocmd->status = BFA_STATUS_OK;
2547 out:
2548 	return 0;
2549 }
2550 
2551 /* Function to reset the LUN SCAN mode */
2552 static void
2553 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
2554 {
2555 	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
2556 	struct bfad_vport_s *vport = NULL;
2557 
2558 	/* Set the scsi device LUN SCAN flags for base port */
2559 	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
2560 
2561 	/* Set the scsi device LUN SCAN flags for the vports */
2562 	list_for_each_entry(vport, &bfad->vport_list, list_entry)
2563 		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
2564 }
2565 
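/*
 * Enable, disable, or clear the FCP LUN mask configuration; enabling or
 * disabling also resets the SCSI device LUN-scan mode accordingly.
 */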
2566 int
2567 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2568 {
2569 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2570 	unsigned long	flags;
2571 
2572 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2573 	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
2574 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2575 		/* Set the LUN Scanning mode to be Sequential scan */
2576 		if (iocmd->status == BFA_STATUS_OK)
2577 			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
2578 	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
2579 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2580 		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
2581 		if (iocmd->status == BFA_STATUS_OK)
2582 			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
2583 	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2584 		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2585 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2586 	return 0;
2587 }
2588 
2589 int
2590 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2591 {
2592 	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2593 			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2594 	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2595 	unsigned long	flags;
2596 
2597 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2598 	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2599 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2600 	return 0;
2601 }
2602 
2603 int
2604 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2605 {
2606 	struct bfa_bsg_fcpim_lunmask_s *iocmd =
2607 				(struct bfa_bsg_fcpim_lunmask_s *)cmd;
2608 	unsigned long	flags;
2609 
2610 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2611 	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2612 		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2613 					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2614 	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2615 		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2616 					iocmd->vf_id, &iocmd->pwwn,
2617 					iocmd->rpwwn, iocmd->lun);
2618 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2619 	return 0;
2620 }
2621 
2622 int
2623 bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
2624 {
2625 	struct bfa_bsg_fcpim_throttle_s *iocmd =
2626 			(struct bfa_bsg_fcpim_throttle_s *)cmd;
2627 	unsigned long   flags;
2628 
2629 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2630 	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
2631 				(void *)&iocmd->throttle);
2632 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2633 
2634 	return 0;
2635 }
2636 
2637 int
2638 bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
2639 {
2640 	struct bfa_bsg_fcpim_throttle_s *iocmd =
2641 			(struct bfa_bsg_fcpim_throttle_s *)cmd;
2642 	unsigned long	flags;
2643 
2644 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2645 	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
2646 				iocmd->throttle.cfg_value);
2647 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2648 
2649 	return 0;
2650 }
2651 
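/* FRU access: raw tfru read/write and FRU VPD read/update/max-size query. */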
2652 int
2653 bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
2654 {
2655 	struct bfa_bsg_tfru_s *iocmd =
2656 			(struct bfa_bsg_tfru_s *)cmd;
2657 	struct bfad_hal_comp fcomp;
2658 	unsigned long flags = 0;
2659 
2660 	init_completion(&fcomp.comp);
2661 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2662 	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
2663 				&iocmd->data, iocmd->len, iocmd->offset,
2664 				bfad_hcb_comp, &fcomp);
2665 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2666 	if (iocmd->status == BFA_STATUS_OK) {
2667 		wait_for_completion(&fcomp.comp);
2668 		iocmd->status = fcomp.status;
2669 	}
2670 
2671 	return 0;
2672 }
2673 
2674 int
2675 bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
2676 {
2677 	struct bfa_bsg_tfru_s *iocmd =
2678 			(struct bfa_bsg_tfru_s *)cmd;
2679 	struct bfad_hal_comp fcomp;
2680 	unsigned long flags = 0;
2681 
2682 	init_completion(&fcomp.comp);
2683 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2684 	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
2685 				&iocmd->data, iocmd->len, iocmd->offset,
2686 				bfad_hcb_comp, &fcomp);
2687 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2688 	if (iocmd->status == BFA_STATUS_OK) {
2689 		wait_for_completion(&fcomp.comp);
2690 		iocmd->status = fcomp.status;
2691 	}
2692 
2693 	return 0;
2694 }
2695 
2696 int
2697 bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
2698 {
2699 	struct bfa_bsg_fruvpd_s *iocmd =
2700 			(struct bfa_bsg_fruvpd_s *)cmd;
2701 	struct bfad_hal_comp fcomp;
2702 	unsigned long flags = 0;
2703 
2704 	init_completion(&fcomp.comp);
2705 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2706 	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
2707 				&iocmd->data, iocmd->len, iocmd->offset,
2708 				bfad_hcb_comp, &fcomp);
2709 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2710 	if (iocmd->status == BFA_STATUS_OK) {
2711 		wait_for_completion(&fcomp.comp);
2712 		iocmd->status = fcomp.status;
2713 	}
2714 
2715 	return 0;
2716 }
2717 
2718 int
2719 bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
2720 {
2721 	struct bfa_bsg_fruvpd_s *iocmd =
2722 			(struct bfa_bsg_fruvpd_s *)cmd;
2723 	struct bfad_hal_comp fcomp;
2724 	unsigned long flags = 0;
2725 
2726 	init_completion(&fcomp.comp);
2727 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2728 	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
2729 				&iocmd->data, iocmd->len, iocmd->offset,
2730 				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
2731 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2732 	if (iocmd->status == BFA_STATUS_OK) {
2733 		wait_for_completion(&fcomp.comp);
2734 		iocmd->status = fcomp.status;
2735 	}
2736 
2737 	return 0;
2738 }
2739 
2740 int
2741 bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
2742 {
2743 	struct bfa_bsg_fruvpd_max_size_s *iocmd =
2744 			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
2745 	unsigned long flags = 0;
2746 
2747 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2748 	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
2749 						&iocmd->max_size);
2750 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2751 
2752 	return 0;
2753 }
2754 
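/*
 * Dispatch a vendor-unique BSG command to the matching IOCMD handler above.
 * Unknown command codes fail with -EINVAL.
 */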
2755 static int
2756 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2757 		unsigned int payload_len)
2758 {
2759 	int rc = -EINVAL;
2760 
2761 	switch (cmd) {
2762 	case IOCMD_IOC_ENABLE:
2763 		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
2764 		break;
2765 	case IOCMD_IOC_DISABLE:
2766 		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
2767 		break;
2768 	case IOCMD_IOC_GET_INFO:
2769 		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
2770 		break;
2771 	case IOCMD_IOC_GET_ATTR:
2772 		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
2773 		break;
2774 	case IOCMD_IOC_GET_STATS:
2775 		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
2776 		break;
2777 	case IOCMD_IOC_GET_FWSTATS:
2778 		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
2779 		break;
2780 	case IOCMD_IOC_RESET_STATS:
2781 	case IOCMD_IOC_RESET_FWSTATS:
2782 		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2783 		break;
2784 	case IOCMD_IOC_SET_ADAPTER_NAME:
2785 	case IOCMD_IOC_SET_PORT_NAME:
2786 		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2787 		break;
2788 	case IOCMD_IOCFC_GET_ATTR:
2789 		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
2790 		break;
2791 	case IOCMD_IOCFC_SET_INTR:
2792 		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
2793 		break;
2794 	case IOCMD_PORT_ENABLE:
2795 		rc = bfad_iocmd_port_enable(bfad, iocmd);
2796 		break;
2797 	case IOCMD_PORT_DISABLE:
2798 		rc = bfad_iocmd_port_disable(bfad, iocmd);
2799 		break;
2800 	case IOCMD_PORT_GET_ATTR:
2801 		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
2802 		break;
2803 	case IOCMD_PORT_GET_STATS:
2804 		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
2805 		break;
2806 	case IOCMD_PORT_RESET_STATS:
2807 		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2808 		break;
2809 	case IOCMD_PORT_CFG_TOPO:
2810 	case IOCMD_PORT_CFG_SPEED:
2811 	case IOCMD_PORT_CFG_ALPA:
2812 	case IOCMD_PORT_CLR_ALPA:
2813 		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2814 		break;
2815 	case IOCMD_PORT_CFG_MAXFRSZ:
2816 		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2817 		break;
2818 	case IOCMD_PORT_BBCR_ENABLE:
2819 	case IOCMD_PORT_BBCR_DISABLE:
2820 		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
2821 		break;
2822 	case IOCMD_PORT_BBCR_GET_ATTR:
2823 		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
2824 		break;
2825 	case IOCMD_LPORT_GET_ATTR:
2826 		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
2827 		break;
2828 	case IOCMD_LPORT_GET_STATS:
2829 		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
2830 		break;
2831 	case IOCMD_LPORT_RESET_STATS:
2832 		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2833 		break;
2834 	case IOCMD_LPORT_GET_IOSTATS:
2835 		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
2836 		break;
2837 	case IOCMD_LPORT_GET_RPORTS:
2838 		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
2839 		break;
2840 	case IOCMD_RPORT_GET_ATTR:
2841 		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
2842 		break;
2843 	case IOCMD_RPORT_GET_ADDR:
2844 		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
2845 		break;
2846 	case IOCMD_RPORT_GET_STATS:
2847 		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
2848 		break;
2849 	case IOCMD_RPORT_RESET_STATS:
2850 		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2851 		break;
2852 	case IOCMD_RPORT_SET_SPEED:
2853 		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2854 		break;
2855 	case IOCMD_VPORT_GET_ATTR:
2856 		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2857 		break;
2858 	case IOCMD_VPORT_GET_STATS:
2859 		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2860 		break;
2861 	case IOCMD_VPORT_RESET_STATS:
2862 		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2863 		break;
2864 	case IOCMD_FABRIC_GET_LPORTS:
2865 		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
2866 		break;
2867 	case IOCMD_RATELIM_ENABLE:
2868 	case IOCMD_RATELIM_DISABLE:
2869 		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2870 		break;
2871 	case IOCMD_RATELIM_DEF_SPEED:
2872 		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2873 		break;
2874 	case IOCMD_FCPIM_FAILOVER:
2875 		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2876 		break;
2877 	case IOCMD_FCPIM_MODSTATS:
2878 		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
2879 		break;
2880 	case IOCMD_FCPIM_MODSTATSCLR:
2881 		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2882 		break;
2883 	case IOCMD_FCPIM_DEL_ITN_STATS:
2884 		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
2885 		break;
2886 	case IOCMD_ITNIM_GET_ATTR:
2887 		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
2888 		break;
2889 	case IOCMD_ITNIM_GET_IOSTATS:
2890 		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
2891 		break;
2892 	case IOCMD_ITNIM_RESET_STATS:
2893 		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2894 		break;
2895 	case IOCMD_ITNIM_GET_ITNSTATS:
2896 		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
2897 		break;
2898 	case IOCMD_FCPORT_ENABLE:
2899 		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
2900 		break;
2901 	case IOCMD_FCPORT_DISABLE:
2902 		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
2903 		break;
2904 	case IOCMD_IOC_PCIFN_CFG:
2905 		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
2906 		break;
2907 	case IOCMD_IOC_FW_SIG_INV:
2908 		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
2909 		break;
2910 	case IOCMD_PCIFN_CREATE:
2911 		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
2912 		break;
2913 	case IOCMD_PCIFN_DELETE:
2914 		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
2915 		break;
2916 	case IOCMD_PCIFN_BW:
2917 		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
2918 		break;
2919 	case IOCMD_ADAPTER_CFG_MODE:
2920 		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
2921 		break;
2922 	case IOCMD_PORT_CFG_MODE:
2923 		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
2924 		break;
2925 	case IOCMD_FLASH_ENABLE_OPTROM:
2926 	case IOCMD_FLASH_DISABLE_OPTROM:
2927 		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
2928 		break;
2929 	case IOCMD_FAA_QUERY:
2930 		rc = bfad_iocmd_faa_query(bfad, iocmd);
2931 		break;
2932 	case IOCMD_CEE_GET_ATTR:
2933 		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
2934 		break;
2935 	case IOCMD_CEE_GET_STATS:
2936 		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
2937 		break;
2938 	case IOCMD_CEE_RESET_STATS:
2939 		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
2940 		break;
2941 	case IOCMD_SFP_MEDIA:
2942 		rc = bfad_iocmd_sfp_media(bfad, iocmd);
2943 		break;
2944 	case IOCMD_SFP_SPEED:
2945 		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
2946 		break;
2947 	case IOCMD_FLASH_GET_ATTR:
2948 		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
2949 		break;
2950 	case IOCMD_FLASH_ERASE_PART:
2951 		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
2952 		break;
2953 	case IOCMD_FLASH_UPDATE_PART:
2954 		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
2955 		break;
2956 	case IOCMD_FLASH_READ_PART:
2957 		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
2958 		break;
2959 	case IOCMD_DIAG_TEMP:
2960 		rc = bfad_iocmd_diag_temp(bfad, iocmd);
2961 		break;
2962 	case IOCMD_DIAG_MEMTEST:
2963 		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
2964 		break;
2965 	case IOCMD_DIAG_LOOPBACK:
2966 		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
2967 		break;
2968 	case IOCMD_DIAG_FWPING:
2969 		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
2970 		break;
2971 	case IOCMD_DIAG_QUEUETEST:
2972 		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
2973 		break;
2974 	case IOCMD_DIAG_SFP:
2975 		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
2976 		break;
2977 	case IOCMD_DIAG_LED:
2978 		rc = bfad_iocmd_diag_led(bfad, iocmd);
2979 		break;
2980 	case IOCMD_DIAG_BEACON_LPORT:
2981 		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
2982 		break;
2983 	case IOCMD_DIAG_LB_STAT:
2984 		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
2985 		break;
2986 	case IOCMD_DIAG_DPORT_ENABLE:
2987 		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
2988 		break;
2989 	case IOCMD_DIAG_DPORT_DISABLE:
2990 		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
2991 		break;
2992 	case IOCMD_DIAG_DPORT_SHOW:
2993 		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
2994 		break;
2995 	case IOCMD_DIAG_DPORT_START:
2996 		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
2997 		break;
2998 	case IOCMD_PHY_GET_ATTR:
2999 		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
3000 		break;
3001 	case IOCMD_PHY_GET_STATS:
3002 		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
3003 		break;
3004 	case IOCMD_PHY_UPDATE_FW:
3005 		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
3006 		break;
3007 	case IOCMD_PHY_READ_FW:
3008 		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
3009 		break;
3010 	case IOCMD_VHBA_QUERY:
3011 		rc = bfad_iocmd_vhba_query(bfad, iocmd);
3012 		break;
3013 	case IOCMD_DEBUG_PORTLOG:
3014 		rc = bfad_iocmd_porglog_get(bfad, iocmd);
3015 		break;
3016 	case IOCMD_DEBUG_FW_CORE:
3017 		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
3018 		break;
3019 	case IOCMD_DEBUG_FW_STATE_CLR:
3020 	case IOCMD_DEBUG_PORTLOG_CLR:
3021 	case IOCMD_DEBUG_START_DTRC:
3022 	case IOCMD_DEBUG_STOP_DTRC:
3023 		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
3024 		break;
3025 	case IOCMD_DEBUG_PORTLOG_CTL:
3026 		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
3027 		break;
3028 	case IOCMD_FCPIM_PROFILE_ON:
3029 	case IOCMD_FCPIM_PROFILE_OFF:
3030 		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
3031 		break;
3032 	case IOCMD_ITNIM_GET_IOPROFILE:
3033 		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
3034 		break;
3035 	case IOCMD_FCPORT_GET_STATS:
3036 		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
3037 		break;
3038 	case IOCMD_FCPORT_RESET_STATS:
3039 		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
3040 		break;
3041 	case IOCMD_BOOT_CFG:
3042 		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
3043 		break;
3044 	case IOCMD_BOOT_QUERY:
3045 		rc = bfad_iocmd_boot_query(bfad, iocmd);
3046 		break;
3047 	case IOCMD_PREBOOT_QUERY:
3048 		rc = bfad_iocmd_preboot_query(bfad, iocmd);
3049 		break;
3050 	case IOCMD_ETHBOOT_CFG:
3051 		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
3052 		break;
3053 	case IOCMD_ETHBOOT_QUERY:
3054 		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
3055 		break;
3056 	case IOCMD_TRUNK_ENABLE:
3057 	case IOCMD_TRUNK_DISABLE:
3058 		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
3059 		break;
3060 	case IOCMD_TRUNK_GET_ATTR:
3061 		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
3062 		break;
3063 	case IOCMD_QOS_ENABLE:
3064 	case IOCMD_QOS_DISABLE:
3065 		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
3066 		break;
3067 	case IOCMD_QOS_GET_ATTR:
3068 		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
3069 		break;
3070 	case IOCMD_QOS_GET_VC_ATTR:
3071 		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
3072 		break;
3073 	case IOCMD_QOS_GET_STATS:
3074 		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
3075 		break;
3076 	case IOCMD_QOS_RESET_STATS:
3077 		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
3078 		break;
3079 	case IOCMD_QOS_SET_BW:
3080 		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
3081 		break;
3082 	case IOCMD_VF_GET_STATS:
3083 		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
3084 		break;
3085 	case IOCMD_VF_RESET_STATS:
3086 		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
3087 		break;
3088 	case IOCMD_FCPIM_LUNMASK_ENABLE:
3089 	case IOCMD_FCPIM_LUNMASK_DISABLE:
3090 	case IOCMD_FCPIM_LUNMASK_CLEAR:
3091 		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
3092 		break;
3093 	case IOCMD_FCPIM_LUNMASK_QUERY:
3094 		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
3095 		break;
3096 	case IOCMD_FCPIM_LUNMASK_ADD:
3097 	case IOCMD_FCPIM_LUNMASK_DELETE:
3098 		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
3099 		break;
3100 	case IOCMD_FCPIM_THROTTLE_QUERY:
3101 		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
3102 		break;
3103 	case IOCMD_FCPIM_THROTTLE_SET:
3104 		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
3105 		break;
3106 	/* TFRU */
3107 	case IOCMD_TFRU_READ:
3108 		rc = bfad_iocmd_tfru_read(bfad, iocmd);
3109 		break;
3110 	case IOCMD_TFRU_WRITE:
3111 		rc = bfad_iocmd_tfru_write(bfad, iocmd);
3112 		break;
3113 	/* FRU */
3114 	case IOCMD_FRUVPD_READ:
3115 		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
3116 		break;
3117 	case IOCMD_FRUVPD_UPDATE:
3118 		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
3119 		break;
3120 	case IOCMD_FRUVPD_GET_MAX_SIZE:
3121 		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
3122 		break;
3123 	default:
3124 		rc = -EINVAL;
3125 		break;
3126 	}
3127 	return rc;
3128 }
3129 
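/*
 * Handle an FC_BSG_HST_VENDOR request: copy the payload into a linear
 * kernel buffer, run the IOCMD handler, and copy the result back into
 * the reply payload sg_list.
 */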
3130 static int
3131 bfad_im_bsg_vendor_request(struct bsg_job *job)
3132 {
3133 	struct fc_bsg_request *bsg_request = job->request;
3134 	struct fc_bsg_reply *bsg_reply = job->reply;
3135 	uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
3136 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
3137 	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3138 	struct bfad_s *bfad = im_port->bfad;
3139 	void *payload_kbuf;
3140 	int rc = -EINVAL;
3141 
3142 	/* Allocate a temp buffer to hold the passed in user space command */
3143 	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3144 	if (!payload_kbuf) {
3145 		rc = -ENOMEM;
3146 		goto out;
3147 	}
3148 
3149 	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
3150 	sg_copy_to_buffer(job->request_payload.sg_list,
3151 			  job->request_payload.sg_cnt, payload_kbuf,
3152 			  job->request_payload.payload_len);
3153 
3154 	/* Invoke IOCMD handler - to handle all the vendor command requests */
3155 	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
3156 				job->request_payload.payload_len);
3157 	if (rc != BFA_STATUS_OK)
3158 		goto error;
3159 
3160 	/* Copy the response data to the job->reply_payload sg_list */
3161 	sg_copy_from_buffer(job->reply_payload.sg_list,
3162 			    job->reply_payload.sg_cnt,
3163 			    payload_kbuf,
3164 			    job->reply_payload.payload_len);
3165 
3166 	/* free the command buffer */
3167 	kfree(payload_kbuf);
3168 
3169 	/* Fill the BSG job reply data */
3170 	job->reply_len = job->reply_payload.payload_len;
3171 	bsg_reply->reply_payload_rcv_len = job->reply_payload.payload_len;
3172 	bsg_reply->result = rc;
3173 
3174 	bsg_job_done(job, bsg_reply->result,
3175 		       bsg_reply->reply_payload_rcv_len);
3176 	return rc;
3177 error:
3178 	/* free the command buffer */
3179 	kfree(payload_kbuf);
3180 out:
3181 	bsg_reply->result = rc;
3182 	job->reply_len = sizeof(uint32_t);
3183 	bsg_reply->reply_payload_rcv_len = 0;
3184 	return rc;
3185 }
3186 
3187 /* FC passthru call backs */
3188 u64
3189 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
3190 {
3191 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3192 	struct bfa_sge_s  *sge;
3193 	u64	addr;
3194 
3195 	sge = drv_fcxp->req_sge + sgeid;
3196 	addr = (u64)(size_t) sge->sg_addr;
3197 	return addr;
3198 }
3199 
3200 u32
3201 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
3202 {
3203 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3204 	struct bfa_sge_s	*sge;
3205 
3206 	sge = drv_fcxp->req_sge + sgeid;
3207 	return sge->sg_len;
3208 }
3209 
3210 u64
3211 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
3212 {
3213 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3214 	struct bfa_sge_s	*sge;
3215 	u64	addr;
3216 
3217 	sge = drv_fcxp->rsp_sge + sgeid;
3218 	addr = (u64)(size_t) sge->sg_addr;
3219 	return addr;
3220 }
3221 
3222 u32
3223 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
3224 {
3225 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3226 	struct bfa_sge_s	*sge;
3227 
3228 	sge = drv_fcxp->rsp_sge + sgeid;
3229 	return sge->sg_len;
3230 }
3231 
3232 void
3233 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
3234 		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
3235 		struct fchs_s *rsp_fchs)
3236 {
3237 	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3238 
3239 	drv_fcxp->req_status = req_status;
3240 	drv_fcxp->rsp_len = rsp_len;
3241 
3242 	/* bfa_fcxp will be automatically freed by BFA */
3243 	drv_fcxp->bfa_fcxp = NULL;
3244 	complete(&drv_fcxp->comp);
3245 }
3246 
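/*
 * Allocate a DMA-coherent bounce buffer for the passthru payload and
 * build a single-entry SG table describing it.
 */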
3247 struct bfad_buf_info *
3248 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
3249 		 uint32_t payload_len, uint32_t *num_sgles)
3250 {
3251 	struct bfad_buf_info	*buf_base, *buf_info;
3252 	struct bfa_sge_s	*sg_table;
3253 	int sge_num = 1;
3254 
3255 	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
3256 			   sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
3257 	if (!buf_base)
3258 		return NULL;
3259 
3260 	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
3261 			(sizeof(struct bfad_buf_info) * sge_num));
3262 
3263 	/* Allocate dma coherent memory */
3264 	buf_info = buf_base;
3265 	buf_info->size = payload_len;
3266 	buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
3267 					     buf_info->size, &buf_info->phys,
3268 					     GFP_KERNEL);
3269 	if (!buf_info->virt)
3270 		goto out_free_mem;
3271 
3272 	/* copy the linear bsg buffer to buf_info */
3273 	memcpy(buf_info->virt, payload_kbuf, buf_info->size);
3274 
3275 	/*
3276 	 * Setup SG table
3277 	 */
3278 	sg_table->sg_len = buf_info->size;
3279 	sg_table->sg_addr = (void *)(size_t) buf_info->phys;
3280 
3281 	*num_sgles = sge_num;
3282 
3283 	return buf_base;
3284 
3285 out_free_mem:
3286 	kfree(buf_base);
3287 	return NULL;
3288 }
3289 
3290 void
3291 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
3292 		   uint32_t num_sgles)
3293 {
3294 	int i;
3295 	struct bfad_buf_info *buf_info = buf_base;
3296 
3297 	if (buf_base) {
3298 		for (i = 0; i < num_sgles; buf_info++, i++) {
3299 			if (buf_info->virt != NULL)
3300 				dma_free_coherent(&bfad->pcidev->dev,
3301 					buf_info->size, buf_info->virt,
3302 					buf_info->phys);
3303 		}
3304 		kfree(buf_base);
3305 	}
3306 }
3307 
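/*
 * Allocate a bfa_fcxp for the passthru request and send it; completion
 * is reported through bfad_send_fcpt_cb().
 */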
3308 int
3309 bfad_fcxp_bsg_send(struct bsg_job *job, struct bfad_fcxp *drv_fcxp,
3310 		   bfa_bsg_fcpt_t *bsg_fcpt)
3311 {
3312 	struct bfa_fcxp_s *hal_fcxp;
3313 	struct bfad_s	*bfad = drv_fcxp->port->bfad;
3314 	unsigned long	flags;
3315 	uint8_t	lp_tag;
3316 
3317 	spin_lock_irqsave(&bfad->bfad_lock, flags);
3318 
3319 	/* Allocate bfa_fcxp structure */
3320 	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
3321 				  drv_fcxp->num_req_sgles,
3322 				  drv_fcxp->num_rsp_sgles,
3323 				  bfad_fcxp_get_req_sgaddr_cb,
3324 				  bfad_fcxp_get_req_sglen_cb,
3325 				  bfad_fcxp_get_rsp_sgaddr_cb,
3326 				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
3327 	if (!hal_fcxp) {
3328 		bfa_trc(bfad, 0);
3329 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3330 		return BFA_STATUS_ENOMEM;
3331 	}
3332 
3333 	drv_fcxp->bfa_fcxp = hal_fcxp;
3334 
3335 	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
3336 
3337 	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
3338 		      bsg_fcpt->cts, bsg_fcpt->cos,
3339 		      job->request_payload.payload_len,
3340 		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
3341 		      job->reply_payload.payload_len, bsg_fcpt->tsecs);
3342 
3343 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3344 
3345 	return BFA_STATUS_OK;
3346 }
3347 
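/*
 * Handle ELS/CT passthru BSG requests: copy in the user payload, map the
 * request/response buffers, send the FCXP and wait for its completion,
 * then return the response data and status to user space.
 */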
3348 int
3349 bfad_im_bsg_els_ct_request(struct bsg_job *job)
3350 {
3351 	struct bfa_bsg_data *bsg_data;
3352 	struct Scsi_Host *shost = fc_bsg_to_shost(job);
3353 	struct bfad_im_port_s *im_port = bfad_get_im_port(shost);
3354 	struct bfad_s *bfad = im_port->bfad;
3355 	bfa_bsg_fcpt_t *bsg_fcpt;
3356 	struct bfad_fcxp    *drv_fcxp;
3357 	struct bfa_fcs_lport_s *fcs_port;
3358 	struct bfa_fcs_rport_s *fcs_rport;
3359 	struct fc_bsg_request *bsg_request = job->request;
3360 	struct fc_bsg_reply *bsg_reply = job->reply;
3361 	uint32_t command_type = bsg_request->msgcode;
3362 	unsigned long flags;
3363 	struct bfad_buf_info *rsp_buf_info;
3364 	void *req_kbuf = NULL, *rsp_kbuf = NULL;
3365 	int rc = -EINVAL;
3366 
3367 	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
3368 	bsg_reply->reply_payload_rcv_len = 0;
3369 
3370 	/* Get the payload passed in from userspace */
3371 	bsg_data = (struct bfa_bsg_data *) (((char *)bsg_request) +
3372 					    sizeof(struct fc_bsg_request));
3373 	if (bsg_data == NULL)
3374 		goto out;
3375 
3376 	/*
3377 	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
3378 	 * buffer of size bsg_data->payload_len
3379 	 */
3380 	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
3381 	if (!bsg_fcpt) {
3382 		rc = -ENOMEM;
3383 		goto out;
3384 	}
3385 
3386 	if (copy_from_user((uint8_t *)bsg_fcpt,
3387 				(void *)(unsigned long)bsg_data->payload,
3388 				bsg_data->payload_len)) {
3389 		kfree(bsg_fcpt);
3390 		rc = -EIO;
3391 		goto out;
3392 	}
3393 
3394 	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
3395 	if (drv_fcxp == NULL) {
3396 		kfree(bsg_fcpt);
3397 		rc = -ENOMEM;
3398 		goto out;
3399 	}
3400 
3401 	spin_lock_irqsave(&bfad->bfad_lock, flags);
3402 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
3403 					bsg_fcpt->lpwwn);
3404 	if (fcs_port == NULL) {
3405 		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
3406 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3407 		goto out_free_mem;
3408 	}
3409 
3410 	/* Check if the port is online before sending FC Passthru cmd */
3411 	if (!bfa_fcs_lport_is_online(fcs_port)) {
3412 		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
3413 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3414 		goto out_free_mem;
3415 	}
3416 
3417 	drv_fcxp->port = fcs_port->bfad_port;
3418 
3419 	if (!drv_fcxp->port->bfad)
3420 		drv_fcxp->port->bfad = bfad;
3421 
3422 	/* Fetch the bfa_rport - if nexus needed */
3423 	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
3424 	    command_type == FC_BSG_HST_CT) {
3425 		/* BSG HST commands: no nexus needed */
3426 		drv_fcxp->bfa_rport = NULL;
3427 
3428 	} else if (command_type == FC_BSG_RPT_ELS ||
3429 		   command_type == FC_BSG_RPT_CT) {
3430 		/* BSG RPT commands: nexus needed */
3431 		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
3432 							    bsg_fcpt->dpwwn);
3433 		if (fcs_rport == NULL) {
3434 			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
3435 			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3436 			goto out_free_mem;
3437 		}
3438 
3439 		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
3440 
3441 	} else { /* Unknown BSG msgcode; return -EINVAL */
3442 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3443 		goto out_free_mem;
3444 	}
3445 
3446 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3447 
3448 	/* allocate memory for req / rsp buffers */
3449 	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3450 	if (!req_kbuf) {
3451 		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
3452 				bfad->pci_name);
3453 		rc = -ENOMEM;
3454 		goto out_free_mem;
3455 	}
3456 
3457 	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
3458 	if (!rsp_kbuf) {
3459 		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
3460 				bfad->pci_name);
3461 		rc = -ENOMEM;
3462 		goto out_free_mem;
3463 	}
3464 
3465 	/* map req sg - copy the sg_list passed in to the linear buffer */
3466 	sg_copy_to_buffer(job->request_payload.sg_list,
3467 			  job->request_payload.sg_cnt, req_kbuf,
3468 			  job->request_payload.payload_len);
3469 
3470 	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
3471 					job->request_payload.payload_len,
3472 					&drv_fcxp->num_req_sgles);
3473 	if (!drv_fcxp->reqbuf_info) {
3474 		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
3475 				bfad->pci_name);
3476 		rc = -ENOMEM;
3477 		goto out_free_mem;
3478 	}
3479 
3480 	drv_fcxp->req_sge = (struct bfa_sge_s *)
3481 			    (((uint8_t *)drv_fcxp->reqbuf_info) +
3482 			    (sizeof(struct bfad_buf_info) *
3483 					drv_fcxp->num_req_sgles));
3484 
3485 	/* map rsp sg */
3486 	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
3487 					job->reply_payload.payload_len,
3488 					&drv_fcxp->num_rsp_sgles);
3489 	if (!drv_fcxp->rspbuf_info) {
3490 		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
3491 				bfad->pci_name);
3492 		rc = -ENOMEM;
3493 		goto out_free_mem;
3494 	}
3495 
3496 	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
3497 	drv_fcxp->rsp_sge = (struct bfa_sge_s  *)
3498 			    (((uint8_t *)drv_fcxp->rspbuf_info) +
3499 			    (sizeof(struct bfad_buf_info) *
3500 					drv_fcxp->num_rsp_sgles));
3501 
3502 	/* fcxp send */
3503 	init_completion(&drv_fcxp->comp);
3504 	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
3505 	if (rc == BFA_STATUS_OK) {
3506 		wait_for_completion(&drv_fcxp->comp);
3507 		bsg_fcpt->status = drv_fcxp->req_status;
3508 	} else {
3509 		bsg_fcpt->status = rc;
3510 		goto out_free_mem;
3511 	}
3512 
3513 	/* fill the job->reply data */
3514 	if (drv_fcxp->req_status == BFA_STATUS_OK) {
3515 		job->reply_len = drv_fcxp->rsp_len;
3516 		bsg_reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
3517 		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
3518 	} else {
3519 		bsg_reply->reply_payload_rcv_len =
3520 					sizeof(struct fc_bsg_ctels_reply);
3521 		job->reply_len = sizeof(uint32_t);
3522 		bsg_reply->reply_data.ctels_reply.status =
3523 						FC_CTELS_STATUS_REJECT;
3524 	}
3525 
3526 	/* Copy the response data to the reply_payload sg list */
3527 	sg_copy_from_buffer(job->reply_payload.sg_list,
3528 			    job->reply_payload.sg_cnt,
3529 			    (uint8_t *)rsp_buf_info->virt,
3530 			    job->reply_payload.payload_len);
3531 
3532 out_free_mem:
3533 	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
3534 			   drv_fcxp->num_rsp_sgles);
3535 	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
3536 			   drv_fcxp->num_req_sgles);
3537 	kfree(req_kbuf);
3538 	kfree(rsp_kbuf);
3539 
3540 	/* Copy the response (including status) back to the user buffer */
3541 	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
3542 			(void *)bsg_fcpt, bsg_data->payload_len))
3543 		rc = -EIO;
3544 
3545 	kfree(bsg_fcpt);
3546 	kfree(drv_fcxp);
3547 out:
3548 	bsg_reply->result = rc;
3549 
3550 	if (rc == BFA_STATUS_OK)
3551 		bsg_job_done(job, bsg_reply->result,
3552 			       bsg_reply->reply_payload_rcv_len);
3553 
3554 	return rc;
3555 }
3556 
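/* Entry point for BSG requests dispatched by the FC transport class. */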
3557 int
3558 bfad_im_bsg_request(struct bsg_job *job)
3559 {
3560 	struct fc_bsg_request *bsg_request = job->request;
3561 	struct fc_bsg_reply *bsg_reply = job->reply;
3562 	int rc = BFA_STATUS_OK;
3563 
3564 	switch (bsg_request->msgcode) {
3565 	case FC_BSG_HST_VENDOR:
3566 		/* Process BSG HST Vendor requests */
3567 		rc = bfad_im_bsg_vendor_request(job);
3568 		break;
3569 	case FC_BSG_HST_ELS_NOLOGIN:
3570 	case FC_BSG_RPT_ELS:
3571 	case FC_BSG_HST_CT:
3572 	case FC_BSG_RPT_CT:
3573 		/* Process BSG ELS/CT commands */
3574 		rc = bfad_im_bsg_els_ct_request(job);
3575 		break;
3576 	default:
3577 		bsg_reply->result = rc = -EINVAL;
3578 		bsg_reply->reply_payload_rcv_len = 0;
3579 		break;
3580 	}
3581 
3582 	return rc;
3583 }
3584 
3585 int
3586 bfad_im_bsg_timeout(struct bsg_job *job)
3587 {
3588 	/* Don't complete the BSG job request - return -EAGAIN
3589 	 * to reset the bsg job timeout; for ELS/CT passthru we
3590 	 * already have a timer to track the request.
3591 	 */
3592 	return -EAGAIN;
3593 }
3594