xref: /openbmc/linux/drivers/scsi/bfa/bfad_bsg.c (revision a8fe58ce)
1 /*
2  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
3  * Copyright (c) 2014- QLogic Corporation.
4  * All rights reserved
5  * www.qlogic.com
6  *
7  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License (GPL) Version 2 as
11  * published by the Free Software Foundation
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  */
18 
19 #include <linux/uaccess.h>
20 #include "bfad_drv.h"
21 #include "bfad_im.h"
22 #include "bfad_bsg.h"
23 
24 BFA_TRC_FILE(LDRV, BSG);
25 
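/*
 * The IOCMD handlers below service the vendor-specific bsg requests. Each
 * handler receives the driver instance and the command buffer copied in
 * from user space; it performs the operation (normally under bfad_lock),
 * records the result in iocmd->status and returns 0 so the buffer is
 * copied back to the caller. Dispatch by command code is done by the bsg
 * request handling elsewhere in this driver.
 */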
26 int
27 bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
28 {
29 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
30 	unsigned long	flags;
31 
32 	spin_lock_irqsave(&bfad->bfad_lock, flags);
33 	/* If IOC is not in disabled state - return */
34 	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
35 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36 		iocmd->status = BFA_STATUS_OK;
37 		return 0;
38 	}
39 
40 	init_completion(&bfad->enable_comp);
41 	bfa_iocfc_enable(&bfad->bfa);
42 	iocmd->status = BFA_STATUS_OK;
43 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
44 	wait_for_completion(&bfad->enable_comp);
45 
46 	return 0;
47 }
48 
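/*
 * Disable the IOC: a second disable while one is already in progress is
 * rejected with -EBUSY; otherwise the request is issued under bfad_lock
 * and the handler sleeps until the disable completion is signalled.
 */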
49 int
50 bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
51 {
52 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
53 	unsigned long	flags;
54 
55 	spin_lock_irqsave(&bfad->bfad_lock, flags);
56 	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
57 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
58 		iocmd->status = BFA_STATUS_OK;
59 		return 0;
60 	}
61 
62 	if (bfad->disable_active) {
63 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
64 		return -EBUSY;
65 	}
66 
67 	bfad->disable_active = BFA_TRUE;
68 	init_completion(&bfad->disable_comp);
69 	bfa_iocfc_disable(&bfad->bfa);
70 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
71 
72 	wait_for_completion(&bfad->disable_comp);
73 	bfad->disable_active = BFA_FALSE;
74 	iocmd->status = BFA_STATUS_OK;
75 
76 	return 0;
77 }
78 
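/*
 * Collect basic IOC identification for the base port: WWNs, MACs, serial
 * number, instance and SCSI host number, plus the adapter hardware path,
 * which is derived by truncating the PCI name at its second ':'.
 */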
79 static int
80 bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
81 {
82 	int	i;
83 	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
84 	struct bfad_im_port_s	*im_port;
85 	struct bfa_port_attr_s	pattr;
86 	unsigned long	flags;
87 
88 	spin_lock_irqsave(&bfad->bfad_lock, flags);
89 	bfa_fcport_get_attr(&bfad->bfa, &pattr);
90 	iocmd->nwwn = pattr.nwwn;
91 	iocmd->pwwn = pattr.pwwn;
92 	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
93 	iocmd->mac = bfa_get_mac(&bfad->bfa);
94 	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
95 	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
96 	iocmd->factorynwwn = pattr.factorynwwn;
97 	iocmd->factorypwwn = pattr.factorypwwn;
98 	iocmd->bfad_num = bfad->inst_no;
99 	im_port = bfad->pport.im_port;
100 	iocmd->host = im_port->shost->host_no;
101 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
102 
103 	strcpy(iocmd->name, bfad->adapter_name);
104 	strcpy(iocmd->port_name, bfad->port_name);
105 	strcpy(iocmd->hwpath, bfad->pci_name);
106 
107 	/* set adapter hw path */
108 	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
109 	for (i = 0; i < BFA_STRING_32 - 1 && iocmd->adapter_hwpath[i] != ':'; i++)
110 		;
111 	for (; i < BFA_STRING_32 - 1 && iocmd->adapter_hwpath[++i] != ':'; )
112 		;
113 	iocmd->adapter_hwpath[i] = '\0';
114 	iocmd->status = BFA_STATUS_OK;
115 	return 0;
116 }
117 
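/*
 * Return the IOC attributes: the firmware-reported attributes are read
 * under bfad_lock, then the driver name/version, firmware and option ROM
 * version strings and the cached PCI attributes are filled in on top.
 */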
118 static int
119 bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
120 {
121 	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
122 	unsigned long	flags;
123 
124 	spin_lock_irqsave(&bfad->bfad_lock, flags);
125 	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
126 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
127 
128 	/* fill in driver attr info */
129 	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
130 	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
131 		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
132 	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
133 		iocmd->ioc_attr.adapter_attr.fw_ver);
134 	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
135 		iocmd->ioc_attr.adapter_attr.optrom_ver);
136 
137 	/* copy chip rev info first otherwise it will be overwritten */
138 	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
139 		sizeof(bfad->pci_attr.chip_rev));
140 	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
141 		sizeof(struct bfa_ioc_pci_attr_s));
142 
143 	iocmd->status = BFA_STATUS_OK;
144 	return 0;
145 }
146 
147 int
148 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
149 {
150 	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
151 
152 	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
153 	iocmd->status = BFA_STATUS_OK;
154 	return 0;
155 }
156 
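/*
 * Copy the firmware statistics block into the space that follows the bsg
 * command header; the payload size is validated first and a mismatch is
 * reported as BFA_STATUS_VERSION_FAIL.
 */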
157 int
158 bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
159 			unsigned int payload_len)
160 {
161 	struct bfa_bsg_ioc_fwstats_s *iocmd =
162 			(struct bfa_bsg_ioc_fwstats_s *)cmd;
163 	void	*iocmd_bufptr;
164 	unsigned long	flags;
165 
166 	if (bfad_chk_iocmd_sz(payload_len,
167 			sizeof(struct bfa_bsg_ioc_fwstats_s),
168 			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
169 		iocmd->status = BFA_STATUS_VERSION_FAIL;
170 		goto out;
171 	}
172 
173 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
174 	spin_lock_irqsave(&bfad->bfad_lock, flags);
175 	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
176 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
177 
178 	if (iocmd->status != BFA_STATUS_OK) {
179 		bfa_trc(bfad, iocmd->status);
180 		goto out;
181 	}
182 out:
183 	bfa_trc(bfad, 0x6666);
184 	return 0;
185 }
186 
187 int
188 bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
189 {
190 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
191 	unsigned long	flags;
192 
193 	if (v_cmd == IOCMD_IOC_RESET_STATS) {
194 		bfa_ioc_clear_stats(&bfad->bfa);
195 		iocmd->status = BFA_STATUS_OK;
196 	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
197 		spin_lock_irqsave(&bfad->bfad_lock, flags);
198 		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
199 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
200 	}
201 
202 	return 0;
203 }
204 
205 int
206 bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
207 {
208 	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
209 
210 	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
211 		strcpy(bfad->adapter_name, iocmd->name);
212 	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
213 		strcpy(bfad->port_name, iocmd->name);
214 
215 	iocmd->status = BFA_STATUS_OK;
216 	return 0;
217 }
218 
219 int
220 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
221 {
222 	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
223 
224 	iocmd->status = BFA_STATUS_OK;
225 	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
226 
227 	return 0;
228 }
229 
230 int
231 bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd)
232 {
233 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
234 	unsigned long flags;
235 
236 	spin_lock_irqsave(&bfad->bfad_lock, flags);
237 	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc);
238 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
239 	return 0;
240 }
241 
242 int
243 bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
244 {
245 	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
246 	unsigned long	flags;
247 
248 	spin_lock_irqsave(&bfad->bfad_lock, flags);
249 	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
250 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
251 
252 	return 0;
253 }
254 
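/*
 * The port enable/disable handlers and several others below follow the
 * same pattern: issue the asynchronous request under bfad_lock with
 * bfad_hcb_comp() as the callback, then wait on the local completion and
 * report the completion status back through iocmd->status.
 */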
255 int
256 bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
257 {
258 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
259 	struct bfad_hal_comp fcomp;
260 	unsigned long flags;
261 
262 	init_completion(&fcomp.comp);
263 	spin_lock_irqsave(&bfad->bfad_lock, flags);
264 	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
265 					bfad_hcb_comp, &fcomp);
266 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
267 	if (iocmd->status != BFA_STATUS_OK) {
268 		bfa_trc(bfad, iocmd->status);
269 		return 0;
270 	}
271 	wait_for_completion(&fcomp.comp);
272 	iocmd->status = fcomp.status;
273 	return 0;
274 }
275 
276 int
277 bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
278 {
279 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
280 	struct bfad_hal_comp fcomp;
281 	unsigned long flags;
282 
283 	init_completion(&fcomp.comp);
284 	spin_lock_irqsave(&bfad->bfad_lock, flags);
285 	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
286 				bfad_hcb_comp, &fcomp);
287 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
288 
289 	if (iocmd->status != BFA_STATUS_OK) {
290 		bfa_trc(bfad, iocmd->status);
291 		return 0;
292 	}
293 	wait_for_completion(&fcomp.comp);
294 	iocmd->status = fcomp.status;
295 	return 0;
296 }
297 
298 static int
299 bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
300 {
301 	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
302 	struct bfa_lport_attr_s	port_attr;
303 	unsigned long	flags;
304 
305 	spin_lock_irqsave(&bfad->bfad_lock, flags);
306 	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
307 	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
308 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
309 
310 	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
311 		iocmd->attr.pid = port_attr.pid;
312 	else
313 		iocmd->attr.pid = 0;
314 
315 	iocmd->attr.port_type = port_attr.port_type;
316 	iocmd->attr.loopback = port_attr.loopback;
317 	iocmd->attr.authfail = port_attr.authfail;
318 	strncpy(iocmd->attr.port_symname.symname,
319 		port_attr.port_cfg.sym_name.symname,
320 		sizeof(port_attr.port_cfg.sym_name.symname));
321 
322 	iocmd->status = BFA_STATUS_OK;
323 	return 0;
324 }
325 
326 int
327 bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
328 			unsigned int payload_len)
329 {
330 	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
331 	struct bfad_hal_comp fcomp;
332 	void	*iocmd_bufptr;
333 	unsigned long	flags;
334 
335 	if (bfad_chk_iocmd_sz(payload_len,
336 			sizeof(struct bfa_bsg_port_stats_s),
337 			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
338 		iocmd->status = BFA_STATUS_VERSION_FAIL;
339 		return 0;
340 	}
341 
342 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
343 
344 	init_completion(&fcomp.comp);
345 	spin_lock_irqsave(&bfad->bfad_lock, flags);
346 	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
347 				iocmd_bufptr, bfad_hcb_comp, &fcomp);
348 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
349 	if (iocmd->status != BFA_STATUS_OK) {
350 		bfa_trc(bfad, iocmd->status);
351 		goto out;
352 	}
353 
354 	wait_for_completion(&fcomp.comp);
355 	iocmd->status = fcomp.status;
356 out:
357 	return 0;
358 }
359 
360 int
361 bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
362 {
363 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
364 	struct bfad_hal_comp fcomp;
365 	unsigned long	flags;
366 
367 	init_completion(&fcomp.comp);
368 	spin_lock_irqsave(&bfad->bfad_lock, flags);
369 	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
370 					bfad_hcb_comp, &fcomp);
371 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
372 	if (iocmd->status != BFA_STATUS_OK) {
373 		bfa_trc(bfad, iocmd->status);
374 		return 0;
375 	}
376 	wait_for_completion(&fcomp.comp);
377 	iocmd->status = fcomp.status;
378 	return 0;
379 }
380 
381 int
382 bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
383 {
384 	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
385 	unsigned long	flags;
386 
387 	spin_lock_irqsave(&bfad->bfad_lock, flags);
388 	if (v_cmd == IOCMD_PORT_CFG_TOPO)
389 		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
390 	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
391 		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
392 	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
393 		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
394 	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
395 		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
396 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
397 
398 	return 0;
399 }
400 
401 int
402 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
403 {
404 	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
405 				(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
406 	unsigned long	flags;
407 
408 	spin_lock_irqsave(&bfad->bfad_lock, flags);
409 	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
410 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
411 
412 	return 0;
413 }
414 
415 int
416 bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
417 {
418 	struct bfa_bsg_bbcr_enable_s *iocmd =
419 			(struct bfa_bsg_bbcr_enable_s *)pcmd;
420 	unsigned long flags;
421 	int rc;
422 
423 	spin_lock_irqsave(&bfad->bfad_lock, flags);
424 	if (cmd == IOCMD_PORT_BBCR_ENABLE)
425 		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
426 	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
427 		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
428 	else {
429 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
430 		return -EINVAL;
431 	}
432 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
433 
434 	iocmd->status = rc;
435 	return 0;
436 }
437 
438 int
439 bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
440 {
441 	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
442 	unsigned long flags;
443 
444 	spin_lock_irqsave(&bfad->bfad_lock, flags);
445 	iocmd->status =
446 		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
447 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
448 
449 	return 0;
450 }
451 
452 
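/*
 * Logical-port handlers look the port up by vf_id and port WWN under
 * bfad_lock; an unknown WWN is reported as BFA_STATUS_UNKNOWN_LWWN and
 * the handler still returns 0 so the status reaches user space.
 */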
453 static int
454 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
455 {
456 	struct bfa_fcs_lport_s	*fcs_port;
457 	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
458 	unsigned long	flags;
459 
460 	spin_lock_irqsave(&bfad->bfad_lock, flags);
461 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
462 				iocmd->vf_id, iocmd->pwwn);
463 	if (fcs_port == NULL) {
464 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
465 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
466 		goto out;
467 	}
468 
469 	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
470 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
471 	iocmd->status = BFA_STATUS_OK;
472 out:
473 	return 0;
474 }
475 
476 int
477 bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
478 {
479 	struct bfa_fcs_lport_s *fcs_port;
480 	struct bfa_bsg_lport_stats_s *iocmd =
481 			(struct bfa_bsg_lport_stats_s *)cmd;
482 	unsigned long	flags;
483 
484 	spin_lock_irqsave(&bfad->bfad_lock, flags);
485 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
486 				iocmd->vf_id, iocmd->pwwn);
487 	if (fcs_port == NULL) {
488 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
489 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
490 		goto out;
491 	}
492 
493 	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
494 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
495 	iocmd->status = BFA_STATUS_OK;
496 out:
497 	return 0;
498 }
499 
500 int
501 bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
502 {
503 	struct bfa_fcs_lport_s *fcs_port;
504 	struct bfa_bsg_reset_stats_s *iocmd =
505 			(struct bfa_bsg_reset_stats_s *)cmd;
506 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
507 	struct list_head *qe, *qen;
508 	struct bfa_itnim_s *itnim;
509 	unsigned long	flags;
510 
511 	spin_lock_irqsave(&bfad->bfad_lock, flags);
512 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
513 				iocmd->vf_id, iocmd->vpwwn);
514 	if (fcs_port == NULL) {
515 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
516 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
517 		goto out;
518 	}
519 
520 	bfa_fcs_lport_clear_stats(fcs_port);
521 	/* clear IO stats from all active itnims */
522 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
523 		itnim = (struct bfa_itnim_s *) qe;
524 		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
525 			continue;
526 		bfa_itnim_clear_stats(itnim);
527 	}
528 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
529 	iocmd->status = BFA_STATUS_OK;
530 out:
531 	return 0;
532 }
533 
534 int
535 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
536 {
537 	struct bfa_fcs_lport_s *fcs_port;
538 	struct bfa_bsg_lport_iostats_s *iocmd =
539 			(struct bfa_bsg_lport_iostats_s *)cmd;
540 	unsigned long	flags;
541 
542 	spin_lock_irqsave(&bfad->bfad_lock, flags);
543 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
544 				iocmd->vf_id, iocmd->pwwn);
545 	if (fcs_port == NULL) {
546 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
547 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
548 		goto out;
549 	}
550 
551 	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
552 			fcs_port->lp_tag);
553 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
554 	iocmd->status = BFA_STATUS_OK;
555 out:
556 	return 0;
557 }
558 
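/*
 * Return the remote-port qualifiers of a logical port: the caller-supplied
 * nrports bounds the copy, the payload size is validated against it, and
 * the qualifiers are written into the buffer following the command header.
 */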
559 int
560 bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
561 			unsigned int payload_len)
562 {
563 	struct bfa_bsg_lport_get_rports_s *iocmd =
564 			(struct bfa_bsg_lport_get_rports_s *)cmd;
565 	struct bfa_fcs_lport_s *fcs_port;
566 	unsigned long	flags;
567 	void	*iocmd_bufptr;
568 
569 	if (iocmd->nrports == 0)
570 		return -EINVAL;
571 
572 	if (bfad_chk_iocmd_sz(payload_len,
573 			sizeof(struct bfa_bsg_lport_get_rports_s),
574 			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
575 			!= BFA_STATUS_OK) {
576 		iocmd->status = BFA_STATUS_VERSION_FAIL;
577 		return 0;
578 	}
579 
580 	iocmd_bufptr = (char *)iocmd +
581 			sizeof(struct bfa_bsg_lport_get_rports_s);
582 	spin_lock_irqsave(&bfad->bfad_lock, flags);
583 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
584 				iocmd->vf_id, iocmd->pwwn);
585 	if (fcs_port == NULL) {
586 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
587 		bfa_trc(bfad, 0);
588 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
589 		goto out;
590 	}
591 
592 	bfa_fcs_lport_get_rport_quals(fcs_port,
593 			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
594 			&iocmd->nrports);
595 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
596 	iocmd->status = BFA_STATUS_OK;
597 out:
598 	return 0;
599 }
600 
601 int
602 bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
603 {
604 	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
605 	struct bfa_fcs_lport_s *fcs_port;
606 	struct bfa_fcs_rport_s *fcs_rport;
607 	unsigned long	flags;
608 
609 	spin_lock_irqsave(&bfad->bfad_lock, flags);
610 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
611 				iocmd->vf_id, iocmd->pwwn);
612 	if (fcs_port == NULL) {
613 		bfa_trc(bfad, 0);
614 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
615 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
616 		goto out;
617 	}
618 
619 	if (iocmd->pid)
620 		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
621 						iocmd->rpwwn, iocmd->pid);
622 	else
623 		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
624 	if (fcs_rport == NULL) {
625 		bfa_trc(bfad, 0);
626 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
627 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
628 		goto out;
629 	}
630 
631 	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
632 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
633 	iocmd->status = BFA_STATUS_OK;
634 out:
635 	return 0;
636 }
637 
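/*
 * Translate a remote port WWN into its SCSI address: the itnim bound to
 * the rport supplies the SCSI host and target id, while bus and lun are
 * always reported as 0.
 */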
638 static int
639 bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
640 {
641 	struct bfa_bsg_rport_scsi_addr_s *iocmd =
642 			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
643 	struct bfa_fcs_lport_s	*fcs_port;
644 	struct bfa_fcs_itnim_s	*fcs_itnim;
645 	struct bfad_itnim_s	*drv_itnim;
646 	unsigned long	flags;
647 
648 	spin_lock_irqsave(&bfad->bfad_lock, flags);
649 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
650 				iocmd->vf_id, iocmd->pwwn);
651 	if (fcs_port == NULL) {
652 		bfa_trc(bfad, 0);
653 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
654 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
655 		goto out;
656 	}
657 
658 	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
659 	if (fcs_itnim == NULL) {
660 		bfa_trc(bfad, 0);
661 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
662 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
663 		goto out;
664 	}
665 
666 	drv_itnim = fcs_itnim->itnim_drv;
667 
668 	if (drv_itnim && drv_itnim->im_port)
669 		iocmd->host = drv_itnim->im_port->shost->host_no;
670 	else {
671 		bfa_trc(bfad, 0);
672 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
673 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
674 		goto out;
675 	}
676 
677 	iocmd->target = drv_itnim->scsi_tgt_id;
678 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
679 
680 	iocmd->bus = 0;
681 	iocmd->lun = 0;
682 	iocmd->status = BFA_STATUS_OK;
683 out:
684 	return 0;
685 }
686 
687 int
688 bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
689 {
690 	struct bfa_bsg_rport_stats_s *iocmd =
691 			(struct bfa_bsg_rport_stats_s *)cmd;
692 	struct bfa_fcs_lport_s *fcs_port;
693 	struct bfa_fcs_rport_s *fcs_rport;
694 	unsigned long	flags;
695 
696 	spin_lock_irqsave(&bfad->bfad_lock, flags);
697 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
698 				iocmd->vf_id, iocmd->pwwn);
699 	if (fcs_port == NULL) {
700 		bfa_trc(bfad, 0);
701 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
702 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
703 		goto out;
704 	}
705 
706 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
707 	if (fcs_rport == NULL) {
708 		bfa_trc(bfad, 0);
709 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
710 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
711 		goto out;
712 	}
713 
714 	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
715 		sizeof(struct bfa_rport_stats_s));
716 	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
717 		memcpy((void *)&iocmd->stats.hal_stats,
718 		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
719 			sizeof(struct bfa_rport_hal_stats_s));
720 	}
721 
722 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
723 	iocmd->status = BFA_STATUS_OK;
724 out:
725 	return 0;
726 }
727 
728 int
729 bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
730 {
731 	struct bfa_bsg_rport_reset_stats_s *iocmd =
732 				(struct bfa_bsg_rport_reset_stats_s *)cmd;
733 	struct bfa_fcs_lport_s *fcs_port;
734 	struct bfa_fcs_rport_s *fcs_rport;
735 	struct bfa_rport_s *rport;
736 	unsigned long	flags;
737 
738 	spin_lock_irqsave(&bfad->bfad_lock, flags);
739 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
740 				iocmd->vf_id, iocmd->pwwn);
741 	if (fcs_port == NULL) {
742 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
743 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
744 		goto out;
745 	}
746 
747 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
748 	if (fcs_rport == NULL) {
749 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
750 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
751 		goto out;
752 	}
753 
754 	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
755 	rport = bfa_fcs_rport_get_halrport(fcs_rport);
756 	if (rport)
757 		memset(&rport->stats, 0, sizeof(rport->stats));
758 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
759 	iocmd->status = BFA_STATUS_OK;
760 out:
761 	return 0;
762 }
763 
764 int
765 bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
766 {
767 	struct bfa_bsg_rport_set_speed_s *iocmd =
768 				(struct bfa_bsg_rport_set_speed_s *)cmd;
769 	struct bfa_fcs_lport_s *fcs_port;
770 	struct bfa_fcs_rport_s *fcs_rport;
771 	unsigned long	flags;
772 
773 	spin_lock_irqsave(&bfad->bfad_lock, flags);
774 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
775 				iocmd->vf_id, iocmd->pwwn);
776 	if (fcs_port == NULL) {
777 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
778 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
779 		goto out;
780 	}
781 
782 	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
783 	if (fcs_rport == NULL) {
784 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
785 		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
786 		goto out;
787 	}
788 
789 	fcs_rport->rpf.assigned_speed  = iocmd->speed;
790 	/* Set this speed in f/w only if the RPSC speed is not available */
791 	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
792 		if (fcs_rport->bfa_rport)
793 			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
794 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
795 	iocmd->status = BFA_STATUS_OK;
796 out:
797 	return 0;
798 }
799 
800 int
801 bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
802 {
803 	struct bfa_fcs_vport_s *fcs_vport;
804 	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
805 	unsigned long	flags;
806 
807 	spin_lock_irqsave(&bfad->bfad_lock, flags);
808 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
809 				iocmd->vf_id, iocmd->vpwwn);
810 	if (fcs_vport == NULL) {
811 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
812 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
813 		goto out;
814 	}
815 
816 	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
817 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
818 	iocmd->status = BFA_STATUS_OK;
819 out:
820 	return 0;
821 }
822 
823 int
824 bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
825 {
826 	struct bfa_fcs_vport_s *fcs_vport;
827 	struct bfa_bsg_vport_stats_s *iocmd =
828 				(struct bfa_bsg_vport_stats_s *)cmd;
829 	unsigned long	flags;
830 
831 	spin_lock_irqsave(&bfad->bfad_lock, flags);
832 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
833 				iocmd->vf_id, iocmd->vpwwn);
834 	if (fcs_vport == NULL) {
835 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
836 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
837 		goto out;
838 	}
839 
840 	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
841 		sizeof(struct bfa_vport_stats_s));
842 	memcpy((void *)&iocmd->vport_stats.port_stats,
843 	       (void *)&fcs_vport->lport.stats,
844 		sizeof(struct bfa_lport_stats_s));
845 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
846 	iocmd->status = BFA_STATUS_OK;
847 out:
848 	return 0;
849 }
850 
851 int
852 bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
853 {
854 	struct bfa_fcs_vport_s *fcs_vport;
855 	struct bfa_bsg_reset_stats_s *iocmd =
856 				(struct bfa_bsg_reset_stats_s *)cmd;
857 	unsigned long	flags;
858 
859 	spin_lock_irqsave(&bfad->bfad_lock, flags);
860 	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
861 				iocmd->vf_id, iocmd->vpwwn);
862 	if (fcs_vport == NULL) {
863 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
864 		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
865 		goto out;
866 	}
867 
868 	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
869 	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
870 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
871 	iocmd->status = BFA_STATUS_OK;
872 out:
873 	return 0;
874 }
875 
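/*
 * Return the port WWNs of the logical ports on the given fabric (vf_id);
 * the WWN array is written after the command header and iocmd->nports is
 * updated with the number actually returned.
 */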
876 static int
877 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
878 			unsigned int payload_len)
879 {
880 	struct bfa_bsg_fabric_get_lports_s *iocmd =
881 			(struct bfa_bsg_fabric_get_lports_s *)cmd;
882 	bfa_fcs_vf_t	*fcs_vf;
883 	uint32_t	nports = iocmd->nports;
884 	unsigned long	flags;
885 	void	*iocmd_bufptr;
886 
887 	if (nports == 0) {
888 		iocmd->status = BFA_STATUS_EINVAL;
889 		goto out;
890 	}
891 
892 	if (bfad_chk_iocmd_sz(payload_len,
893 		sizeof(struct bfa_bsg_fabric_get_lports_s),
894 		sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
895 		iocmd->status = BFA_STATUS_VERSION_FAIL;
896 		goto out;
897 	}
898 
899 	iocmd_bufptr = (char *)iocmd +
900 			sizeof(struct bfa_bsg_fabric_get_lports_s);
901 
902 	spin_lock_irqsave(&bfad->bfad_lock, flags);
903 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
904 	if (fcs_vf == NULL) {
905 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
906 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
907 		goto out;
908 	}
909 	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
910 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
911 
912 	iocmd->nports = nports;
913 	iocmd->status = BFA_STATUS_OK;
914 out:
915 	return 0;
916 }
917 
918 int
919 bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
920 {
921 	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
922 	unsigned long	flags;
923 
924 	spin_lock_irqsave(&bfad->bfad_lock, flags);
925 	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
926 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
927 
928 	return 0;
929 }
930 
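/*
 * Enable or disable target rate limiting. The change is rejected with
 * BFA_STATUS_TOPOLOGY_LOOP when the port is configured and running in
 * loop topology, and the default rate-limit speed is set to 1 Gbps if it
 * was still unknown.
 */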
931 int
932 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
933 {
934 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
935 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
936 	unsigned long	flags;
937 
938 	spin_lock_irqsave(&bfad->bfad_lock, flags);
939 
940 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
941 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
942 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
943 	else {
944 		if (cmd == IOCMD_RATELIM_ENABLE)
945 			fcport->cfg.ratelimit = BFA_TRUE;
946 		else if (cmd == IOCMD_RATELIM_DISABLE)
947 			fcport->cfg.ratelimit = BFA_FALSE;
948 
949 		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
950 			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
951 
952 		iocmd->status = BFA_STATUS_OK;
953 	}
954 
955 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
956 
957 	return 0;
958 }
959 
960 int
961 bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
962 {
963 	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
964 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
965 	unsigned long	flags;
966 
967 	spin_lock_irqsave(&bfad->bfad_lock, flags);
968 
969 	/* Auto and speeds greater than the supported speed are invalid */
970 	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
971 	    (iocmd->speed > fcport->speed_sup)) {
972 		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
973 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
974 		return 0;
975 	}
976 
977 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
978 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
979 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
980 	else {
981 		fcport->cfg.trl_def_speed = iocmd->speed;
982 		iocmd->status = BFA_STATUS_OK;
983 	}
984 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
985 
986 	return 0;
987 }
988 
989 int
990 bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
991 {
992 	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
993 	unsigned long	flags;
994 
995 	spin_lock_irqsave(&bfad->bfad_lock, flags);
996 	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
997 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
998 	iocmd->status = BFA_STATUS_OK;
999 	return 0;
1000 }
1001 
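/*
 * FCP initiator-mode module statistics are produced by summing the IO
 * statistics of every itnim currently queued on the fcpim module.
 */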
1002 int
1003 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
1004 {
1005 	struct bfa_bsg_fcpim_modstats_s *iocmd =
1006 			(struct bfa_bsg_fcpim_modstats_s *)cmd;
1007 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1008 	struct list_head *qe, *qen;
1009 	struct bfa_itnim_s *itnim;
1010 	unsigned long	flags;
1011 
1012 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1013 	/* accumulate IO stats from itnim */
1014 	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
1015 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1016 		itnim = (struct bfa_itnim_s *) qe;
1017 		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
1018 	}
1019 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1020 	iocmd->status = BFA_STATUS_OK;
1021 	return 0;
1022 }
1023 
1024 int
1025 bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
1026 {
1027 	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
1028 				(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
1029 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1030 	struct list_head *qe, *qen;
1031 	struct bfa_itnim_s *itnim;
1032 	unsigned long	flags;
1033 
1034 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1035 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
1036 		itnim = (struct bfa_itnim_s *) qe;
1037 		bfa_itnim_clear_stats(itnim);
1038 	}
1039 	memset(&fcpim->del_itn_stats, 0,
1040 		sizeof(struct bfa_fcpim_del_itn_stats_s));
1041 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1042 	iocmd->status = BFA_STATUS_OK;
1043 	return 0;
1044 }
1045 
1046 int
1047 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
1048 {
1049 	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
1050 			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
1051 	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
1052 	unsigned long	flags;
1053 
1054 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1055 	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
1056 		sizeof(struct bfa_fcpim_del_itn_stats_s));
1057 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1058 
1059 	iocmd->status = BFA_STATUS_OK;
1060 	return 0;
1061 }
1062 
1063 static int
1064 bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
1065 {
1066 	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
1067 	struct bfa_fcs_lport_s	*fcs_port;
1068 	unsigned long	flags;
1069 
1070 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1071 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1072 				iocmd->vf_id, iocmd->lpwwn);
1073 	if (!fcs_port)
1074 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1075 	else
1076 		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
1077 					iocmd->rpwwn, &iocmd->attr);
1078 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1079 	return 0;
1080 }
1081 
1082 static int
1083 bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
1084 {
1085 	struct bfa_bsg_itnim_iostats_s *iocmd =
1086 			(struct bfa_bsg_itnim_iostats_s *)cmd;
1087 	struct bfa_fcs_lport_s *fcs_port;
1088 	struct bfa_fcs_itnim_s *itnim;
1089 	unsigned long	flags;
1090 
1091 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1092 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1093 				iocmd->vf_id, iocmd->lpwwn);
1094 	if (!fcs_port) {
1095 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1096 		bfa_trc(bfad, 0);
1097 	} else {
1098 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1099 		if (itnim == NULL)
1100 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1101 		else {
1102 			iocmd->status = BFA_STATUS_OK;
1103 			if (bfa_fcs_itnim_get_halitn(itnim))
1104 				memcpy((void *)&iocmd->iostats, (void *)
1105 				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
1106 				       sizeof(struct bfa_itnim_iostats_s));
1107 		}
1108 	}
1109 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1110 	return 0;
1111 }
1112 
1113 static int
1114 bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1115 {
1116 	struct bfa_bsg_rport_reset_stats_s *iocmd =
1117 			(struct bfa_bsg_rport_reset_stats_s *)cmd;
1118 	struct bfa_fcs_lport_s	*fcs_port;
1119 	struct bfa_fcs_itnim_s	*itnim;
1120 	unsigned long	flags;
1121 
1122 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1123 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1124 				iocmd->vf_id, iocmd->pwwn);
1125 	if (!fcs_port)
1126 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1127 	else {
1128 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1129 		if (itnim == NULL)
1130 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1131 		else {
1132 			iocmd->status = BFA_STATUS_OK;
1133 			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1134 			if (bfa_fcs_itnim_get_halitn(itnim))
				bfa_itnim_clear_stats(
					bfa_fcs_itnim_get_halitn(itnim));
1135 		}
1136 	}
1137 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1138 
1139 	return 0;
1140 }
1141 
1142 static int
1143 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
1144 {
1145 	struct bfa_bsg_itnim_itnstats_s *iocmd =
1146 			(struct bfa_bsg_itnim_itnstats_s *)cmd;
1147 	struct bfa_fcs_lport_s *fcs_port;
1148 	struct bfa_fcs_itnim_s *itnim;
1149 	unsigned long	flags;
1150 
1151 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1152 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1153 				iocmd->vf_id, iocmd->lpwwn);
1154 	if (!fcs_port) {
1155 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1156 		bfa_trc(bfad, 0);
1157 	} else {
1158 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1159 		if (itnim == NULL)
1160 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1161 		else {
1162 			iocmd->status = BFA_STATUS_OK;
1163 			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
1164 					&iocmd->itnstats);
1165 		}
1166 	}
1167 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1168 	return 0;
1169 }
1170 
1171 int
1172 bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
1173 {
1174 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1175 	unsigned long flags;
1176 
1177 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1178 	iocmd->status = bfa_fcport_enable(&bfad->bfa);
1179 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1180 
1181 	return 0;
1182 }
1183 
1184 int
1185 bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
1186 {
1187 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1188 	unsigned long flags;
1189 
1190 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1191 	iocmd->status = bfa_fcport_disable(&bfad->bfa);
1192 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1193 
1194 	return 0;
1195 }
1196 
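/*
 * The PCI-function (ablk) handlers below query, create, delete and
 * reconfigure PCI functions and adapter/port modes through the ablk
 * module, using the usual completion-callback pattern.
 */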
1197 int
1198 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
1199 {
1200 	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
1201 	struct bfad_hal_comp fcomp;
1202 	unsigned long flags;
1203 
1204 	init_completion(&fcomp.comp);
1205 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1206 	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
1207 				&iocmd->pcifn_cfg,
1208 				bfad_hcb_comp, &fcomp);
1209 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1210 	if (iocmd->status != BFA_STATUS_OK)
1211 		goto out;
1212 
1213 	wait_for_completion(&fcomp.comp);
1214 	iocmd->status = fcomp.status;
1215 out:
1216 	return 0;
1217 }
1218 
1219 int
1220 bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1221 {
1222 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1223 	struct bfad_hal_comp fcomp;
1224 	unsigned long flags;
1225 
1226 	init_completion(&fcomp.comp);
1227 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1228 	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1229 				&iocmd->pcifn_id, iocmd->port,
1230 				iocmd->pcifn_class, iocmd->bw_min,
1231 				iocmd->bw_max, bfad_hcb_comp, &fcomp);
1232 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1233 	if (iocmd->status != BFA_STATUS_OK)
1234 		goto out;
1235 
1236 	wait_for_completion(&fcomp.comp);
1237 	iocmd->status = fcomp.status;
1238 out:
1239 	return 0;
1240 }
1241 
1242 int
1243 bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
1244 {
1245 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1246 	struct bfad_hal_comp fcomp;
1247 	unsigned long flags;
1248 
1249 	init_completion(&fcomp.comp);
1250 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1251 	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
1252 				iocmd->pcifn_id,
1253 				bfad_hcb_comp, &fcomp);
1254 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1255 	if (iocmd->status != BFA_STATUS_OK)
1256 		goto out;
1257 
1258 	wait_for_completion(&fcomp.comp);
1259 	iocmd->status = fcomp.status;
1260 out:
1261 	return 0;
1262 }
1263 
1264 int
1265 bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1266 {
1267 	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1268 	struct bfad_hal_comp fcomp;
1269 	unsigned long flags;
1270 
1271 	init_completion(&fcomp.comp);
1272 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1273 	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1274 				iocmd->pcifn_id, iocmd->bw_min,
1275 				iocmd->bw_max, bfad_hcb_comp, &fcomp);
1276 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1277 	bfa_trc(bfad, iocmd->status);
1278 	if (iocmd->status != BFA_STATUS_OK)
1279 		goto out;
1280 
1281 	wait_for_completion(&fcomp.comp);
1282 	iocmd->status = fcomp.status;
1283 	bfa_trc(bfad, iocmd->status);
1284 out:
1285 	return 0;
1286 }
1287 
1288 int
1289 bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
1290 {
1291 	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
1292 			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
1293 	struct bfad_hal_comp fcomp;
1294 	unsigned long flags = 0;
1295 
1296 	init_completion(&fcomp.comp);
1297 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1298 	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
1299 				iocmd->cfg.mode, iocmd->cfg.max_pf,
1300 				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
1301 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1302 	if (iocmd->status != BFA_STATUS_OK)
1303 		goto out;
1304 
1305 	wait_for_completion(&fcomp.comp);
1306 	iocmd->status = fcomp.status;
1307 out:
1308 	return 0;
1309 }
1310 
1311 int
1312 bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
1313 {
1314 	struct bfa_bsg_port_cfg_mode_s *iocmd =
1315 			(struct bfa_bsg_port_cfg_mode_s *)cmd;
1316 	struct bfad_hal_comp fcomp;
1317 	unsigned long flags = 0;
1318 
1319 	init_completion(&fcomp.comp);
1320 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1321 	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
1322 				iocmd->instance, iocmd->cfg.mode,
1323 				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
1324 				bfad_hcb_comp, &fcomp);
1325 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1326 	if (iocmd->status != BFA_STATUS_OK)
1327 		goto out;
1328 
1329 	wait_for_completion(&fcomp.comp);
1330 	iocmd->status = fcomp.status;
1331 out:
1332 	return 0;
1333 }
1334 
1335 int
1336 bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1337 {
1338 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1339 	struct bfad_hal_comp fcomp;
1340 	unsigned long   flags;
1341 
1342 	init_completion(&fcomp.comp);
1343 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1344 	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
1345 		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
1346 					bfad_hcb_comp, &fcomp);
1347 	else
1348 		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
1349 					bfad_hcb_comp, &fcomp);
1350 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1351 
1352 	if (iocmd->status != BFA_STATUS_OK)
1353 		goto out;
1354 
1355 	wait_for_completion(&fcomp.comp);
1356 	iocmd->status = fcomp.status;
1357 out:
1358 	return 0;
1359 }
1360 
1361 int
1362 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
1363 {
1364 	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
1365 	struct bfad_hal_comp    fcomp;
1366 	unsigned long   flags;
1367 
1368 	init_completion(&fcomp.comp);
1369 	iocmd->status = BFA_STATUS_OK;
1370 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1371 	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
1372 				bfad_hcb_comp, &fcomp);
1373 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1374 
1375 	if (iocmd->status != BFA_STATUS_OK)
1376 		goto out;
1377 
1378 	wait_for_completion(&fcomp.comp);
1379 	iocmd->status = fcomp.status;
1380 out:
1381 	return 0;
1382 }
1383 
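/*
 * Read the DCB/CEE attributes into the area following the command header;
 * the request is additionally serialized with bfad_mutex while waiting
 * for the completion.
 */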
1384 int
1385 bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1386 {
1387 	struct bfa_bsg_cee_attr_s *iocmd =
1388 				(struct bfa_bsg_cee_attr_s *)cmd;
1389 	void	*iocmd_bufptr;
1390 	struct bfad_hal_comp	cee_comp;
1391 	unsigned long	flags;
1392 
1393 	if (bfad_chk_iocmd_sz(payload_len,
1394 			sizeof(struct bfa_bsg_cee_attr_s),
1395 			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
1396 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1397 		return 0;
1398 	}
1399 
1400 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
1401 
1402 	cee_comp.status = 0;
1403 	init_completion(&cee_comp.comp);
1404 	mutex_lock(&bfad_mutex);
1405 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1406 	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
1407 					 bfad_hcb_comp, &cee_comp);
1408 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1409 	if (iocmd->status != BFA_STATUS_OK) {
1410 		mutex_unlock(&bfad_mutex);
1411 		bfa_trc(bfad, 0x5555);
1412 		goto out;
1413 	}
1414 	wait_for_completion(&cee_comp.comp);
1415 	mutex_unlock(&bfad_mutex);
1416 out:
1417 	return 0;
1418 }
1419 
1420 int
1421 bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
1422 			unsigned int payload_len)
1423 {
1424 	struct bfa_bsg_cee_stats_s *iocmd =
1425 				(struct bfa_bsg_cee_stats_s *)cmd;
1426 	void	*iocmd_bufptr;
1427 	struct bfad_hal_comp	cee_comp;
1428 	unsigned long	flags;
1429 
1430 	if (bfad_chk_iocmd_sz(payload_len,
1431 			sizeof(struct bfa_bsg_cee_stats_s),
1432 			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1433 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1434 		return 0;
1435 	}
1436 
1437 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1438 
1439 	cee_comp.status = 0;
1440 	init_completion(&cee_comp.comp);
1441 	mutex_lock(&bfad_mutex);
1442 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1443 	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1444 					bfad_hcb_comp, &cee_comp);
1445 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1446 	if (iocmd->status != BFA_STATUS_OK) {
1447 		mutex_unlock(&bfad_mutex);
1448 		bfa_trc(bfad, 0x5555);
1449 		goto out;
1450 	}
1451 	wait_for_completion(&cee_comp.comp);
1452 	mutex_unlock(&bfad_mutex);
1453 out:
1454 	return 0;
1455 }
1456 
1457 int
1458 bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1459 {
1460 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1461 	unsigned long	flags;
1462 
1463 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1464 	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1465 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1466 	if (iocmd->status != BFA_STATUS_OK)
1467 		bfa_trc(bfad, 0x5555);
1468 	return 0;
1469 }
1470 
1471 int
1472 bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1473 {
1474 	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1475 	struct bfad_hal_comp	fcomp;
1476 	unsigned long	flags;
1477 
1478 	init_completion(&fcomp.comp);
1479 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1480 	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1481 				bfad_hcb_comp, &fcomp);
1482 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1483 	bfa_trc(bfad, iocmd->status);
1484 	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1485 		goto out;
1486 
1487 	wait_for_completion(&fcomp.comp);
1488 	iocmd->status = fcomp.status;
1489 out:
1490 	return 0;
1491 }
1492 
1493 int
1494 bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1495 {
1496 	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1497 	struct bfad_hal_comp	fcomp;
1498 	unsigned long	flags;
1499 
1500 	init_completion(&fcomp.comp);
1501 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1502 	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1503 				bfad_hcb_comp, &fcomp);
1504 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1505 	bfa_trc(bfad, iocmd->status);
1506 	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1507 		goto out;
1508 	wait_for_completion(&fcomp.comp);
1509 	iocmd->status = fcomp.status;
1510 out:
1511 	return 0;
1512 }
1513 
1514 int
1515 bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1516 {
1517 	struct bfa_bsg_flash_attr_s *iocmd =
1518 			(struct bfa_bsg_flash_attr_s *)cmd;
1519 	struct bfad_hal_comp fcomp;
1520 	unsigned long	flags;
1521 
1522 	init_completion(&fcomp.comp);
1523 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1524 	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1525 				bfad_hcb_comp, &fcomp);
1526 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1527 	if (iocmd->status != BFA_STATUS_OK)
1528 		goto out;
1529 	wait_for_completion(&fcomp.comp);
1530 	iocmd->status = fcomp.status;
1531 out:
1532 	return 0;
1533 }
1534 
1535 int
1536 bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1537 {
1538 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1539 	struct bfad_hal_comp fcomp;
1540 	unsigned long	flags;
1541 
1542 	init_completion(&fcomp.comp);
1543 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1544 	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1545 				iocmd->instance, bfad_hcb_comp, &fcomp);
1546 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1547 	if (iocmd->status != BFA_STATUS_OK)
1548 		goto out;
1549 	wait_for_completion(&fcomp.comp);
1550 	iocmd->status = fcomp.status;
1551 out:
1552 	return 0;
1553 }
1554 
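/*
 * Write the user-supplied image that follows the command header into the
 * selected flash partition instance; the read counterpart below returns a
 * partition's contents the same way.
 */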
1555 int
1556 bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1557 			unsigned int payload_len)
1558 {
1559 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1560 	void	*iocmd_bufptr;
1561 	struct bfad_hal_comp fcomp;
1562 	unsigned long	flags;
1563 
1564 	if (bfad_chk_iocmd_sz(payload_len,
1565 			sizeof(struct bfa_bsg_flash_s),
1566 			iocmd->bufsz) != BFA_STATUS_OK) {
1567 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1568 		return 0;
1569 	}
1570 
1571 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1572 
1573 	init_completion(&fcomp.comp);
1574 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1575 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1576 				iocmd->type, iocmd->instance, iocmd_bufptr,
1577 				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1578 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1579 	if (iocmd->status != BFA_STATUS_OK)
1580 		goto out;
1581 	wait_for_completion(&fcomp.comp);
1582 	iocmd->status = fcomp.status;
1583 out:
1584 	return 0;
1585 }
1586 
1587 int
1588 bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1589 			unsigned int payload_len)
1590 {
1591 	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1592 	struct bfad_hal_comp fcomp;
1593 	void	*iocmd_bufptr;
1594 	unsigned long	flags;
1595 
1596 	if (bfad_chk_iocmd_sz(payload_len,
1597 			sizeof(struct bfa_bsg_flash_s),
1598 			iocmd->bufsz) != BFA_STATUS_OK) {
1599 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1600 		return 0;
1601 	}
1602 
1603 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1604 
1605 	init_completion(&fcomp.comp);
1606 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1607 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1608 				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1609 				bfad_hcb_comp, &fcomp);
1610 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1611 	if (iocmd->status != BFA_STATUS_OK)
1612 		goto out;
1613 	wait_for_completion(&fcomp.comp);
1614 	iocmd->status = fcomp.status;
1615 out:
1616 	return 0;
1617 }
1618 
1619 int
1620 bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1621 {
1622 	struct bfa_bsg_diag_get_temp_s *iocmd =
1623 			(struct bfa_bsg_diag_get_temp_s *)cmd;
1624 	struct bfad_hal_comp fcomp;
1625 	unsigned long	flags;
1626 
1627 	init_completion(&fcomp.comp);
1628 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1629 	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1630 				&iocmd->result, bfad_hcb_comp, &fcomp);
1631 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1632 	bfa_trc(bfad, iocmd->status);
1633 	if (iocmd->status != BFA_STATUS_OK)
1634 		goto out;
1635 	wait_for_completion(&fcomp.comp);
1636 	iocmd->status = fcomp.status;
1637 out:
1638 	return 0;
1639 }
1640 
1641 int
1642 bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1643 {
1644 	struct bfa_bsg_diag_memtest_s *iocmd =
1645 			(struct bfa_bsg_diag_memtest_s *)cmd;
1646 	struct bfad_hal_comp fcomp;
1647 	unsigned long   flags;
1648 
1649 	init_completion(&fcomp.comp);
1650 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1651 	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1652 				&iocmd->memtest, iocmd->pat,
1653 				&iocmd->result, bfad_hcb_comp, &fcomp);
1654 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1655 	bfa_trc(bfad, iocmd->status);
1656 	if (iocmd->status != BFA_STATUS_OK)
1657 		goto out;
1658 	wait_for_completion(&fcomp.comp);
1659 	iocmd->status = fcomp.status;
1660 out:
1661 	return 0;
1662 }
1663 
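/*
 * Run the loopback diagnostic with the requested opmode, speed, frame
 * count and data pattern, and return the result once the test completes.
 */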
1664 int
1665 bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1666 {
1667 	struct bfa_bsg_diag_loopback_s *iocmd =
1668 			(struct bfa_bsg_diag_loopback_s *)cmd;
1669 	struct bfad_hal_comp fcomp;
1670 	unsigned long   flags;
1671 
1672 	init_completion(&fcomp.comp);
1673 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1674 	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1675 				iocmd->speed, iocmd->lpcnt, iocmd->pat,
1676 				&iocmd->result, bfad_hcb_comp, &fcomp);
1677 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1678 	bfa_trc(bfad, iocmd->status);
1679 	if (iocmd->status != BFA_STATUS_OK)
1680 		goto out;
1681 	wait_for_completion(&fcomp.comp);
1682 	iocmd->status = fcomp.status;
1683 out:
1684 	return 0;
1685 }
1686 
1687 int
1688 bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1689 {
1690 	struct bfa_bsg_diag_fwping_s *iocmd =
1691 			(struct bfa_bsg_diag_fwping_s *)cmd;
1692 	struct bfad_hal_comp fcomp;
1693 	unsigned long   flags;
1694 
1695 	init_completion(&fcomp.comp);
1696 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1697 	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1698 				iocmd->pattern, &iocmd->result,
1699 				bfad_hcb_comp, &fcomp);
1700 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1701 	bfa_trc(bfad, iocmd->status);
1702 	if (iocmd->status != BFA_STATUS_OK)
1703 		goto out;
1704 	bfa_trc(bfad, 0x77771);
1705 	wait_for_completion(&fcomp.comp);
1706 	iocmd->status = fcomp.status;
1707 out:
1708 	return 0;
1709 }
1710 
1711 int
1712 bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1713 {
1714 	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1715 	struct bfad_hal_comp fcomp;
1716 	unsigned long   flags;
1717 
1718 	init_completion(&fcomp.comp);
1719 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1720 	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1721 				iocmd->queue, &iocmd->result,
1722 				bfad_hcb_comp, &fcomp);
1723 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1724 	if (iocmd->status != BFA_STATUS_OK)
1725 		goto out;
1726 	wait_for_completion(&fcomp.comp);
1727 	iocmd->status = fcomp.status;
1728 out:
1729 	return 0;
1730 }
1731 
1732 int
1733 bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1734 {
1735 	struct bfa_bsg_sfp_show_s *iocmd =
1736 			(struct bfa_bsg_sfp_show_s *)cmd;
1737 	struct bfad_hal_comp fcomp;
1738 	unsigned long   flags;
1739 
1740 	init_completion(&fcomp.comp);
1741 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1742 	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1743 				bfad_hcb_comp, &fcomp);
1744 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1745 	bfa_trc(bfad, iocmd->status);
1746 	if (iocmd->status != BFA_STATUS_OK)
1747 		goto out;
1748 	wait_for_completion(&fcomp.comp);
1749 	iocmd->status = fcomp.status;
1750 	bfa_trc(bfad, iocmd->status);
1751 out:
1752 	return 0;
1753 }
1754 
1755 int
1756 bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1757 {
1758 	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1759 	unsigned long   flags;
1760 
1761 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1762 	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1763 				&iocmd->ledtest);
1764 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1765 	return 0;
1766 }
1767 
1768 int
1769 bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1770 {
1771 	struct bfa_bsg_diag_beacon_s *iocmd =
1772 			(struct bfa_bsg_diag_beacon_s *)cmd;
1773 	unsigned long	flags;
1774 
1775 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1776 	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1777 				iocmd->beacon, iocmd->link_e2e_beacon,
1778 				iocmd->second);
1779 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1780 	return 0;
1781 }
1782 
1783 int
1784 bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1785 {
1786 	struct bfa_bsg_diag_lb_stat_s *iocmd =
1787 			(struct bfa_bsg_diag_lb_stat_s *)cmd;
1788 	unsigned long	flags;
1789 
1790 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1791 	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1792 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1793 	bfa_trc(bfad, iocmd->status);
1794 
1795 	return 0;
1796 }
1797 
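/*
 * D-port (diagnostic port) test control: enable, disable, start and show
 * results. Enable, disable and start wait for the firmware completion
 * before reporting status; show reads the results synchronously.
 */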
1798 int
1799 bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
1800 {
1801 	struct bfa_bsg_dport_enable_s *iocmd =
1802 				(struct bfa_bsg_dport_enable_s *)pcmd;
1803 	unsigned long	flags;
1804 	struct bfad_hal_comp fcomp;
1805 
1806 	init_completion(&fcomp.comp);
1807 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1808 	iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
1809 					iocmd->pat, bfad_hcb_comp, &fcomp);
1810 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1811 	if (iocmd->status != BFA_STATUS_OK) {
1812 		bfa_trc(bfad, iocmd->status);
1813 	} else {
1814 		wait_for_completion(&fcomp.comp);
1815 		iocmd->status = fcomp.status;
1816 	}
1817 	return 0;
1818 }
1819 
1820 int
1821 bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
1822 {
1823 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1824 	unsigned long	flags;
1825 	struct bfad_hal_comp fcomp;
1826 
1827 	init_completion(&fcomp.comp);
1828 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1829 	iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
1830 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1831 	if (iocmd->status != BFA_STATUS_OK) {
1832 		bfa_trc(bfad, iocmd->status);
1833 	} else {
1834 		wait_for_completion(&fcomp.comp);
1835 		iocmd->status = fcomp.status;
1836 	}
1837 	return 0;
1838 }
1839 
1840 int
1841 bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
1842 {
1843 	struct bfa_bsg_dport_enable_s *iocmd =
1844 				(struct bfa_bsg_dport_enable_s *)pcmd;
1845 	unsigned long   flags;
1846 	struct bfad_hal_comp fcomp;
1847 
1848 	init_completion(&fcomp.comp);
1849 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1850 	iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
1851 					iocmd->pat, bfad_hcb_comp,
1852 					&fcomp);
1853 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1854 
1855 	if (iocmd->status != BFA_STATUS_OK) {
1856 		bfa_trc(bfad, iocmd->status);
1857 	} else {
1858 		wait_for_completion(&fcomp.comp);
1859 		iocmd->status = fcomp.status;
1860 	}
1861 
1862 	return 0;
1863 }
1864 
1865 int
1866 bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
1867 {
1868 	struct bfa_bsg_diag_dport_show_s *iocmd =
1869 				(struct bfa_bsg_diag_dport_show_s *)pcmd;
1870 	unsigned long   flags;
1871 
1872 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1873 	iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
1874 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1875 
1876 	return 0;
1877 }
1878 
1879 
1880 int
1881 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1882 {
1883 	struct bfa_bsg_phy_attr_s *iocmd =
1884 			(struct bfa_bsg_phy_attr_s *)cmd;
1885 	struct bfad_hal_comp fcomp;
1886 	unsigned long	flags;
1887 
1888 	init_completion(&fcomp.comp);
1889 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1890 	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1891 				&iocmd->attr, bfad_hcb_comp, &fcomp);
1892 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1893 	if (iocmd->status != BFA_STATUS_OK)
1894 		goto out;
1895 	wait_for_completion(&fcomp.comp);
1896 	iocmd->status = fcomp.status;
1897 out:
1898 	return 0;
1899 }
1900 
1901 int
1902 bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1903 {
1904 	struct bfa_bsg_phy_stats_s *iocmd =
1905 			(struct bfa_bsg_phy_stats_s *)cmd;
1906 	struct bfad_hal_comp fcomp;
1907 	unsigned long	flags;
1908 
1909 	init_completion(&fcomp.comp);
1910 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1911 	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1912 				&iocmd->stats, bfad_hcb_comp, &fcomp);
1913 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1914 	if (iocmd->status != BFA_STATUS_OK)
1915 		goto out;
1916 	wait_for_completion(&fcomp.comp);
1917 	iocmd->status = fcomp.status;
1918 out:
1919 	return 0;
1920 }
1921 
1922 int
1923 bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1924 {
1925 	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1926 	struct bfad_hal_comp fcomp;
1927 	void	*iocmd_bufptr;
1928 	unsigned long	flags;
1929 
1930 	if (bfad_chk_iocmd_sz(payload_len,
1931 			sizeof(struct bfa_bsg_phy_s),
1932 			iocmd->bufsz) != BFA_STATUS_OK) {
1933 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1934 		return 0;
1935 	}
1936 
1937 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1938 	init_completion(&fcomp.comp);
1939 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1940 	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1941 				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1942 				0, bfad_hcb_comp, &fcomp);
1943 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1944 	if (iocmd->status != BFA_STATUS_OK)
1945 		goto out;
1946 	wait_for_completion(&fcomp.comp);
1947 	iocmd->status = fcomp.status;
1948 	if (iocmd->status != BFA_STATUS_OK)
1949 		goto out;
1950 out:
1951 	return 0;
1952 }
1953 
1954 int
1955 bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
1956 {
1957 	struct bfa_bsg_vhba_attr_s *iocmd =
1958 			(struct bfa_bsg_vhba_attr_s *)cmd;
1959 	struct bfa_vhba_attr_s *attr = &iocmd->attr;
1960 	unsigned long flags;
1961 
1962 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1963 	attr->pwwn =  bfad->bfa.ioc.attr->pwwn;
1964 	attr->nwwn =  bfad->bfa.ioc.attr->nwwn;
1965 	attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
1966 	attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
1967 	attr->path_tov  = bfa_fcpim_path_tov_get(&bfad->bfa);
1968 	iocmd->status = BFA_STATUS_OK;
1969 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1970 	return 0;
1971 }
1972 
1973 int
1974 bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1975 {
1976 	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1977 	void	*iocmd_bufptr;
1978 	struct bfad_hal_comp fcomp;
1979 	unsigned long	flags;
1980 
1981 	if (bfad_chk_iocmd_sz(payload_len,
1982 			sizeof(struct bfa_bsg_phy_s),
1983 			iocmd->bufsz) != BFA_STATUS_OK) {
1984 		iocmd->status = BFA_STATUS_VERSION_FAIL;
1985 		return 0;
1986 	}
1987 
1988 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1989 	init_completion(&fcomp.comp);
1990 	spin_lock_irqsave(&bfad->bfad_lock, flags);
1991 	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1992 				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1993 				0, bfad_hcb_comp, &fcomp);
1994 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1995 	if (iocmd->status != BFA_STATUS_OK)
1996 		goto out;
1997 	wait_for_completion(&fcomp.comp);
1998 	iocmd->status = fcomp.status;
1999 out:
2000 	return 0;
2001 }
2002 
2003 int
2004 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
2005 {
2006 	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
2007 	void *iocmd_bufptr;
2008 
2009 	if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
2010 		bfa_trc(bfad, sizeof(struct bfa_plog_s));
2011 		iocmd->status = BFA_STATUS_EINVAL;
2012 		goto out;
2013 	}
2014 
2015 	iocmd->status = BFA_STATUS_OK;
2016 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2017 	memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
2018 out:
2019 	return 0;
2020 }
2021 
2022 #define BFA_DEBUG_FW_CORE_CHUNK_SZ	0x4000U /* 16K chunks for FW dump */
2023 int
2024 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
2025 			unsigned int payload_len)
2026 {
2027 	struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
2028 	void	*iocmd_bufptr;
2029 	unsigned long	flags;
2030 	u32 offset;
2031 
2032 	if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
2033 			BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
2034 		iocmd->status = BFA_STATUS_VERSION_FAIL;
2035 		return 0;
2036 	}
2037 
2038 	if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
2039 			!IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
2040 			!IS_ALIGNED(iocmd->offset, sizeof(u32))) {
2041 		bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
2042 		iocmd->status = BFA_STATUS_EINVAL;
2043 		goto out;
2044 	}
2045 
2046 	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
2047 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2048 	offset = iocmd->offset;
2049 	iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
2050 				&offset, &iocmd->bufsz);
2051 	iocmd->offset = offset;
2052 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2053 out:
2054 	return 0;
2055 }
2056 
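/*
 * Handle the debug control commands (FW state clear, port log clear,
 * driver trace start/stop).
 */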
2057 int
2058 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2059 {
2060 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2061 	unsigned long	flags;
2062 
2063 	if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
2064 		spin_lock_irqsave(&bfad->bfad_lock, flags);
2065 		bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
2066 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2067 	} else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
2068 		bfad->plog_buf.head = bfad->plog_buf.tail = 0;
2069 	else if (v_cmd == IOCMD_DEBUG_START_DTRC)
2070 		bfa_trc_init(bfad->trcmod);
2071 	else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
2072 		bfa_trc_stop(bfad->trcmod);
2073 
2074 	iocmd->status = BFA_STATUS_OK;
2075 	return 0;
2076 }
2077 
2078 int
2079 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
2080 {
2081 	struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
2082 
2083 	if (iocmd->ctl == BFA_TRUE)
2084 		bfad->plog_buf.plog_enabled = 1;
2085 	else
2086 		bfad->plog_buf.plog_enabled = 0;
2087 
2088 	iocmd->status = BFA_STATUS_OK;
2089 	return 0;
2090 }
2091 
2092 int
2093 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2094 {
2095 	struct bfa_bsg_fcpim_profile_s *iocmd =
2096 				(struct bfa_bsg_fcpim_profile_s *)cmd;
2097 	struct timeval  tv;
2098 	unsigned long	flags;
2099 
2100 	do_gettimeofday(&tv);
2101 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2102 	if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
2103 		iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
2104 	else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
2105 		iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
2106 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2107 
2108 	return 0;
2109 }
2110 
2111 static int
2112 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
2113 {
2114 	struct bfa_bsg_itnim_ioprofile_s *iocmd =
2115 				(struct bfa_bsg_itnim_ioprofile_s *)cmd;
2116 	struct bfa_fcs_lport_s *fcs_port;
2117 	struct bfa_fcs_itnim_s *itnim;
2118 	unsigned long   flags;
2119 
2120 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2121 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
2122 				iocmd->vf_id, iocmd->lpwwn);
2123 	if (!fcs_port)
2124 		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
2125 	else {
2126 		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
2127 		if (itnim == NULL)
2128 			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
2129 		else
2130 			iocmd->status = bfa_itnim_get_ioprofile(
2131 						bfa_fcs_itnim_get_halitn(itnim),
2132 						&iocmd->ioprofile);
2133 	}
2134 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2135 	return 0;
2136 }
2137 
2138 int
2139 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
2140 {
2141 	struct bfa_bsg_fcport_stats_s *iocmd =
2142 				(struct bfa_bsg_fcport_stats_s *)cmd;
2143 	struct bfad_hal_comp fcomp;
2144 	unsigned long	flags;
2145 	struct bfa_cb_pending_q_s cb_qe;
2146 
2147 	init_completion(&fcomp.comp);
2148 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2149 			   &fcomp, &iocmd->stats);
2150 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2151 	iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2152 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2153 	if (iocmd->status != BFA_STATUS_OK) {
2154 		bfa_trc(bfad, iocmd->status);
2155 		goto out;
2156 	}
2157 	wait_for_completion(&fcomp.comp);
2158 	iocmd->status = fcomp.status;
2159 out:
2160 	return 0;
2161 }
2162 
2163 int
2164 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
2165 {
2166 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2167 	struct bfad_hal_comp fcomp;
2168 	unsigned long	flags;
2169 	struct bfa_cb_pending_q_s cb_qe;
2170 
2171 	init_completion(&fcomp.comp);
2172 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
2173 
2174 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2175 	iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2176 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2177 	if (iocmd->status != BFA_STATUS_OK) {
2178 		bfa_trc(bfad, iocmd->status);
2179 		goto out;
2180 	}
2181 	wait_for_completion(&fcomp.comp);
2182 	iocmd->status = fcomp.status;
2183 out:
2184 	return 0;
2185 }
2186 
2187 int
2188 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2189 {
2190 	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2191 	struct bfad_hal_comp fcomp;
2192 	unsigned long	flags;
2193 
2194 	init_completion(&fcomp.comp);
2195 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2196 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2197 			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2198 			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2199 			bfad_hcb_comp, &fcomp);
2200 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2201 	if (iocmd->status != BFA_STATUS_OK)
2202 		goto out;
2203 	wait_for_completion(&fcomp.comp);
2204 	iocmd->status = fcomp.status;
2205 out:
2206 	return 0;
2207 }
2208 
2209 int
2210 bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2211 {
2212 	struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2213 	struct bfad_hal_comp fcomp;
2214 	unsigned long	flags;
2215 
2216 	init_completion(&fcomp.comp);
2217 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2218 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2219 			BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
2220 			&iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2221 			bfad_hcb_comp, &fcomp);
2222 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2223 	if (iocmd->status != BFA_STATUS_OK)
2224 		goto out;
2225 	wait_for_completion(&fcomp.comp);
2226 	iocmd->status = fcomp.status;
2227 out:
2228 	return 0;
2229 }
2230 
2231 int
2232 bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2233 {
2234 	struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2235 	struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2236 	struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2237 	unsigned long	flags;
2238 
2239 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2240 	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2241 	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2242 	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2243 	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2244 	iocmd->status = BFA_STATUS_OK;
2245 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2246 
2247 	return 0;
2248 }
2249 
2250 int
2251 bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
2252 {
2253 	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2254 	struct bfad_hal_comp fcomp;
2255 	unsigned long	flags;
2256 
2257 	init_completion(&fcomp.comp);
2258 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2259 	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2260 				BFA_FLASH_PART_PXECFG,
2261 				bfad->bfa.ioc.port_id, &iocmd->cfg,
2262 				sizeof(struct bfa_ethboot_cfg_s), 0,
2263 				bfad_hcb_comp, &fcomp);
2264 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2265 	if (iocmd->status != BFA_STATUS_OK)
2266 		goto out;
2267 	wait_for_completion(&fcomp.comp);
2268 	iocmd->status = fcomp.status;
2269 out:
2270 	return 0;
2271 }
2272 
2273 int
2274 bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
2275 {
2276 	struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2277 	struct bfad_hal_comp fcomp;
2278 	unsigned long	flags;
2279 
2280 	init_completion(&fcomp.comp);
2281 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2282 	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2283 				BFA_FLASH_PART_PXECFG,
2284 				bfad->bfa.ioc.port_id, &iocmd->cfg,
2285 				sizeof(struct bfa_ethboot_cfg_s), 0,
2286 				bfad_hcb_comp, &fcomp);
2287 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2288 	if (iocmd->status != BFA_STATUS_OK)
2289 		goto out;
2290 	wait_for_completion(&fcomp.comp);
2291 	iocmd->status = fcomp.status;
2292 out:
2293 	return 0;
2294 }
2295 
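/*
 * Enable or disable trunking on the FC port.  Not allowed while in dport
 * mode or loop topology; the port is disabled and re-enabled so that the
 * new trunk setting takes effect.
 */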
2296 int
2297 bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2298 {
2299 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2300 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2301 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2302 	unsigned long	flags;
2303 
2304 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2305 
2306 	if (bfa_fcport_is_dport(&bfad->bfa)) {
2307 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2308 		return BFA_STATUS_DPORT_ERR;
2309 	}
2310 
2311 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2312 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2313 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2314 	else {
2315 		if (v_cmd == IOCMD_TRUNK_ENABLE) {
2316 			trunk->attr.state = BFA_TRUNK_OFFLINE;
2317 			bfa_fcport_disable(&bfad->bfa);
2318 			fcport->cfg.trunked = BFA_TRUE;
2319 		} else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2320 			trunk->attr.state = BFA_TRUNK_DISABLED;
2321 			bfa_fcport_disable(&bfad->bfa);
2322 			fcport->cfg.trunked = BFA_FALSE;
2323 		}
2324 
2325 		if (!bfa_fcport_is_disabled(&bfad->bfa))
2326 			bfa_fcport_enable(&bfad->bfa);
2327 
2328 		iocmd->status = BFA_STATUS_OK;
2329 	}
2330 
2331 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2332 
2333 	return 0;
2334 }
2335 
2336 int
2337 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2338 {
2339 	struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2340 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2341 	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2342 	unsigned long	flags;
2343 
2344 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2345 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
2346 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2347 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2348 	else {
2349 		memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2350 			sizeof(struct bfa_trunk_attr_s));
2351 		iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2352 		iocmd->status = BFA_STATUS_OK;
2353 	}
2354 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2355 
2356 	return 0;
2357 }
2358 
2359 int
2360 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2361 {
2362 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2363 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2364 	unsigned long	flags;
2365 
2366 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2367 	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2368 		if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2369 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2370 			iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2371 		else {
2372 			if (v_cmd == IOCMD_QOS_ENABLE)
2373 				fcport->cfg.qos_enabled = BFA_TRUE;
2374 			else if (v_cmd == IOCMD_QOS_DISABLE) {
2375 				fcport->cfg.qos_enabled = BFA_FALSE;
2376 				fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
2377 				fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
2378 				fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
2379 			}
2380 		}
2381 	}
2382 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2383 
2384 	return 0;
2385 }
2386 
2387 int
2388 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2389 {
2390 	struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2391 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2392 	unsigned long	flags;
2393 
2394 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2395 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2396 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2397 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2398 	else {
2399 		iocmd->attr.state = fcport->qos_attr.state;
2400 		iocmd->attr.total_bb_cr =
2401 			be32_to_cpu(fcport->qos_attr.total_bb_cr);
2402 		iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
2403 		iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
2404 		iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
2405 		iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
2406 		iocmd->status = BFA_STATUS_OK;
2407 	}
2408 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2409 
2410 	return 0;
2411 }
2412 
2413 int
2414 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2415 {
2416 	struct bfa_bsg_qos_vc_attr_s *iocmd =
2417 				(struct bfa_bsg_qos_vc_attr_s *)cmd;
2418 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2419 	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2420 	unsigned long	flags;
2421 	u32	i = 0;
2422 
2423 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2424 	iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2425 	iocmd->attr.shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
2426 	iocmd->attr.elp_opmode_flags  =
2427 				be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2428 
2429 	/* Individual VC info */
2430 	while (i < iocmd->attr.total_vc_count) {
2431 		iocmd->attr.vc_info[i].vc_credit =
2432 				bfa_vc_attr->vc_info[i].vc_credit;
2433 		iocmd->attr.vc_info[i].borrow_credit =
2434 				bfa_vc_attr->vc_info[i].borrow_credit;
2435 		iocmd->attr.vc_info[i].priority =
2436 				bfa_vc_attr->vc_info[i].priority;
2437 		i++;
2438 	}
2439 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2440 
2441 	iocmd->status = BFA_STATUS_OK;
2442 	return 0;
2443 }
2444 
2445 int
2446 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2447 {
2448 	struct bfa_bsg_fcport_stats_s *iocmd =
2449 				(struct bfa_bsg_fcport_stats_s *)cmd;
2450 	struct bfad_hal_comp fcomp;
2451 	unsigned long	flags;
2452 	struct bfa_cb_pending_q_s cb_qe;
2453 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2454 
2455 	init_completion(&fcomp.comp);
2456 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2457 			   &fcomp, &iocmd->stats);
2458 
2459 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2460 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2461 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2462 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2463 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2464 	else
2465 		iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2466 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2467 	if (iocmd->status != BFA_STATUS_OK) {
2468 		bfa_trc(bfad, iocmd->status);
2469 		goto out;
2470 	}
2471 	wait_for_completion(&fcomp.comp);
2472 	iocmd->status = fcomp.status;
2473 out:
2474 	return 0;
2475 }
2476 
2477 int
2478 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2479 {
2480 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2481 	struct bfad_hal_comp fcomp;
2482 	unsigned long	flags;
2483 	struct bfa_cb_pending_q_s cb_qe;
2484 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2485 
2486 	init_completion(&fcomp.comp);
2487 	bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2488 			   &fcomp, NULL);
2489 
2490 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2491 	WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2492 	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
2493 		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
2494 		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
2495 	else
2496 		iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2497 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2498 	if (iocmd->status != BFA_STATUS_OK) {
2499 		bfa_trc(bfad, iocmd->status);
2500 		goto out;
2501 	}
2502 	wait_for_completion(&fcomp.comp);
2503 	iocmd->status = fcomp.status;
2504 out:
2505 	return 0;
2506 }
2507 
2508 int
2509 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2510 {
2511 	struct bfa_bsg_vf_stats_s *iocmd =
2512 			(struct bfa_bsg_vf_stats_s *)cmd;
2513 	struct bfa_fcs_fabric_s	*fcs_vf;
2514 	unsigned long	flags;
2515 
2516 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2517 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2518 	if (fcs_vf == NULL) {
2519 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2520 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2521 		goto out;
2522 	}
2523 	memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2524 		sizeof(struct bfa_vf_stats_s));
2525 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2526 	iocmd->status = BFA_STATUS_OK;
2527 out:
2528 	return 0;
2529 }
2530 
2531 int
2532 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2533 {
2534 	struct bfa_bsg_vf_reset_stats_s *iocmd =
2535 			(struct bfa_bsg_vf_reset_stats_s *)cmd;
2536 	struct bfa_fcs_fabric_s	*fcs_vf;
2537 	unsigned long	flags;
2538 
2539 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2540 	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2541 	if (fcs_vf == NULL) {
2542 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2543 		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2544 		goto out;
2545 	}
2546 	memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2547 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2548 	iocmd->status = BFA_STATUS_OK;
2549 out:
2550 	return 0;
2551 }
2552 
2553 /* Function to reset the LUN SCAN mode */
2554 static void
2555 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
2556 {
2557 	struct bfad_im_port_s *pport_im = bfad->pport.im_port;
2558 	struct bfad_vport_s *vport = NULL;
2559 
2560 	/* Set the scsi device LUN SCAN flags for the base port */
2561 	bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
2562 
2563 	/* Set the scsi device LUN SCAN flags for the vports */
2564 	list_for_each_entry(vport, &bfad->vport_list, list_entry)
2565 		bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
2566 }
2567 
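/*
 * Enable, disable or clear the FCP-IM LUN mask configuration and keep the
 * SCSI LUN scan mode of all ports in sync with the new setting.
 */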
2568 int
2569 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2570 {
2571 	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2572 	unsigned long	flags;
2573 
2574 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2575 	if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
2576 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2577 		/* Set the LUN Scanning mode to be Sequential scan */
2578 		if (iocmd->status == BFA_STATUS_OK)
2579 			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
2580 	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
2581 		iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2582 		/* Set the LUN Scanning mode to default REPORT_LUNS scan */
2583 		if (iocmd->status == BFA_STATUS_OK)
2584 			bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
2585 	} else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2586 		iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2587 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2588 	return 0;
2589 }
2590 
2591 int
2592 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2593 {
2594 	struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2595 			(struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2596 	struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2597 	unsigned long	flags;
2598 
2599 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2600 	iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2601 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2602 	return 0;
2603 }
2604 
2605 int
2606 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2607 {
2608 	struct bfa_bsg_fcpim_lunmask_s *iocmd =
2609 				(struct bfa_bsg_fcpim_lunmask_s *)cmd;
2610 	unsigned long	flags;
2611 
2612 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2613 	if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2614 		iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2615 					&iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2616 	else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2617 		iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2618 					iocmd->vf_id, &iocmd->pwwn,
2619 					iocmd->rpwwn, iocmd->lun);
2620 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2621 	return 0;
2622 }
2623 
2624 int
2625 bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
2626 {
2627 	struct bfa_bsg_fcpim_throttle_s *iocmd =
2628 			(struct bfa_bsg_fcpim_throttle_s *)cmd;
2629 	unsigned long   flags;
2630 
2631 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2632 	iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
2633 				(void *)&iocmd->throttle);
2634 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2635 
2636 	return 0;
2637 }
2638 
2639 int
2640 bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
2641 {
2642 	struct bfa_bsg_fcpim_throttle_s *iocmd =
2643 			(struct bfa_bsg_fcpim_throttle_s *)cmd;
2644 	unsigned long	flags;
2645 
2646 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2647 	iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
2648 				iocmd->throttle.cfg_value);
2649 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2650 
2651 	return 0;
2652 }
2653 
2654 int
2655 bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
2656 {
2657 	struct bfa_bsg_tfru_s *iocmd =
2658 			(struct bfa_bsg_tfru_s *)cmd;
2659 	struct bfad_hal_comp fcomp;
2660 	unsigned long flags = 0;
2661 
2662 	init_completion(&fcomp.comp);
2663 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2664 	iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
2665 				&iocmd->data, iocmd->len, iocmd->offset,
2666 				bfad_hcb_comp, &fcomp);
2667 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2668 	if (iocmd->status == BFA_STATUS_OK) {
2669 		wait_for_completion(&fcomp.comp);
2670 		iocmd->status = fcomp.status;
2671 	}
2672 
2673 	return 0;
2674 }
2675 
2676 int
2677 bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
2678 {
2679 	struct bfa_bsg_tfru_s *iocmd =
2680 			(struct bfa_bsg_tfru_s *)cmd;
2681 	struct bfad_hal_comp fcomp;
2682 	unsigned long flags = 0;
2683 
2684 	init_completion(&fcomp.comp);
2685 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2686 	iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
2687 				&iocmd->data, iocmd->len, iocmd->offset,
2688 				bfad_hcb_comp, &fcomp);
2689 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2690 	if (iocmd->status == BFA_STATUS_OK) {
2691 		wait_for_completion(&fcomp.comp);
2692 		iocmd->status = fcomp.status;
2693 	}
2694 
2695 	return 0;
2696 }
2697 
2698 int
2699 bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
2700 {
2701 	struct bfa_bsg_fruvpd_s *iocmd =
2702 			(struct bfa_bsg_fruvpd_s *)cmd;
2703 	struct bfad_hal_comp fcomp;
2704 	unsigned long flags = 0;
2705 
2706 	init_completion(&fcomp.comp);
2707 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2708 	iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
2709 				&iocmd->data, iocmd->len, iocmd->offset,
2710 				bfad_hcb_comp, &fcomp);
2711 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2712 	if (iocmd->status == BFA_STATUS_OK) {
2713 		wait_for_completion(&fcomp.comp);
2714 		iocmd->status = fcomp.status;
2715 	}
2716 
2717 	return 0;
2718 }
2719 
2720 int
2721 bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
2722 {
2723 	struct bfa_bsg_fruvpd_s *iocmd =
2724 			(struct bfa_bsg_fruvpd_s *)cmd;
2725 	struct bfad_hal_comp fcomp;
2726 	unsigned long flags = 0;
2727 
2728 	init_completion(&fcomp.comp);
2729 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2730 	iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
2731 				&iocmd->data, iocmd->len, iocmd->offset,
2732 				bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
2733 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2734 	if (iocmd->status == BFA_STATUS_OK) {
2735 		wait_for_completion(&fcomp.comp);
2736 		iocmd->status = fcomp.status;
2737 	}
2738 
2739 	return 0;
2740 }
2741 
2742 int
2743 bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
2744 {
2745 	struct bfa_bsg_fruvpd_max_size_s *iocmd =
2746 			(struct bfa_bsg_fruvpd_max_size_s *)cmd;
2747 	unsigned long flags = 0;
2748 
2749 	spin_lock_irqsave(&bfad->bfad_lock, flags);
2750 	iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
2751 						&iocmd->max_size);
2752 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2753 
2754 	return 0;
2755 }
2756 
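/*
 * Dispatch a BSG vendor command to the corresponding IOCMD handler.
 * Returns the handler's result, or -EINVAL for an unknown command.
 */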
2757 static int
2758 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2759 		unsigned int payload_len)
2760 {
2761 	int rc = -EINVAL;
2762 
2763 	switch (cmd) {
2764 	case IOCMD_IOC_ENABLE:
2765 		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
2766 		break;
2767 	case IOCMD_IOC_DISABLE:
2768 		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
2769 		break;
2770 	case IOCMD_IOC_GET_INFO:
2771 		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
2772 		break;
2773 	case IOCMD_IOC_GET_ATTR:
2774 		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
2775 		break;
2776 	case IOCMD_IOC_GET_STATS:
2777 		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
2778 		break;
2779 	case IOCMD_IOC_GET_FWSTATS:
2780 		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
2781 		break;
2782 	case IOCMD_IOC_RESET_STATS:
2783 	case IOCMD_IOC_RESET_FWSTATS:
2784 		rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2785 		break;
2786 	case IOCMD_IOC_SET_ADAPTER_NAME:
2787 	case IOCMD_IOC_SET_PORT_NAME:
2788 		rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2789 		break;
2790 	case IOCMD_IOCFC_GET_ATTR:
2791 		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
2792 		break;
2793 	case IOCMD_IOCFC_SET_INTR:
2794 		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
2795 		break;
2796 	case IOCMD_PORT_ENABLE:
2797 		rc = bfad_iocmd_port_enable(bfad, iocmd);
2798 		break;
2799 	case IOCMD_PORT_DISABLE:
2800 		rc = bfad_iocmd_port_disable(bfad, iocmd);
2801 		break;
2802 	case IOCMD_PORT_GET_ATTR:
2803 		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
2804 		break;
2805 	case IOCMD_PORT_GET_STATS:
2806 		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
2807 		break;
2808 	case IOCMD_PORT_RESET_STATS:
2809 		rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2810 		break;
2811 	case IOCMD_PORT_CFG_TOPO:
2812 	case IOCMD_PORT_CFG_SPEED:
2813 	case IOCMD_PORT_CFG_ALPA:
2814 	case IOCMD_PORT_CLR_ALPA:
2815 		rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2816 		break;
2817 	case IOCMD_PORT_CFG_MAXFRSZ:
2818 		rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2819 		break;
2820 	case IOCMD_PORT_BBCR_ENABLE:
2821 	case IOCMD_PORT_BBCR_DISABLE:
2822 		rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
2823 		break;
2824 	case IOCMD_PORT_BBCR_GET_ATTR:
2825 		rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
2826 		break;
2827 	case IOCMD_LPORT_GET_ATTR:
2828 		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
2829 		break;
2830 	case IOCMD_LPORT_GET_STATS:
2831 		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
2832 		break;
2833 	case IOCMD_LPORT_RESET_STATS:
2834 		rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2835 		break;
2836 	case IOCMD_LPORT_GET_IOSTATS:
2837 		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
2838 		break;
2839 	case IOCMD_LPORT_GET_RPORTS:
2840 		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
2841 		break;
2842 	case IOCMD_RPORT_GET_ATTR:
2843 		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
2844 		break;
2845 	case IOCMD_RPORT_GET_ADDR:
2846 		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
2847 		break;
2848 	case IOCMD_RPORT_GET_STATS:
2849 		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
2850 		break;
2851 	case IOCMD_RPORT_RESET_STATS:
2852 		rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2853 		break;
2854 	case IOCMD_RPORT_SET_SPEED:
2855 		rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2856 		break;
2857 	case IOCMD_VPORT_GET_ATTR:
2858 		rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2859 		break;
2860 	case IOCMD_VPORT_GET_STATS:
2861 		rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2862 		break;
2863 	case IOCMD_VPORT_RESET_STATS:
2864 		rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2865 		break;
2866 	case IOCMD_FABRIC_GET_LPORTS:
2867 		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
2868 		break;
2869 	case IOCMD_RATELIM_ENABLE:
2870 	case IOCMD_RATELIM_DISABLE:
2871 		rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2872 		break;
2873 	case IOCMD_RATELIM_DEF_SPEED:
2874 		rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2875 		break;
2876 	case IOCMD_FCPIM_FAILOVER:
2877 		rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2878 		break;
2879 	case IOCMD_FCPIM_MODSTATS:
2880 		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
2881 		break;
2882 	case IOCMD_FCPIM_MODSTATSCLR:
2883 		rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2884 		break;
2885 	case IOCMD_FCPIM_DEL_ITN_STATS:
2886 		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
2887 		break;
2888 	case IOCMD_ITNIM_GET_ATTR:
2889 		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
2890 		break;
2891 	case IOCMD_ITNIM_GET_IOSTATS:
2892 		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
2893 		break;
2894 	case IOCMD_ITNIM_RESET_STATS:
2895 		rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2896 		break;
2897 	case IOCMD_ITNIM_GET_ITNSTATS:
2898 		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
2899 		break;
2900 	case IOCMD_FCPORT_ENABLE:
2901 		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
2902 		break;
2903 	case IOCMD_FCPORT_DISABLE:
2904 		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
2905 		break;
2906 	case IOCMD_IOC_PCIFN_CFG:
2907 		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
2908 		break;
2909 	case IOCMD_IOC_FW_SIG_INV:
2910 		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd);
2911 		break;
2912 	case IOCMD_PCIFN_CREATE:
2913 		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
2914 		break;
2915 	case IOCMD_PCIFN_DELETE:
2916 		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
2917 		break;
2918 	case IOCMD_PCIFN_BW:
2919 		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
2920 		break;
2921 	case IOCMD_ADAPTER_CFG_MODE:
2922 		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
2923 		break;
2924 	case IOCMD_PORT_CFG_MODE:
2925 		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
2926 		break;
2927 	case IOCMD_FLASH_ENABLE_OPTROM:
2928 	case IOCMD_FLASH_DISABLE_OPTROM:
2929 		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
2930 		break;
2931 	case IOCMD_FAA_QUERY:
2932 		rc = bfad_iocmd_faa_query(bfad, iocmd);
2933 		break;
2934 	case IOCMD_CEE_GET_ATTR:
2935 		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
2936 		break;
2937 	case IOCMD_CEE_GET_STATS:
2938 		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
2939 		break;
2940 	case IOCMD_CEE_RESET_STATS:
2941 		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
2942 		break;
2943 	case IOCMD_SFP_MEDIA:
2944 		rc = bfad_iocmd_sfp_media(bfad, iocmd);
2945 		break;
2946 	case IOCMD_SFP_SPEED:
2947 		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
2948 		break;
2949 	case IOCMD_FLASH_GET_ATTR:
2950 		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
2951 		break;
2952 	case IOCMD_FLASH_ERASE_PART:
2953 		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
2954 		break;
2955 	case IOCMD_FLASH_UPDATE_PART:
2956 		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
2957 		break;
2958 	case IOCMD_FLASH_READ_PART:
2959 		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
2960 		break;
2961 	case IOCMD_DIAG_TEMP:
2962 		rc = bfad_iocmd_diag_temp(bfad, iocmd);
2963 		break;
2964 	case IOCMD_DIAG_MEMTEST:
2965 		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
2966 		break;
2967 	case IOCMD_DIAG_LOOPBACK:
2968 		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
2969 		break;
2970 	case IOCMD_DIAG_FWPING:
2971 		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
2972 		break;
2973 	case IOCMD_DIAG_QUEUETEST:
2974 		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
2975 		break;
2976 	case IOCMD_DIAG_SFP:
2977 		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
2978 		break;
2979 	case IOCMD_DIAG_LED:
2980 		rc = bfad_iocmd_diag_led(bfad, iocmd);
2981 		break;
2982 	case IOCMD_DIAG_BEACON_LPORT:
2983 		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
2984 		break;
2985 	case IOCMD_DIAG_LB_STAT:
2986 		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
2987 		break;
2988 	case IOCMD_DIAG_DPORT_ENABLE:
2989 		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
2990 		break;
2991 	case IOCMD_DIAG_DPORT_DISABLE:
2992 		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
2993 		break;
2994 	case IOCMD_DIAG_DPORT_SHOW:
2995 		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
2996 		break;
2997 	case IOCMD_DIAG_DPORT_START:
2998 		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
2999 		break;
3000 	case IOCMD_PHY_GET_ATTR:
3001 		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
3002 		break;
3003 	case IOCMD_PHY_GET_STATS:
3004 		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
3005 		break;
3006 	case IOCMD_PHY_UPDATE_FW:
3007 		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
3008 		break;
3009 	case IOCMD_PHY_READ_FW:
3010 		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
3011 		break;
3012 	case IOCMD_VHBA_QUERY:
3013 		rc = bfad_iocmd_vhba_query(bfad, iocmd);
3014 		break;
3015 	case IOCMD_DEBUG_PORTLOG:
3016 		rc = bfad_iocmd_porglog_get(bfad, iocmd);
3017 		break;
3018 	case IOCMD_DEBUG_FW_CORE:
3019 		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
3020 		break;
3021 	case IOCMD_DEBUG_FW_STATE_CLR:
3022 	case IOCMD_DEBUG_PORTLOG_CLR:
3023 	case IOCMD_DEBUG_START_DTRC:
3024 	case IOCMD_DEBUG_STOP_DTRC:
3025 		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
3026 		break;
3027 	case IOCMD_DEBUG_PORTLOG_CTL:
3028 		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
3029 		break;
3030 	case IOCMD_FCPIM_PROFILE_ON:
3031 	case IOCMD_FCPIM_PROFILE_OFF:
3032 		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
3033 		break;
3034 	case IOCMD_ITNIM_GET_IOPROFILE:
3035 		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
3036 		break;
3037 	case IOCMD_FCPORT_GET_STATS:
3038 		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
3039 		break;
3040 	case IOCMD_FCPORT_RESET_STATS:
3041 		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
3042 		break;
3043 	case IOCMD_BOOT_CFG:
3044 		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
3045 		break;
3046 	case IOCMD_BOOT_QUERY:
3047 		rc = bfad_iocmd_boot_query(bfad, iocmd);
3048 		break;
3049 	case IOCMD_PREBOOT_QUERY:
3050 		rc = bfad_iocmd_preboot_query(bfad, iocmd);
3051 		break;
3052 	case IOCMD_ETHBOOT_CFG:
3053 		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
3054 		break;
3055 	case IOCMD_ETHBOOT_QUERY:
3056 		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
3057 		break;
3058 	case IOCMD_TRUNK_ENABLE:
3059 	case IOCMD_TRUNK_DISABLE:
3060 		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
3061 		break;
3062 	case IOCMD_TRUNK_GET_ATTR:
3063 		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
3064 		break;
3065 	case IOCMD_QOS_ENABLE:
3066 	case IOCMD_QOS_DISABLE:
3067 		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
3068 		break;
3069 	case IOCMD_QOS_GET_ATTR:
3070 		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
3071 		break;
3072 	case IOCMD_QOS_GET_VC_ATTR:
3073 		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
3074 		break;
3075 	case IOCMD_QOS_GET_STATS:
3076 		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
3077 		break;
3078 	case IOCMD_QOS_RESET_STATS:
3079 		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
3080 		break;
3081 	case IOCMD_QOS_SET_BW:
3082 		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
3083 		break;
3084 	case IOCMD_VF_GET_STATS:
3085 		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
3086 		break;
3087 	case IOCMD_VF_RESET_STATS:
3088 		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
3089 		break;
3090 	case IOCMD_FCPIM_LUNMASK_ENABLE:
3091 	case IOCMD_FCPIM_LUNMASK_DISABLE:
3092 	case IOCMD_FCPIM_LUNMASK_CLEAR:
3093 		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
3094 		break;
3095 	case IOCMD_FCPIM_LUNMASK_QUERY:
3096 		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
3097 		break;
3098 	case IOCMD_FCPIM_LUNMASK_ADD:
3099 	case IOCMD_FCPIM_LUNMASK_DELETE:
3100 		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
3101 		break;
3102 	case IOCMD_FCPIM_THROTTLE_QUERY:
3103 		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
3104 		break;
3105 	case IOCMD_FCPIM_THROTTLE_SET:
3106 		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
3107 		break;
3108 	/* TFRU */
3109 	case IOCMD_TFRU_READ:
3110 		rc = bfad_iocmd_tfru_read(bfad, iocmd);
3111 		break;
3112 	case IOCMD_TFRU_WRITE:
3113 		rc = bfad_iocmd_tfru_write(bfad, iocmd);
3114 		break;
3115 	/* FRU */
3116 	case IOCMD_FRUVPD_READ:
3117 		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
3118 		break;
3119 	case IOCMD_FRUVPD_UPDATE:
3120 		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
3121 		break;
3122 	case IOCMD_FRUVPD_GET_MAX_SIZE:
3123 		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
3124 		break;
3125 	default:
3126 		rc = -EINVAL;
3127 		break;
3128 	}
3129 	return rc;
3130 }
3131 
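/*
 * Handle an FC_BSG_HST_VENDOR request: copy the request payload into a
 * linear kernel buffer, run it through bfad_iocmd_handler() and copy the
 * response back into the reply payload sg_list.
 */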
3132 static int
3133 bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
3134 {
3135 	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
3136 	struct bfad_im_port_s *im_port =
3137 			(struct bfad_im_port_s *) job->shost->hostdata[0];
3138 	struct bfad_s *bfad = im_port->bfad;
3139 	struct request_queue *request_q = job->req->q;
3140 	void *payload_kbuf;
3141 	int rc = -EINVAL;
3142 
3143 	/*
3144 	 * Raise the BSG device request_queue segment limit to 256 so that
3145 	 * payloads larger than 512 * 1024 bytes can be handled.
3146 	 */
3147 	blk_queue_max_segments(request_q, 256);
3148 
3149 	/* Allocate a temp buffer to hold the passed in user space command */
3150 	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3151 	if (!payload_kbuf) {
3152 		rc = -ENOMEM;
3153 		goto out;
3154 	}
3155 
3156 	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
3157 	sg_copy_to_buffer(job->request_payload.sg_list,
3158 			  job->request_payload.sg_cnt, payload_kbuf,
3159 			  job->request_payload.payload_len);
3160 
3161 	/* Invoke IOCMD handler - to handle all the vendor command requests */
3162 	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
3163 				job->request_payload.payload_len);
3164 	if (rc != BFA_STATUS_OK)
3165 		goto error;
3166 
3167 	/* Copy the response data to the job->reply_payload sg_list */
3168 	sg_copy_from_buffer(job->reply_payload.sg_list,
3169 			    job->reply_payload.sg_cnt,
3170 			    payload_kbuf,
3171 			    job->reply_payload.payload_len);
3172 
3173 	/* free the command buffer */
3174 	kfree(payload_kbuf);
3175 
3176 	/* Fill the BSG job reply data */
3177 	job->reply_len = job->reply_payload.payload_len;
3178 	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
3179 	job->reply->result = rc;
3180 
3181 	job->job_done(job);
3182 	return rc;
3183 error:
3184 	/* free the command buffer */
3185 	kfree(payload_kbuf);
3186 out:
3187 	job->reply->result = rc;
3188 	job->reply_len = sizeof(uint32_t);
3189 	job->reply->reply_payload_rcv_len = 0;
3190 	return rc;
3191 }
3192 
3193 /* FC passthru callbacks */
3194 u64
3195 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
3196 {
3197 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3198 	struct bfa_sge_s  *sge;
3199 	u64	addr;
3200 
3201 	sge = drv_fcxp->req_sge + sgeid;
3202 	addr = (u64)(size_t) sge->sg_addr;
3203 	return addr;
3204 }
3205 
3206 u32
3207 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
3208 {
3209 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3210 	struct bfa_sge_s	*sge;
3211 
3212 	sge = drv_fcxp->req_sge + sgeid;
3213 	return sge->sg_len;
3214 }
3215 
3216 u64
3217 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
3218 {
3219 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3220 	struct bfa_sge_s	*sge;
3221 	u64	addr;
3222 
3223 	sge = drv_fcxp->rsp_sge + sgeid;
3224 	addr = (u64)(size_t) sge->sg_addr;
3225 	return addr;
3226 }
3227 
3228 u32
3229 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
3230 {
3231 	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
3232 	struct bfa_sge_s	*sge;
3233 
3234 	sge = drv_fcxp->rsp_sge + sgeid;
3235 	return sge->sg_len;
3236 }
3237 
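/*
 * FCXP send completion callback: record the request status and response
 * length, then wake up the thread waiting in bfad_im_bsg_els_ct_request().
 */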
3238 void
3239 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
3240 		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
3241 		struct fchs_s *rsp_fchs)
3242 {
3243 	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
3244 
3245 	drv_fcxp->req_status = req_status;
3246 	drv_fcxp->rsp_len = rsp_len;
3247 
3248 	/* bfa_fcxp will be automatically freed by BFA */
3249 	drv_fcxp->bfa_fcxp = NULL;
3250 	complete(&drv_fcxp->comp);
3251 }
3252 
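/*
 * Allocate a buf_info/SG-table pair backed by a single DMA-coherent buffer,
 * copy the linear payload into it and return the buffer descriptor.
 * *num_sgles is set to the number of SG entries used (currently one).
 */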
3253 struct bfad_buf_info *
3254 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
3255 		 uint32_t payload_len, uint32_t *num_sgles)
3256 {
3257 	struct bfad_buf_info	*buf_base, *buf_info;
3258 	struct bfa_sge_s	*sg_table;
3259 	int sge_num = 1;
3260 
3261 	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
3262 			   sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
3263 	if (!buf_base)
3264 		return NULL;
3265 
3266 	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
3267 			(sizeof(struct bfad_buf_info) * sge_num));
3268 
3269 	/* Allocate dma coherent memory */
3270 	buf_info = buf_base;
3271 	buf_info->size = payload_len;
3272 	buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
3273 					     buf_info->size, &buf_info->phys,
3274 					     GFP_KERNEL);
3275 	if (!buf_info->virt)
3276 		goto out_free_mem;
3277 
3278 	/* copy the linear bsg buffer to buf_info */
3279 	memcpy(buf_info->virt, payload_kbuf, buf_info->size);
3280 
3281 	/*
3282 	 * Setup SG table
3283 	 */
3284 	sg_table->sg_len = buf_info->size;
3285 	sg_table->sg_addr = (void *)(size_t) buf_info->phys;
3286 
3287 	*num_sgles = sge_num;
3288 
3289 	return buf_base;
3290 
3291 out_free_mem:
3292 	kfree(buf_base);
3293 	return NULL;
3294 }
3295 
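/* Free the DMA-coherent buffers and descriptors set up by bfad_fcxp_map_sg(). */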
3296 void
3297 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
3298 		   uint32_t num_sgles)
3299 {
3300 	int i;
3301 	struct bfad_buf_info *buf_info = buf_base;
3302 
3303 	if (buf_base) {
3304 		for (i = 0; i < num_sgles; buf_info++, i++) {
3305 			if (buf_info->virt != NULL)
3306 				dma_free_coherent(&bfad->pcidev->dev,
3307 					buf_info->size, buf_info->virt,
3308 					buf_info->phys);
3309 		}
3310 		kfree(buf_base);
3311 	}
3312 }
3313 
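/*
 * Allocate an FCXP for the passthru request and hand it to the firmware;
 * completion is reported through bfad_send_fcpt_cb().
 */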
3314 int
3315 bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
3316 		   bfa_bsg_fcpt_t *bsg_fcpt)
3317 {
3318 	struct bfa_fcxp_s *hal_fcxp;
3319 	struct bfad_s	*bfad = drv_fcxp->port->bfad;
3320 	unsigned long	flags;
3321 	uint8_t	lp_tag;
3322 
3323 	spin_lock_irqsave(&bfad->bfad_lock, flags);
3324 
3325 	/* Allocate bfa_fcxp structure */
3326 	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
3327 				  drv_fcxp->num_req_sgles,
3328 				  drv_fcxp->num_rsp_sgles,
3329 				  bfad_fcxp_get_req_sgaddr_cb,
3330 				  bfad_fcxp_get_req_sglen_cb,
3331 				  bfad_fcxp_get_rsp_sgaddr_cb,
3332 				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
3333 	if (!hal_fcxp) {
3334 		bfa_trc(bfad, 0);
3335 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3336 		return BFA_STATUS_ENOMEM;
3337 	}
3338 
3339 	drv_fcxp->bfa_fcxp = hal_fcxp;
3340 
3341 	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
3342 
3343 	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
3344 		      bsg_fcpt->cts, bsg_fcpt->cos,
3345 		      job->request_payload.payload_len,
3346 		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
3347 		      job->reply_payload.payload_len, bsg_fcpt->tsecs);
3348 
3349 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3350 
3351 	return BFA_STATUS_OK;
3352 }
3353 
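/*
 * Handle ELS/CT passthru requests: validate the local (and, for RPT
 * commands, remote) port, map the request/response payloads for DMA,
 * send the FCXP and wait for its completion before filling in the reply.
 */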
3354 int
3355 bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
3356 {
3357 	struct bfa_bsg_data *bsg_data;
3358 	struct bfad_im_port_s *im_port =
3359 			(struct bfad_im_port_s *) job->shost->hostdata[0];
3360 	struct bfad_s *bfad = im_port->bfad;
3361 	bfa_bsg_fcpt_t *bsg_fcpt;
3362 	struct bfad_fcxp    *drv_fcxp;
3363 	struct bfa_fcs_lport_s *fcs_port;
3364 	struct bfa_fcs_rport_s *fcs_rport;
3365 	uint32_t command_type = job->request->msgcode;
3366 	unsigned long flags;
3367 	struct bfad_buf_info *rsp_buf_info;
3368 	void *req_kbuf = NULL, *rsp_kbuf = NULL;
3369 	int rc = -EINVAL;
3370 
3371 	job->reply_len  = sizeof(uint32_t);	/* At least uint32_t reply_len */
3372 	job->reply->reply_payload_rcv_len = 0;
3373 
3374 	/* Get the payload passed in from userspace */
3375 	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
3376 					sizeof(struct fc_bsg_request));
3377 	if (bsg_data == NULL)
3378 		goto out;
3379 
3380 	/*
3381 	 * Allocate a buffer for bsg_fcpt and copy in the user-space payload
3382 	 * of bsg_data->payload_len bytes.
3383 	 */
3384 	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
3385 	if (!bsg_fcpt) {
3386 		rc = -ENOMEM;
3387 		goto out;
3388 	}
3389 
3390 	if (copy_from_user((uint8_t *)bsg_fcpt,
3391 				(void *)(unsigned long)bsg_data->payload,
3392 				bsg_data->payload_len)) {
3393 		kfree(bsg_fcpt);
3394 		rc = -EIO;
3395 		goto out;
3396 	}
3397 
3398 	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
3399 	if (drv_fcxp == NULL) {
3400 		kfree(bsg_fcpt);
3401 		rc = -ENOMEM;
3402 		goto out;
3403 	}
3404 
3405 	spin_lock_irqsave(&bfad->bfad_lock, flags);
3406 	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
3407 					bsg_fcpt->lpwwn);
3408 	if (fcs_port == NULL) {
3409 		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
3410 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3411 		goto out_free_mem;
3412 	}
3413 
3414 	/* Check if the port is online before sending FC Passthru cmd */
3415 	if (!bfa_fcs_lport_is_online(fcs_port)) {
3416 		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
3417 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3418 		goto out_free_mem;
3419 	}
3420 
3421 	drv_fcxp->port = fcs_port->bfad_port;
3422 
3423 	if (drv_fcxp->port->bfad == NULL)
3424 		drv_fcxp->port->bfad = bfad;
3425 
3426 	/* Fetch the bfa_rport - if nexus needed */
3427 	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
3428 	    command_type == FC_BSG_HST_CT) {
3429 		/* BSG HST commands: no nexus needed */
3430 		drv_fcxp->bfa_rport = NULL;
3431 
3432 	} else if (command_type == FC_BSG_RPT_ELS ||
3433 		   command_type == FC_BSG_RPT_CT) {
3434 		/* BSG RPT commands: nexus needed */
3435 		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
3436 							    bsg_fcpt->dpwwn);
3437 		if (fcs_rport == NULL) {
3438 			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
3439 			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3440 			goto out_free_mem;
3441 		}
3442 
3443 		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
3444 
3445 	} else { /* Unknown BSG msgcode; return -EINVAL */
3446 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3447 		goto out_free_mem;
3448 	}
3449 
3450 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3451 
3452 	/* allocate memory for req / rsp buffers */
3453 	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3454 	if (!req_kbuf) {
3455 		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
3456 				bfad->pci_name);
3457 		rc = -ENOMEM;
3458 		goto out_free_mem;
3459 	}
3460 
3461 	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
3462 	if (!rsp_kbuf) {
3463 		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
3464 				bfad->pci_name);
3465 		rc = -ENOMEM;
3466 		goto out_free_mem;
3467 	}
3468 
3469 	/* map req sg - copy the sg_list passed in to the linear buffer */
3470 	sg_copy_to_buffer(job->request_payload.sg_list,
3471 			  job->request_payload.sg_cnt, req_kbuf,
3472 			  job->request_payload.payload_len);
3473 
3474 	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
3475 					job->request_payload.payload_len,
3476 					&drv_fcxp->num_req_sgles);
3477 	if (!drv_fcxp->reqbuf_info) {
3478 		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
3479 				bfad->pci_name);
3480 		rc = -ENOMEM;
3481 		goto out_free_mem;
3482 	}
3483 
3484 	drv_fcxp->req_sge = (struct bfa_sge_s *)
3485 			    (((uint8_t *)drv_fcxp->reqbuf_info) +
3486 			    (sizeof(struct bfad_buf_info) *
3487 					drv_fcxp->num_req_sgles));
3488 
3489 	/* map rsp sg */
3490 	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
3491 					job->reply_payload.payload_len,
3492 					&drv_fcxp->num_rsp_sgles);
3493 	if (!drv_fcxp->rspbuf_info) {
3494 		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
3495 				bfad->pci_name);
3496 		rc = -ENOMEM;
3497 		goto out_free_mem;
3498 	}
3499 
3500 	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
3501 	drv_fcxp->rsp_sge = (struct bfa_sge_s  *)
3502 			    (((uint8_t *)drv_fcxp->rspbuf_info) +
3503 			    (sizeof(struct bfad_buf_info) *
3504 					drv_fcxp->num_rsp_sgles));
3505 
3506 	/* fcxp send */
3507 	init_completion(&drv_fcxp->comp);
3508 	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
3509 	if (rc == BFA_STATUS_OK) {
3510 		wait_for_completion(&drv_fcxp->comp);
3511 		bsg_fcpt->status = drv_fcxp->req_status;
3512 	} else {
3513 		bsg_fcpt->status = rc;
3514 		goto out_free_mem;
3515 	}
3516 
3517 	/* fill the job->reply data */
3518 	if (drv_fcxp->req_status == BFA_STATUS_OK) {
3519 		job->reply_len = drv_fcxp->rsp_len;
3520 		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
3521 		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
3522 	} else {
3523 		job->reply->reply_payload_rcv_len =
3524 					sizeof(struct fc_bsg_ctels_reply);
3525 		job->reply_len = sizeof(uint32_t);
3526 		job->reply->reply_data.ctels_reply.status =
3527 						FC_CTELS_STATUS_REJECT;
3528 	}
3529 
3530 	/* Copy the response data to the reply_payload sg list */
3531 	sg_copy_from_buffer(job->reply_payload.sg_list,
3532 			    job->reply_payload.sg_cnt,
3533 			    (uint8_t *)rsp_buf_info->virt,
3534 			    job->reply_payload.payload_len);
3535 
3536 out_free_mem:
3537 	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
3538 			   drv_fcxp->num_rsp_sgles);
3539 	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
3540 			   drv_fcxp->num_req_sgles);
3541 	kfree(req_kbuf);
3542 	kfree(rsp_kbuf);
3543 
3544 	/* Copy the response back to the user-space payload buffer */
3545 	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
3546 			(void *)bsg_fcpt, bsg_data->payload_len))
3547 		rc = -EIO;
3548 
3549 	kfree(bsg_fcpt);
3550 	kfree(drv_fcxp);
3551 out:
3552 	job->reply->result = rc;
3553 
3554 	if (rc == BFA_STATUS_OK)
3555 		job->job_done(job);
3556 
3557 	return rc;
3558 }
3559 
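/* Entry point for all BSG requests: dispatch vendor and ELS/CT passthru commands. */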
3560 int
3561 bfad_im_bsg_request(struct fc_bsg_job *job)
3562 {
3563 	int rc = BFA_STATUS_OK;
3564 
3565 	switch (job->request->msgcode) {
3566 	case FC_BSG_HST_VENDOR:
3567 		/* Process BSG HST Vendor requests */
3568 		rc = bfad_im_bsg_vendor_request(job);
3569 		break;
3570 	case FC_BSG_HST_ELS_NOLOGIN:
3571 	case FC_BSG_RPT_ELS:
3572 	case FC_BSG_HST_CT:
3573 	case FC_BSG_RPT_CT:
3574 		/* Process BSG ELS/CT commands */
3575 		rc = bfad_im_bsg_els_ct_request(job);
3576 		break;
3577 	default:
3578 		job->reply->result = rc = -EINVAL;
3579 		job->reply->reply_payload_rcv_len = 0;
3580 		break;
3581 	}
3582 
3583 	return rc;
3584 }
3585 
3586 int
3587 bfad_im_bsg_timeout(struct fc_bsg_job *job)
3588 {
3589 	/* Don't complete the BSG job request - return -EAGAIN
3590 	 * to reset the bsg job timeout: for ELS/CT pass thru we
3591 	 * already have a timer to track the request.
3592 	 */
3593 	return -EAGAIN;
3594 }
3595