1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 #include "qla_gbl.h"
8 
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/bsg-lib.h>
13 
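/*
 * Deferred release of a BSG passthrough fcport.  qla2x00_bsg_sp_free()
 * queues the dummy fcports allocated for host-based ELS/CT commands
 * here so they are released from the host workqueue, in process
 * context, rather than directly in the command completion path.
 */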
14 static void qla2xxx_free_fcport_work(struct work_struct *work)
15 {
16 	struct fc_port *fcport = container_of(work, typeof(*fcport),
17 	    free_work);
18 
19 	qla2x00_free_fcport(fcport);
20 }
21 
22 /* BSG support for ELS/CT pass through */
23 void qla2x00_bsg_job_done(srb_t *sp, int res)
24 {
25 	struct bsg_job *bsg_job = sp->u.bsg_job;
26 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
27 
28 	sp->free(sp);
29 
30 	bsg_reply->result = res;
31 	bsg_job_done(bsg_job, bsg_reply->result,
32 		       bsg_reply->reply_payload_rcv_len);
33 }
34 
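/*
 * Release the resources held by a completed BSG SRB: undo the request
 * and reply DMA mappings (for ISPFX00 commands only the directions
 * flagged in the IOCB request were mapped), hand any dummy fcport to
 * the workqueue for release, and return the SRB to its pool.
 */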
35 void qla2x00_bsg_sp_free(srb_t *sp)
36 {
37 	struct qla_hw_data *ha = sp->vha->hw;
38 	struct bsg_job *bsg_job = sp->u.bsg_job;
39 	struct fc_bsg_request *bsg_request = bsg_job->request;
40 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
41 
42 	if (sp->type == SRB_FXIOCB_BCMD) {
43 		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
44 		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
45 
46 		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
47 			dma_unmap_sg(&ha->pdev->dev,
48 			    bsg_job->request_payload.sg_list,
49 			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
50 
51 		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
52 			dma_unmap_sg(&ha->pdev->dev,
53 			    bsg_job->reply_payload.sg_list,
54 			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
55 	} else {
56 		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
57 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
58 
59 		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
60 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
61 	}
62 
63 	if (sp->type == SRB_CT_CMD ||
64 	    sp->type == SRB_FXIOCB_BCMD ||
65 	    sp->type == SRB_ELS_CMD_HST) {
66 		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
67 		queue_work(ha->wq, &sp->fcport->free_work);
68 	}
69 
70 	qla2x00_rel_sp(sp);
71 }
72 
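/*
 * Validate FCP priority configuration data.  Returns 1 if the buffer is
 * usable and 0 otherwise: a buffer of all ones means no configuration
 * is present in flash, and valid data must begin with the "HQOS"
 * signature.  When @flag is 1 the entries are scanned as well and at
 * least one must have FCP_PRIO_ENTRY_TAG_VALID set.
 */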
73 int
74 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
75 	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
76 {
77 	int i, ret, num_valid;
78 	uint8_t *bcode;
79 	struct qla_fcp_prio_entry *pri_entry;
80 	uint32_t *bcode_val_ptr, bcode_val;
81 
82 	ret = 1;
83 	num_valid = 0;
84 	bcode = (uint8_t *)pri_cfg;
85 	bcode_val_ptr = (uint32_t *)pri_cfg;
86 	bcode_val = (uint32_t)(*bcode_val_ptr);
87 
88 	if (bcode_val == 0xFFFFFFFF) {
89 		/* No FCP Priority config data in flash */
90 		ql_dbg(ql_dbg_user, vha, 0x7051,
91 		    "No FCP Priority config data.\n");
92 		return 0;
93 	}
94 
95 	if (memcmp(bcode, "HQOS", 4)) {
96 		/* Invalid FCP priority data header */
97 		ql_dbg(ql_dbg_user, vha, 0x7052,
98 		    "Invalid FCP Priority data header. bcode=0x%x.\n",
99 		    bcode_val);
100 		return 0;
101 	}
102 	if (flag != 1)
103 		return ret;
104 
105 	pri_entry = &pri_cfg->entry[0];
106 	for (i = 0; i < pri_cfg->num_entries; i++) {
107 		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
108 			num_valid++;
109 		pri_entry++;
110 	}
111 
112 	if (num_valid == 0) {
113 		/* No valid FCP priority data entries */
114 		ql_dbg(ql_dbg_user, vha, 0x7053,
115 		    "No valid FCP Priority data entries.\n");
116 		ret = 0;
117 	} else {
118 		/* FCP priority data is valid */
119 		ql_dbg(ql_dbg_user, vha, 0x7054,
120 		    "Valid FCP priority data. num entries = %d.\n",
121 		    num_valid);
122 	}
123 
124 	return ret;
125 }
126 
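/*
 * Vendor command handler for FCP priority configuration.  The
 * sub-command in vendor_cmd[1] enables or disables the feature, or gets
 * or sets the configuration blob; every sub-command except
 * QLFC_FCP_PRIO_SET_CONFIG requires that ha->fcp_prio_cfg already be
 * allocated.
 */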
127 static int
128 qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
129 {
130 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
131 	struct fc_bsg_request *bsg_request = bsg_job->request;
132 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
133 	scsi_qla_host_t *vha = shost_priv(host);
134 	struct qla_hw_data *ha = vha->hw;
135 	int ret = 0;
136 	uint32_t len;
137 	uint32_t oper;
138 
139 	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
140 		ret = -EINVAL;
141 		goto exit_fcp_prio_cfg;
142 	}
143 
144 	/* Get the sub command */
145 	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
146 
147 	/* Only set config is allowed if config memory is not allocated */
148 	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
149 		ret = -EINVAL;
150 		goto exit_fcp_prio_cfg;
151 	}
152 	switch (oper) {
153 	case QLFC_FCP_PRIO_DISABLE:
154 		if (ha->flags.fcp_prio_enabled) {
155 			ha->flags.fcp_prio_enabled = 0;
156 			ha->fcp_prio_cfg->attributes &=
157 				~FCP_PRIO_ATTR_ENABLE;
158 			qla24xx_update_all_fcp_prio(vha);
159 			bsg_reply->result = DID_OK;
160 		} else {
161 			ret = -EINVAL;
162 			bsg_reply->result = (DID_ERROR << 16);
163 			goto exit_fcp_prio_cfg;
164 		}
165 		break;
166 
167 	case QLFC_FCP_PRIO_ENABLE:
168 		if (!ha->flags.fcp_prio_enabled) {
169 			if (ha->fcp_prio_cfg) {
170 				ha->flags.fcp_prio_enabled = 1;
171 				ha->fcp_prio_cfg->attributes |=
172 				    FCP_PRIO_ATTR_ENABLE;
173 				qla24xx_update_all_fcp_prio(vha);
174 				bsg_reply->result = DID_OK;
175 			} else {
176 				ret = -EINVAL;
177 				bsg_reply->result = (DID_ERROR << 16);
178 				goto exit_fcp_prio_cfg;
179 			}
180 		}
181 		break;
182 
183 	case QLFC_FCP_PRIO_GET_CONFIG:
184 		len = bsg_job->reply_payload.payload_len;
185 		if (!len || len > FCP_PRIO_CFG_SIZE) {
186 			ret = -EINVAL;
187 			bsg_reply->result = (DID_ERROR << 16);
188 			goto exit_fcp_prio_cfg;
189 		}
190 
191 		bsg_reply->result = DID_OK;
192 		bsg_reply->reply_payload_rcv_len =
193 			sg_copy_from_buffer(
194 			bsg_job->reply_payload.sg_list,
195 			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
196 			len);
197 
198 		break;
199 
200 	case QLFC_FCP_PRIO_SET_CONFIG:
201 		len = bsg_job->request_payload.payload_len;
202 		if (!len || len > FCP_PRIO_CFG_SIZE) {
203 			bsg_reply->result = (DID_ERROR << 16);
204 			ret = -EINVAL;
205 			goto exit_fcp_prio_cfg;
206 		}
207 
208 		if (!ha->fcp_prio_cfg) {
209 			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
210 			if (!ha->fcp_prio_cfg) {
211 				ql_log(ql_log_warn, vha, 0x7050,
212 				    "Unable to allocate memory for fcp prio "
213 				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
214 				bsg_reply->result = (DID_ERROR << 16);
215 				ret = -ENOMEM;
216 				goto exit_fcp_prio_cfg;
217 			}
218 		}
219 
220 		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
221 		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
222 		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
223 		    FCP_PRIO_CFG_SIZE);
224 
225 		/* validate fcp priority data */
226 
227 		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
228 			bsg_reply->result = (DID_ERROR << 16);
229 			ret = -EINVAL;
230 			/* If the buffer was invalid, then
231 			 * fcp_prio_cfg is of no use
232 			 */
233 			vfree(ha->fcp_prio_cfg);
234 			ha->fcp_prio_cfg = NULL;
235 			goto exit_fcp_prio_cfg;
236 		}
237 
238 		ha->flags.fcp_prio_enabled = 0;
239 		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
240 			ha->flags.fcp_prio_enabled = 1;
241 		qla24xx_update_all_fcp_prio(vha);
242 		bsg_reply->result = DID_OK;
243 		break;
244 	default:
245 		ret = -EINVAL;
246 		break;
247 	}
248 exit_fcp_prio_cfg:
249 	if (!ret)
250 		bsg_job_done(bsg_job, bsg_reply->result,
251 			       bsg_reply->reply_payload_rcv_len);
252 	return ret;
253 }
254 
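/*
 * ELS passthrough.  FC_BSG_RPT_ELS requests target an existing rport,
 * which must be fabric logged in; FC_BSG_HST_ELS_NOLOGIN requests carry
 * a raw destination port id, so a temporary fcport is allocated here
 * because the IOCB helpers take their addressing information from an
 * fcport structure.
 */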
255 static int
256 qla2x00_process_els(struct bsg_job *bsg_job)
257 {
258 	struct fc_bsg_request *bsg_request = bsg_job->request;
259 	struct fc_rport *rport;
260 	fc_port_t *fcport = NULL;
261 	struct Scsi_Host *host;
262 	scsi_qla_host_t *vha;
263 	struct qla_hw_data *ha;
264 	srb_t *sp;
265 	const char *type;
266 	int req_sg_cnt, rsp_sg_cnt;
267 	int rval =  (DID_ERROR << 16);
268 	uint16_t nextlid = 0;
269 
270 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
271 		rport = fc_bsg_to_rport(bsg_job);
272 		fcport = *(fc_port_t **) rport->dd_data;
273 		host = rport_to_shost(rport);
274 		vha = shost_priv(host);
275 		ha = vha->hw;
276 		type = "FC_BSG_RPT_ELS";
277 	} else {
278 		host = fc_bsg_to_shost(bsg_job);
279 		vha = shost_priv(host);
280 		ha = vha->hw;
281 		type = "FC_BSG_HST_ELS_NOLOGIN";
282 	}
283 
284 	if (!vha->flags.online) {
285 		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
286 		rval = -EIO;
287 		goto done;
288 	}
289 
290 	/* pass through is supported only for ISP 4Gb or higher */
291 	if (!IS_FWI2_CAPABLE(ha)) {
292 		ql_dbg(ql_dbg_user, vha, 0x7001,
293 		    "ELS passthru not supported for ISP23xx based adapters.\n");
294 		rval = -EPERM;
295 		goto done;
296 	}
297 
298 	/* Multiple SGs are not supported for ELS requests */
299 	if (bsg_job->request_payload.sg_cnt > 1 ||
300 		bsg_job->reply_payload.sg_cnt > 1) {
301 		ql_dbg(ql_dbg_user, vha, 0x7002,
302 		    "Multiple SGs are not supported for ELS requests, "
303 		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
304 		    bsg_job->request_payload.sg_cnt,
305 		    bsg_job->reply_payload.sg_cnt);
306 		rval = -EPERM;
307 		goto done;
308 	}
309 
310 	/* ELS request for rport */
311 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
312 		/* Make sure the rport is logged in;
313 		 * if not, perform fabric login.
314 		 */
315 		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
316 			ql_dbg(ql_dbg_user, vha, 0x7003,
317 			    "Failed to login port %06X for ELS passthru.\n",
318 			    fcport->d_id.b24);
319 			rval = -EIO;
320 			goto done;
321 		}
322 	} else {
323 		/* Allocate a dummy fcport structure, since the functions
324 		 * preparing the IOCB and mailbox command retrieve port
325 		 * specific information from the fcport structure. For host
326 		 * based ELS commands there is no pre-existing fcport.
327 		 */
328 		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
329 		if (!fcport) {
330 			rval = -ENOMEM;
331 			goto done;
332 		}
333 
334 		/* Initialize all required fields of fcport */
335 		fcport->vha = vha;
336 		fcport->d_id.b.al_pa =
337 			bsg_request->rqst_data.h_els.port_id[0];
338 		fcport->d_id.b.area =
339 			bsg_request->rqst_data.h_els.port_id[1];
340 		fcport->d_id.b.domain =
341 			bsg_request->rqst_data.h_els.port_id[2];
342 		fcport->loop_id =
343 			(fcport->d_id.b.al_pa == 0xFD) ?
344 			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
345 	}
346 
347 	req_sg_cnt =
348 		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
349 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
350 	if (!req_sg_cnt) {
351 		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
352 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
353 		rval = -ENOMEM;
354 		goto done_free_fcport;
355 	}
356 
357 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
358 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
359 	if (!rsp_sg_cnt) {
360 		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
361 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
362 		rval = -ENOMEM;
363 		goto done_free_fcport;
364 	}
365 
366 	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
367 		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
368 		ql_log(ql_log_warn, vha, 0x7008,
369 		    "dma mapping resulted in different sg counts, "
370 		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
371 		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
372 		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
373 		rval = -EAGAIN;
374 		goto done_unmap_sg;
375 	}
376 
377 	/* Alloc SRB structure */
378 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
379 	if (!sp) {
380 		rval = -ENOMEM;
381 		goto done_unmap_sg;
382 	}
383 
384 	sp->type =
385 		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
386 		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
387 	sp->name =
388 		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
389 		 "bsg_els_rpt" : "bsg_els_hst");
390 	sp->u.bsg_job = bsg_job;
391 	sp->free = qla2x00_bsg_sp_free;
392 	sp->done = qla2x00_bsg_job_done;
393 
394 	ql_dbg(ql_dbg_user, vha, 0x700a,
395 	    "bsg rqst type: %s els type: %x - loop-id=%x "
396 	    "portid=%02x%02x%02x.\n", type,
397 	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
398 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
399 
400 	rval = qla2x00_start_sp(sp);
401 	if (rval != QLA_SUCCESS) {
402 		ql_log(ql_log_warn, vha, 0x700e,
403 		    "qla2x00_start_sp failed = %d\n", rval);
404 		qla2x00_rel_sp(sp);
405 		rval = -EIO;
406 		goto done_unmap_sg;
407 	}
408 	return rval;
409 
410 done_unmap_sg:
411 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
412 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
413 	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
414 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
415 	goto done_free_fcport;
416 
417 done_free_fcport:
418 	if (bsg_request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
419 		qla2x00_free_fcport(fcport);
420 done:
421 	return rval;
422 }
423 
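/*
 * IOCBs needed to carry @dsds data segment descriptors for a CT
 * command: the command IOCB holds the first two and each continuation
 * IOCB holds five more.  For example, dsds = 12 needs 1 + 10/5 = 3
 * IOCBs, while dsds = 13 needs one more for the leftover descriptor,
 * i.e. 4.
 */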
424 static inline uint16_t
425 qla24xx_calc_ct_iocbs(uint16_t dsds)
426 {
427 	uint16_t iocbs;
428 
429 	iocbs = 1;
430 	if (dsds > 2) {
431 		iocbs += (dsds - 2) / 5;
432 		if ((dsds - 2) % 5)
433 			iocbs++;
434 	}
435 	return iocbs;
436 }
437 
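/*
 * CT passthrough.  The destination is selected by the top byte of
 * preamble_word1: 0xFC addresses the name server (NPH_SNS) and 0xFA the
 * management server; anything else is rejected.  As with host-based
 * ELS, a temporary fcport supplies the addressing information for the
 * IOCB.
 */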
438 static int
439 qla2x00_process_ct(struct bsg_job *bsg_job)
440 {
441 	srb_t *sp;
442 	struct fc_bsg_request *bsg_request = bsg_job->request;
443 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
444 	scsi_qla_host_t *vha = shost_priv(host);
445 	struct qla_hw_data *ha = vha->hw;
446 	int rval = (DID_ERROR << 16);
447 	int req_sg_cnt, rsp_sg_cnt;
448 	uint16_t loop_id;
449 	struct fc_port *fcport;
450 	char  *type = "FC_BSG_HST_CT";
451 
452 	req_sg_cnt =
453 		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
454 			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
455 	if (!req_sg_cnt) {
456 		ql_log(ql_log_warn, vha, 0x700f,
457 		    "dma_map_sg returned %d for request.\n", req_sg_cnt);
458 		rval = -ENOMEM;
459 		goto done;
460 	}
461 
462 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
463 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
464 	if (!rsp_sg_cnt) {
465 		ql_log(ql_log_warn, vha, 0x7010,
466 		    "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
467 		rval = -ENOMEM;
468 		goto done;
469 	}
470 
471 	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
472 	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
473 		ql_log(ql_log_warn, vha, 0x7011,
474 		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
475 		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
476 		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
477 		rval = -EAGAIN;
478 		goto done_unmap_sg;
479 	}
480 
481 	if (!vha->flags.online) {
482 		ql_log(ql_log_warn, vha, 0x7012,
483 		    "Host is not online.\n");
484 		rval = -EIO;
485 		goto done_unmap_sg;
486 	}
487 
488 	loop_id =
489 		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
490 			>> 24;
491 	switch (loop_id) {
492 	case 0xFC:
493 		loop_id = NPH_SNS;
494 		break;
495 	case 0xFA:
496 		loop_id = vha->mgmt_svr_loop_id;
497 		break;
498 	default:
499 		ql_dbg(ql_dbg_user, vha, 0x7013,
500 		    "Unknown loop id: %x.\n", loop_id);
501 		rval = -EINVAL;
502 		goto done_unmap_sg;
503 	}
504 
505 	/* Allocate a dummy fcport structure, since the functions preparing
506 	 * the IOCB and mailbox command retrieve port specific information
507 	 * from the fcport structure. For host based CT commands there is
508 	 * no pre-existing fcport.
509 	 */
510 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
511 	if (!fcport) {
512 		ql_log(ql_log_warn, vha, 0x7014,
513 		    "Failed to allocate fcport.\n");
514 		rval = -ENOMEM;
515 		goto done_unmap_sg;
516 	}
517 
518 	/* Initialize all required fields of fcport */
519 	fcport->vha = vha;
520 	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
521 	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
522 	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
523 	fcport->loop_id = loop_id;
524 
525 	/* Alloc SRB structure */
526 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
527 	if (!sp) {
528 		ql_log(ql_log_warn, vha, 0x7015,
529 		    "qla2x00_get_sp failed.\n");
530 		rval = -ENOMEM;
531 		goto done_free_fcport;
532 	}
533 
534 	sp->type = SRB_CT_CMD;
535 	sp->name = "bsg_ct";
536 	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
537 	sp->u.bsg_job = bsg_job;
538 	sp->free = qla2x00_bsg_sp_free;
539 	sp->done = qla2x00_bsg_job_done;
540 
541 	ql_dbg(ql_dbg_user, vha, 0x7016,
542 	    "bsg rqst type: %s ct type: %x - "
543 	    "loop-id=%x portid=%02x%02x%02x.\n", type,
544 	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
545 	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
546 	    fcport->d_id.b.al_pa);
547 
548 	rval = qla2x00_start_sp(sp);
549 	if (rval != QLA_SUCCESS) {
550 		ql_log(ql_log_warn, vha, 0x7017,
551 		    "qla2x00_start_sp failed=%d.\n", rval);
552 		qla2x00_rel_sp(sp);
553 		rval = -EIO;
554 		goto done_free_fcport;
555 	}
556 	return rval;
557 
558 done_free_fcport:
559 	qla2x00_free_fcport(fcport);
560 done_unmap_sg:
561 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
562 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
563 	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
564 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
565 done:
566 	return rval;
567 }
568 
569 /* Disable loopback mode */
570 static inline int
571 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
572 			    int wait, int wait2)
573 {
574 	int ret = 0;
575 	int rval = 0;
576 	uint16_t new_config[4];
577 	struct qla_hw_data *ha = vha->hw;
578 
579 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
580 		goto done_reset_internal;
581 
582 	memset(new_config, 0, sizeof(new_config));
583 	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
584 	    ENABLE_INTERNAL_LOOPBACK ||
585 	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
586 	    ENABLE_EXTERNAL_LOOPBACK) {
587 		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
588 		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
589 		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
590 		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
591 
592 		ha->notify_dcbx_comp = wait;
593 		ha->notify_lb_portup_comp = wait2;
594 
595 		ret = qla81xx_set_port_config(vha, new_config);
596 		if (ret != QLA_SUCCESS) {
597 			ql_log(ql_log_warn, vha, 0x7025,
598 			    "Set port config failed.\n");
599 			ha->notify_dcbx_comp = 0;
600 			ha->notify_lb_portup_comp = 0;
601 			rval = -EINVAL;
602 			goto done_reset_internal;
603 		}
604 
605 		/* Wait for DCBX complete event */
606 		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
607 			(DCBX_COMP_TIMEOUT * HZ))) {
608 			ql_dbg(ql_dbg_user, vha, 0x7026,
609 			    "DCBX completion not received.\n");
610 			ha->notify_dcbx_comp = 0;
611 			ha->notify_lb_portup_comp = 0;
612 			rval = -EINVAL;
613 			goto done_reset_internal;
614 		} else
615 			ql_dbg(ql_dbg_user, vha, 0x7027,
616 			    "DCBX completion received.\n");
617 
618 		if (wait2 &&
619 		    !wait_for_completion_timeout(&ha->lb_portup_comp,
620 		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
621 			ql_dbg(ql_dbg_user, vha, 0x70c5,
622 			    "Port up completion not received.\n");
623 			ha->notify_lb_portup_comp = 0;
624 			rval = -EINVAL;
625 			goto done_reset_internal;
626 		} else
627 			ql_dbg(ql_dbg_user, vha, 0x70c6,
628 			    "Port up completion received.\n");
629 
630 		ha->notify_dcbx_comp = 0;
631 		ha->notify_lb_portup_comp = 0;
632 	}
633 done_reset_internal:
634 	return rval;
635 }
636 
637 /*
638  * Set the port configuration to enable the internal or external loopback
639  * depending on the loopback mode.
640  */
641 static inline int
642 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
643 	uint16_t *new_config, uint16_t mode)
644 {
645 	int ret = 0;
646 	int rval = 0;
647 	unsigned long rem_tmo = 0, current_tmo = 0;
648 	struct qla_hw_data *ha = vha->hw;
649 
650 	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
651 		goto done_set_internal;
652 
653 	if (mode == INTERNAL_LOOPBACK)
654 		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
655 	else if (mode == EXTERNAL_LOOPBACK)
656 		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
657 	ql_dbg(ql_dbg_user, vha, 0x70be,
658 	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
659 
660 	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
661 
662 	ha->notify_dcbx_comp = 1;
663 	ret = qla81xx_set_port_config(vha, new_config);
664 	if (ret != QLA_SUCCESS) {
665 		ql_log(ql_log_warn, vha, 0x7021,
666 		    "set port config failed.\n");
667 		ha->notify_dcbx_comp = 0;
668 		rval = -EINVAL;
669 		goto done_set_internal;
670 	}
671 
672 	/* Wait for DCBX complete event */
673 	current_tmo = DCBX_COMP_TIMEOUT * HZ;
674 	while (1) {
675 		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
676 		    current_tmo);
677 		if (!ha->idc_extend_tmo || rem_tmo) {
678 			ha->idc_extend_tmo = 0;
679 			break;
680 		}
681 		current_tmo = ha->idc_extend_tmo * HZ;
682 		ha->idc_extend_tmo = 0;
683 	}
684 
685 	if (!rem_tmo) {
686 		ql_dbg(ql_dbg_user, vha, 0x7022,
687 		    "DCBX completion not received.\n");
688 		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
689 		/*
690 		 * If the reset of the loopback mode doesn't work take a FCoE
691 		 * dump and reset the chip.
692 		 */
693 		if (ret) {
694 			qla2xxx_dump_fw(vha);
695 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
696 		}
697 		rval = -EINVAL;
698 	} else {
699 		if (ha->flags.idc_compl_status) {
700 			ql_dbg(ql_dbg_user, vha, 0x70c3,
701 			    "Bad status in IDC Completion AEN\n");
702 			rval = -EINVAL;
703 			ha->flags.idc_compl_status = 0;
704 		} else
705 			ql_dbg(ql_dbg_user, vha, 0x7023,
706 			    "DCBX completion received.\n");
707 	}
708 
709 	ha->notify_dcbx_comp = 0;
710 	ha->idc_extend_tmo = 0;
711 
712 done_set_internal:
713 	return rval;
714 }
715 
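/*
 * Loopback/echo diagnostic.  vendor_cmd[1] carries the loopback options
 * and vendor_cmd[2] the iteration count.  On a switched (F-port)
 * topology an ECHO test may be issued instead of a loopback; on
 * 81xx/8031/8044 CNA adapters the port configuration is first switched
 * into the requested loopback mode and restored afterwards.  The
 * mailbox response registers and the command code actually sent are
 * appended to the BSG reply after the fc_bsg_reply structure.
 */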
716 static int
717 qla2x00_process_loopback(struct bsg_job *bsg_job)
718 {
719 	struct fc_bsg_request *bsg_request = bsg_job->request;
720 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
721 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
722 	scsi_qla_host_t *vha = shost_priv(host);
723 	struct qla_hw_data *ha = vha->hw;
724 	int rval;
725 	uint8_t command_sent;
726 	char *type;
727 	struct msg_echo_lb elreq;
728 	uint16_t response[MAILBOX_REGISTER_COUNT];
729 	uint16_t config[4], new_config[4];
730 	uint8_t *fw_sts_ptr;
731 	void *req_data = NULL;
732 	dma_addr_t req_data_dma;
733 	uint32_t req_data_len;
734 	uint8_t *rsp_data = NULL;
735 	dma_addr_t rsp_data_dma;
736 	uint32_t rsp_data_len;
737 
738 	if (!vha->flags.online) {
739 		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
740 		return -EIO;
741 	}
742 
743 	memset(&elreq, 0, sizeof(elreq));
744 
745 	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
746 		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
747 		DMA_TO_DEVICE);
748 
749 	if (!elreq.req_sg_cnt) {
750 		ql_log(ql_log_warn, vha, 0x701a,
751 		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
752 		return -ENOMEM;
753 	}
754 
755 	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
756 		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
757 		DMA_FROM_DEVICE);
758 
759 	if (!elreq.rsp_sg_cnt) {
760 		ql_log(ql_log_warn, vha, 0x701b,
761 		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
762 		rval = -ENOMEM;
763 		goto done_unmap_req_sg;
764 	}
765 
766 	if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
767 		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
768 		ql_log(ql_log_warn, vha, 0x701c,
769 		    "dma mapping resulted in different sg counts, "
770 		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
771 		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
772 		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
773 		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
774 		rval = -EAGAIN;
775 		goto done_unmap_sg;
776 	}
777 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
778 	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
779 		&req_data_dma, GFP_KERNEL);
780 	if (!req_data) {
781 		ql_log(ql_log_warn, vha, 0x701d,
782 		    "dma alloc failed for req_data.\n");
783 		rval = -ENOMEM;
784 		goto done_unmap_sg;
785 	}
786 
787 	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
788 		&rsp_data_dma, GFP_KERNEL);
789 	if (!rsp_data) {
790 		ql_log(ql_log_warn, vha, 0x7004,
791 		    "dma alloc failed for rsp_data.\n");
792 		rval = -ENOMEM;
793 		goto done_free_dma_req;
794 	}
795 
796 	/* Copy the request buffer in req_data now */
797 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
798 		bsg_job->request_payload.sg_cnt, req_data, req_data_len);
799 
800 	elreq.send_dma = req_data_dma;
801 	elreq.rcv_dma = rsp_data_dma;
802 	elreq.transfer_size = req_data_len;
803 
804 	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
805 	elreq.iteration_count =
806 	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];
807 
808 	if (atomic_read(&vha->loop_state) == LOOP_READY &&
809 	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
810 	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
811 	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
812 	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
813 	    elreq.options == EXTERNAL_LOOPBACK))) {
814 		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
815 		ql_dbg(ql_dbg_user, vha, 0x701e,
816 		    "BSG request type: %s.\n", type);
817 		command_sent = INT_DEF_LB_ECHO_CMD;
818 		rval = qla2x00_echo_test(vha, &elreq, response);
819 	} else {
820 		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
821 			memset(config, 0, sizeof(config));
822 			memset(new_config, 0, sizeof(new_config));
823 
824 			if (qla81xx_get_port_config(vha, config)) {
825 				ql_log(ql_log_warn, vha, 0x701f,
826 				    "Get port config failed.\n");
827 				rval = -EPERM;
828 				goto done_free_dma_rsp;
829 			}
830 
831 			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
832 				ql_dbg(ql_dbg_user, vha, 0x70c4,
833 				    "Loopback operation already in "
834 				    "progress.\n");
835 				rval = -EAGAIN;
836 				goto done_free_dma_rsp;
837 			}
838 
839 			ql_dbg(ql_dbg_user, vha, 0x70c0,
840 			    "elreq.options=%04x\n", elreq.options);
841 
842 			if (elreq.options == EXTERNAL_LOOPBACK) {
843 				if (IS_QLA8031(ha) || IS_QLA8044(ha))
844 					rval = qla81xx_set_loopback_mode(vha,
845 					    config, new_config, elreq.options);
846 				else
847 					rval = qla81xx_reset_loopback_mode(vha,
848 					    config, 1, 0);
849 			} else {
850 				rval = qla81xx_set_loopback_mode(vha, config,
851 				    new_config, elreq.options);
852 			}
853 			if (rval) {
854 				rval = -EPERM;
855 				goto done_free_dma_rsp;
856 			}
857 
858 			type = "FC_BSG_HST_VENDOR_LOOPBACK";
859 			ql_dbg(ql_dbg_user, vha, 0x7028,
860 			    "BSG request type: %s.\n", type);
861 
862 			command_sent = INT_DEF_LB_LOOPBACK_CMD;
863 			rval = qla2x00_loopback_test(vha, &elreq, response);
864 
865 			if (response[0] == MBS_COMMAND_ERROR &&
866 					response[1] == MBS_LB_RESET) {
867 				ql_log(ql_log_warn, vha, 0x7029,
868 				    "MBX command error, Aborting ISP.\n");
869 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
870 				qla2xxx_wake_dpc(vha);
871 				qla2x00_wait_for_chip_reset(vha);
872 				/* Also reset the MPI */
873 				if (IS_QLA81XX(ha)) {
874 					if (qla81xx_restart_mpi_firmware(vha) !=
875 					    QLA_SUCCESS) {
876 						ql_log(ql_log_warn, vha, 0x702a,
877 						    "MPI reset failed.\n");
878 					}
879 				}
880 
881 				rval = -EIO;
882 				goto done_free_dma_rsp;
883 			}
884 
885 			if (new_config[0]) {
886 				int ret;
887 
888 				/* Revert back to original port config
889 				 * Also clear internal loopback
890 				 */
891 				ret = qla81xx_reset_loopback_mode(vha,
892 				    new_config, 0, 1);
893 				if (ret) {
894 					/*
895 					 * If the reset of the loopback mode
896 					 * doesn't work take FCoE dump and then
897 					 * reset the chip.
898 					 */
899 					qla2xxx_dump_fw(vha);
900 					set_bit(ISP_ABORT_NEEDED,
901 					    &vha->dpc_flags);
902 				}
903 
904 			}
905 
906 		} else {
907 			type = "FC_BSG_HST_VENDOR_LOOPBACK";
908 			ql_dbg(ql_dbg_user, vha, 0x702b,
909 			    "BSG request type: %s.\n", type);
910 			command_sent = INT_DEF_LB_LOOPBACK_CMD;
911 			rval = qla2x00_loopback_test(vha, &elreq, response);
912 		}
913 	}
914 
915 	if (rval) {
916 		ql_log(ql_log_warn, vha, 0x702c,
917 		    "Vendor request %s failed.\n", type);
918 
919 		rval = 0;
920 		bsg_reply->result = (DID_ERROR << 16);
921 		bsg_reply->reply_payload_rcv_len = 0;
922 	} else {
923 		ql_dbg(ql_dbg_user, vha, 0x702d,
924 		    "Vendor request %s completed.\n", type);
925 		bsg_reply->result = (DID_OK << 16);
926 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
927 			bsg_job->reply_payload.sg_cnt, rsp_data,
928 			rsp_data_len);
929 	}
930 
931 	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
932 	    sizeof(response) + sizeof(uint8_t);
933 	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
934 	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
935 			sizeof(response));
936 	fw_sts_ptr += sizeof(response);
937 	*fw_sts_ptr = command_sent;
938 
939 done_free_dma_rsp:
940 	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
941 		rsp_data, rsp_data_dma);
942 done_free_dma_req:
943 	dma_free_coherent(&ha->pdev->dev, req_data_len,
944 		req_data, req_data_dma);
945 done_unmap_sg:
946 	dma_unmap_sg(&ha->pdev->dev,
947 	    bsg_job->reply_payload.sg_list,
948 	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
949 done_unmap_req_sg:
950 	dma_unmap_sg(&ha->pdev->dev,
951 	    bsg_job->request_payload.sg_list,
952 	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
953 	if (!rval)
954 		bsg_job_done(bsg_job, bsg_reply->result,
955 			       bsg_reply->reply_payload_rcv_len);
956 	return rval;
957 }
958 
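/*
 * ISP84xx chip reset.  A vendor_cmd[1] value of A84_ISSUE_RESET_DIAG_FW
 * resets the chip into its diagnostic firmware; any other value
 * requests a reset into the regular firmware.
 */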
959 static int
960 qla84xx_reset(struct bsg_job *bsg_job)
961 {
962 	struct fc_bsg_request *bsg_request = bsg_job->request;
963 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
964 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
965 	scsi_qla_host_t *vha = shost_priv(host);
966 	struct qla_hw_data *ha = vha->hw;
967 	int rval = 0;
968 	uint32_t flag;
969 
970 	if (!IS_QLA84XX(ha)) {
971 		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
972 		return -EINVAL;
973 	}
974 
975 	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
976 
977 	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
978 
979 	if (rval) {
980 		ql_log(ql_log_warn, vha, 0x7030,
981 		    "Vendor request 84xx reset failed.\n");
982 		rval = (DID_ERROR << 16);
983 
984 	} else {
985 		ql_dbg(ql_dbg_user, vha, 0x7031,
986 		    "Vendor request 84xx reset completed.\n");
987 		bsg_reply->result = DID_OK;
988 		bsg_job_done(bsg_job, bsg_reply->result,
989 			       bsg_reply->reply_payload_rcv_len);
990 	}
991 
992 	return rval;
993 }
994 
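/*
 * ISP84xx firmware update.  The image is copied from the request
 * payload into a coherent buffer and handed to the chip via a
 * VERIFY_CHIP IOCB; the firmware version is taken from the third dword
 * of the image.  A84_ISSUE_UPDATE_DIAGFW_CMD in vendor_cmd[1] marks the
 * image as diagnostic firmware.
 */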
995 static int
996 qla84xx_updatefw(struct bsg_job *bsg_job)
997 {
998 	struct fc_bsg_request *bsg_request = bsg_job->request;
999 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1000 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1001 	scsi_qla_host_t *vha = shost_priv(host);
1002 	struct qla_hw_data *ha = vha->hw;
1003 	struct verify_chip_entry_84xx *mn = NULL;
1004 	dma_addr_t mn_dma, fw_dma;
1005 	void *fw_buf = NULL;
1006 	int rval = 0;
1007 	uint32_t sg_cnt;
1008 	uint32_t data_len;
1009 	uint16_t options;
1010 	uint32_t flag;
1011 	uint32_t fw_ver;
1012 
1013 	if (!IS_QLA84XX(ha)) {
1014 		ql_dbg(ql_dbg_user, vha, 0x7032,
1015 		    "Not 84xx, exiting.\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1020 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1021 	if (!sg_cnt) {
1022 		ql_log(ql_log_warn, vha, 0x7033,
1023 		    "dma_map_sg returned %d for request.\n", sg_cnt);
1024 		return -ENOMEM;
1025 	}
1026 
1027 	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1028 		ql_log(ql_log_warn, vha, 0x7034,
1029 		    "DMA mapping resulted in different sg counts, "
1030 		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1031 		    bsg_job->request_payload.sg_cnt, sg_cnt);
1032 		rval = -EAGAIN;
1033 		goto done_unmap_sg;
1034 	}
1035 
1036 	data_len = bsg_job->request_payload.payload_len;
1037 	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1038 		&fw_dma, GFP_KERNEL);
1039 	if (!fw_buf) {
1040 		ql_log(ql_log_warn, vha, 0x7035,
1041 		    "DMA alloc failed for fw_buf.\n");
1042 		rval = -ENOMEM;
1043 		goto done_unmap_sg;
1044 	}
1045 
1046 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1047 		bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1048 
1049 	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1050 	if (!mn) {
1051 		ql_log(ql_log_warn, vha, 0x7036,
1052 		    "DMA alloc failed for fw buffer.\n");
1053 		rval = -ENOMEM;
1054 		goto done_free_fw_buf;
1055 	}
1056 
1057 	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1058 	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);
1059 
1060 	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1061 	mn->entry_count = 1;
1062 
1063 	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1064 	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1065 		options |= VCO_DIAG_FW;
1066 
1067 	mn->options = cpu_to_le16(options);
1068 	mn->fw_ver =  cpu_to_le32(fw_ver);
1069 	mn->fw_size =  cpu_to_le32(data_len);
1070 	mn->fw_seq_size =  cpu_to_le32(data_len);
1071 	put_unaligned_le64(fw_dma, &mn->dsd.address);
1072 	mn->dsd.length = cpu_to_le32(data_len);
1073 	mn->data_seg_cnt = cpu_to_le16(1);
1074 
1075 	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1076 
1077 	if (rval) {
1078 		ql_log(ql_log_warn, vha, 0x7037,
1079 		    "Vendor request 84xx updatefw failed.\n");
1080 
1081 		rval = (DID_ERROR << 16);
1082 	} else {
1083 		ql_dbg(ql_dbg_user, vha, 0x7038,
1084 		    "Vendor request 84xx updatefw completed.\n");
1085 
1086 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1087 		bsg_reply->result = DID_OK;
1088 	}
1089 
1090 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1091 
1092 done_free_fw_buf:
1093 	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1094 
1095 done_unmap_sg:
1096 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1097 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1098 
1099 	if (!rval)
1100 		bsg_job_done(bsg_job, bsg_reply->result,
1101 			       bsg_reply->reply_payload_rcv_len);
1102 	return rval;
1103 }
1104 
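/*
 * ISP84xx management passthrough built on the ACCESS_CHIP IOCB.
 * QLA84_MGMT_READ_MEM and QLA84_MGMT_GET_INFO return data through the
 * reply payload, QLA84_MGMT_WRITE_MEM takes data from the request
 * payload, and QLA84_MGMT_CHNG_CONFIG passes its parameters entirely in
 * the IOCB with no data phase.
 */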
1105 static int
1106 qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
1107 {
1108 	struct fc_bsg_request *bsg_request = bsg_job->request;
1109 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1110 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1111 	scsi_qla_host_t *vha = shost_priv(host);
1112 	struct qla_hw_data *ha = vha->hw;
1113 	struct access_chip_84xx *mn = NULL;
1114 	dma_addr_t mn_dma, mgmt_dma;
1115 	void *mgmt_b = NULL;
1116 	int rval = 0;
1117 	struct qla_bsg_a84_mgmt *ql84_mgmt;
1118 	uint32_t sg_cnt;
1119 	uint32_t data_len = 0;
1120 	uint32_t dma_direction = DMA_NONE;
1121 
1122 	if (!IS_QLA84XX(ha)) {
1123 		ql_log(ql_log_warn, vha, 0x703a,
1124 		    "Not 84xx, exiting.\n");
1125 		return -EINVAL;
1126 	}
1127 
1128 	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1129 	if (!mn) {
1130 		ql_log(ql_log_warn, vha, 0x703c,
1131 		    "DMA alloc failed for fw buffer.\n");
1132 		return -ENOMEM;
1133 	}
1134 
1135 	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1136 	mn->entry_count = 1;
1137 	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
1138 	switch (ql84_mgmt->mgmt.cmd) {
1139 	case QLA84_MGMT_READ_MEM:
1140 	case QLA84_MGMT_GET_INFO:
1141 		sg_cnt = dma_map_sg(&ha->pdev->dev,
1142 			bsg_job->reply_payload.sg_list,
1143 			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1144 		if (!sg_cnt) {
1145 			ql_log(ql_log_warn, vha, 0x703d,
1146 			    "dma_map_sg returned %d for reply.\n", sg_cnt);
1147 			rval = -ENOMEM;
1148 			goto exit_mgmt;
1149 		}
1150 
1151 		dma_direction = DMA_FROM_DEVICE;
1152 
1153 		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1154 			ql_log(ql_log_warn, vha, 0x703e,
1155 			    "DMA mapping resulted in different sg counts, "
1156 			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1157 			    bsg_job->reply_payload.sg_cnt, sg_cnt);
1158 			rval = -EAGAIN;
1159 			goto done_unmap_sg;
1160 		}
1161 
1162 		data_len = bsg_job->reply_payload.payload_len;
1163 
1164 		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1165 		    &mgmt_dma, GFP_KERNEL);
1166 		if (!mgmt_b) {
1167 			ql_log(ql_log_warn, vha, 0x703f,
1168 			    "DMA alloc failed for mgmt_b.\n");
1169 			rval = -ENOMEM;
1170 			goto done_unmap_sg;
1171 		}
1172 
1173 		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1174 			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1175 			mn->parameter1 =
1176 				cpu_to_le32(
1177 				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1178 
1179 		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1180 			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1181 			mn->parameter1 =
1182 				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1183 
1184 			mn->parameter2 =
1185 				cpu_to_le32(
1186 				ql84_mgmt->mgmt.mgmtp.u.info.context);
1187 		}
1188 		break;
1189 
1190 	case QLA84_MGMT_WRITE_MEM:
1191 		sg_cnt = dma_map_sg(&ha->pdev->dev,
1192 			bsg_job->request_payload.sg_list,
1193 			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1194 
1195 		if (!sg_cnt) {
1196 			ql_log(ql_log_warn, vha, 0x7040,
1197 			    "dma_map_sg returned %d.\n", sg_cnt);
1198 			rval = -ENOMEM;
1199 			goto exit_mgmt;
1200 		}
1201 
1202 		dma_direction = DMA_TO_DEVICE;
1203 
1204 		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1205 			ql_log(ql_log_warn, vha, 0x7041,
1206 			    "DMA mapping resulted in different sg counts, "
1207 			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1208 			    bsg_job->request_payload.sg_cnt, sg_cnt);
1209 			rval = -EAGAIN;
1210 			goto done_unmap_sg;
1211 		}
1212 
1213 		data_len = bsg_job->request_payload.payload_len;
1214 		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1215 			&mgmt_dma, GFP_KERNEL);
1216 		if (!mgmt_b) {
1217 			ql_log(ql_log_warn, vha, 0x7042,
1218 			    "DMA alloc failed for mgmt_b.\n");
1219 			rval = -ENOMEM;
1220 			goto done_unmap_sg;
1221 		}
1222 
1223 		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1224 			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1225 
1226 		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1227 		mn->parameter1 =
1228 			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1229 		break;
1230 
1231 	case QLA84_MGMT_CHNG_CONFIG:
1232 		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1233 		mn->parameter1 =
1234 			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1235 
1236 		mn->parameter2 =
1237 			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1238 
1239 		mn->parameter3 =
1240 			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1241 		break;
1242 
1243 	default:
1244 		rval = -EIO;
1245 		goto exit_mgmt;
1246 	}
1247 
1248 	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1249 		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1250 		mn->dseg_count = cpu_to_le16(1);
1251 		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
1252 		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
1253 	}
1254 
1255 	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1256 
1257 	if (rval) {
1258 		ql_log(ql_log_warn, vha, 0x7043,
1259 		    "Vendor request 84xx mgmt failed.\n");
1260 
1261 		rval = (DID_ERROR << 16);
1262 
1263 	} else {
1264 		ql_dbg(ql_dbg_user, vha, 0x7044,
1265 		    "Vendor request 84xx mgmt completed.\n");
1266 
1267 		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1268 		bsg_reply->result = DID_OK;
1269 
1270 		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1271 			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1272 			bsg_reply->reply_payload_rcv_len =
1273 				bsg_job->reply_payload.payload_len;
1274 
1275 			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1276 				bsg_job->reply_payload.sg_cnt, mgmt_b,
1277 				data_len);
1278 		}
1279 	}
1280 
1281 done_unmap_sg:
1282 	if (mgmt_b)
1283 		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1284 
1285 	if (dma_direction == DMA_TO_DEVICE)
1286 		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1287 			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1288 	else if (dma_direction == DMA_FROM_DEVICE)
1289 		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1290 			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1291 
1292 exit_mgmt:
1293 	dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1294 
1295 	if (!rval)
1296 		bsg_job_done(bsg_job, bsg_reply->result,
1297 			       bsg_reply->reply_payload_rcv_len);
1298 	return rval;
1299 }
1300 
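/*
 * iiDMA speed control.  The target is located by WWPN among
 * vha->vp_fcports (target ports only) and must be online and logged
 * in.  port_param->mode selects between setting the speed (non-zero)
 * and querying it (zero); a query copies the updated qla_port_param
 * back into the reply.
 */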
1301 static int
1302 qla24xx_iidma(struct bsg_job *bsg_job)
1303 {
1304 	struct fc_bsg_request *bsg_request = bsg_job->request;
1305 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1306 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1307 	scsi_qla_host_t *vha = shost_priv(host);
1308 	int rval = 0;
1309 	struct qla_port_param *port_param = NULL;
1310 	fc_port_t *fcport = NULL;
1311 	int found = 0;
1312 	uint16_t mb[MAILBOX_REGISTER_COUNT];
1313 	uint8_t *rsp_ptr = NULL;
1314 
1315 	if (!IS_IIDMA_CAPABLE(vha->hw)) {
1316 		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1317 		return -EINVAL;
1318 	}
1319 
1320 	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1321 	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1322 		ql_log(ql_log_warn, vha, 0x7048,
1323 		    "Invalid destination type.\n");
1324 		return -EINVAL;
1325 	}
1326 
1327 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1328 		if (fcport->port_type != FCT_TARGET)
1329 			continue;
1330 
1331 		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1332 			fcport->port_name, sizeof(fcport->port_name)))
1333 			continue;
1334 
1335 		found = 1;
1336 		break;
1337 	}
1338 
1339 	if (!found) {
1340 		ql_log(ql_log_warn, vha, 0x7049,
1341 		    "Failed to find port.\n");
1342 		return -EINVAL;
1343 	}
1344 
1345 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
1346 		ql_log(ql_log_warn, vha, 0x704a,
1347 		    "Port is not online.\n");
1348 		return -EINVAL;
1349 	}
1350 
1351 	if (fcport->flags & FCF_LOGIN_NEEDED) {
1352 		ql_log(ql_log_warn, vha, 0x704b,
1353 		    "Remote port is not logged in, flags = 0x%x.\n", fcport->flags);
1354 		return -EINVAL;
1355 	}
1356 
1357 	if (port_param->mode)
1358 		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1359 			port_param->speed, mb);
1360 	else
1361 		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1362 			&port_param->speed, mb);
1363 
1364 	if (rval) {
1365 		ql_log(ql_log_warn, vha, 0x704c,
1366 		    "iiDMA cmd failed for %8phN -- "
1367 		    "%04x %x %04x %04x.\n", fcport->port_name,
1368 		    rval, fcport->fp_speed, mb[0], mb[1]);
1369 		rval = (DID_ERROR << 16);
1370 	} else {
1371 		if (!port_param->mode) {
1372 			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1373 				sizeof(struct qla_port_param);
1374 
1375 			rsp_ptr = ((uint8_t *)bsg_reply) +
1376 				sizeof(struct fc_bsg_reply);
1377 
1378 			memcpy(rsp_ptr, port_param,
1379 				sizeof(struct qla_port_param));
1380 		}
1381 
1382 		bsg_reply->result = DID_OK;
1383 		bsg_job_done(bsg_job, bsg_reply->result,
1384 			       bsg_reply->reply_payload_rcv_len);
1385 	}
1386 
1387 	return rval;
1388 }
1389 
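/*
 * Common setup for option ROM read and update.  Validates the requested
 * region against the flash layout, allocates the staging buffer and
 * moves optrom_state to QLA_SREADING or QLA_SWRITING.  Both callers
 * hold ha->optrom_mutex across the setup and the transfer itself.
 */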
1390 static int
1391 qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
1392 	uint8_t is_update)
1393 {
1394 	struct fc_bsg_request *bsg_request = bsg_job->request;
1395 	uint32_t start = 0;
1396 	int valid = 0;
1397 	struct qla_hw_data *ha = vha->hw;
1398 
1399 	if (unlikely(pci_channel_offline(ha->pdev)))
1400 		return -EINVAL;
1401 
1402 	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1403 	if (start > ha->optrom_size) {
1404 		ql_log(ql_log_warn, vha, 0x7055,
1405 		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
1406 		return -EINVAL;
1407 	}
1408 
1409 	if (ha->optrom_state != QLA_SWAITING) {
1410 		ql_log(ql_log_info, vha, 0x7056,
1411 		    "optrom_state %d.\n", ha->optrom_state);
1412 		return -EBUSY;
1413 	}
1414 
1415 	ha->optrom_region_start = start;
1416 	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1417 	if (is_update) {
1418 		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1419 			valid = 1;
1420 		else if (start == (ha->flt_region_boot * 4) ||
1421 		    start == (ha->flt_region_fw * 4))
1422 			valid = 1;
1423 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1424 		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
1425 		    IS_QLA28XX(ha))
1426 			valid = 1;
1427 		if (!valid) {
1428 			ql_log(ql_log_warn, vha, 0x7058,
1429 			    "Invalid start region 0x%x/0x%x.\n", start,
1430 			    bsg_job->request_payload.payload_len);
1431 			return -EINVAL;
1432 		}
1433 
1434 		ha->optrom_region_size = start +
1435 		    bsg_job->request_payload.payload_len > ha->optrom_size ?
1436 		    ha->optrom_size - start :
1437 		    bsg_job->request_payload.payload_len;
1438 		ha->optrom_state = QLA_SWRITING;
1439 	} else {
1440 		ha->optrom_region_size = start +
1441 		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
1442 		    ha->optrom_size - start :
1443 		    bsg_job->reply_payload.payload_len;
1444 		ha->optrom_state = QLA_SREADING;
1445 	}
1446 
1447 	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
1448 	if (!ha->optrom_buffer) {
1449 		ql_log(ql_log_warn, vha, 0x7059,
1450 		    "Unable to allocate memory for optrom operation "
1451 		    "(%x).\n", ha->optrom_region_size);
1452 
1453 		ha->optrom_state = QLA_SWAITING;
1454 		return -ENOMEM;
1455 	}
1456 
1457 	return 0;
1458 }
1459 
1460 static int
1461 qla2x00_read_optrom(struct bsg_job *bsg_job)
1462 {
1463 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1464 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1465 	scsi_qla_host_t *vha = shost_priv(host);
1466 	struct qla_hw_data *ha = vha->hw;
1467 	int rval = 0;
1468 
1469 	if (ha->flags.nic_core_reset_hdlr_active)
1470 		return -EBUSY;
1471 
1472 	mutex_lock(&ha->optrom_mutex);
1473 	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1474 	if (rval) {
1475 		mutex_unlock(&ha->optrom_mutex);
1476 		return rval;
1477 	}
1478 
1479 	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1480 	    ha->optrom_region_start, ha->optrom_region_size);
1481 
1482 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1483 	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1484 	    ha->optrom_region_size);
1485 
1486 	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1487 	bsg_reply->result = DID_OK;
1488 	vfree(ha->optrom_buffer);
1489 	ha->optrom_buffer = NULL;
1490 	ha->optrom_state = QLA_SWAITING;
1491 	mutex_unlock(&ha->optrom_mutex);
1492 	bsg_job_done(bsg_job, bsg_reply->result,
1493 		       bsg_reply->reply_payload_rcv_len);
1494 	return rval;
1495 }
1496 
1497 static int
1498 qla2x00_update_optrom(struct bsg_job *bsg_job)
1499 {
1500 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1501 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1502 	scsi_qla_host_t *vha = shost_priv(host);
1503 	struct qla_hw_data *ha = vha->hw;
1504 	int rval = 0;
1505 
1506 	mutex_lock(&ha->optrom_mutex);
1507 	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1508 	if (rval) {
1509 		mutex_unlock(&ha->optrom_mutex);
1510 		return rval;
1511 	}
1512 
1513 	/* Set isp82xx_no_md_cap so that no minidump is captured */
1514 	ha->flags.isp82xx_no_md_cap = 1;
1515 
1516 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1517 	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1518 	    ha->optrom_region_size);
1519 
1520 	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1521 	    ha->optrom_region_start, ha->optrom_region_size);
1522 
1523 	if (rval) {
1524 		bsg_reply->result = -EINVAL;
1525 		rval = -EINVAL;
1526 	} else {
1527 		bsg_reply->result = DID_OK;
1528 	}
1529 	vfree(ha->optrom_buffer);
1530 	ha->optrom_buffer = NULL;
1531 	ha->optrom_state = QLA_SWAITING;
1532 	mutex_unlock(&ha->optrom_mutex);
1533 	bsg_job_done(bsg_job, bsg_reply->result,
1534 		       bsg_reply->reply_payload_rcv_len);
1535 	return rval;
1536 }
1537 
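/*
 * Update FRU image version fields.  The request payload holds a
 * qla_image_version_list whose entries are written one at a time
 * through the SFP/mailbox interface; the vendor response carries the
 * status of the first failure, if any.
 */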
1538 static int
1539 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1540 {
1541 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1542 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1543 	scsi_qla_host_t *vha = shost_priv(host);
1544 	struct qla_hw_data *ha = vha->hw;
1545 	int rval = 0;
1546 	uint8_t bsg[DMA_POOL_SIZE];
1547 	struct qla_image_version_list *list = (void *)bsg;
1548 	struct qla_image_version *image;
1549 	uint32_t count;
1550 	dma_addr_t sfp_dma;
1551 	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1552 
1553 	if (!sfp) {
1554 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1555 		    EXT_STATUS_NO_MEMORY;
1556 		goto done;
1557 	}
1558 
1559 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1560 	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1561 
1562 	image = list->version;
1563 	count = list->count;
1564 	while (count--) {
1565 		memcpy(sfp, &image->field_info, sizeof(image->field_info));
1566 		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1567 		    image->field_address.device, image->field_address.offset,
1568 		    sizeof(image->field_info), image->field_address.option);
1569 		if (rval) {
1570 			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1571 			    EXT_STATUS_MAILBOX;
1572 			goto dealloc;
1573 		}
1574 		image++;
1575 	}
1576 
1577 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1578 
1579 dealloc:
1580 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1581 
1582 done:
1583 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1584 	bsg_reply->result = DID_OK << 16;
1585 	bsg_job_done(bsg_job, bsg_reply->result,
1586 		       bsg_reply->reply_payload_rcv_len);
1587 
1588 	return 0;
1589 }
1590 
1591 static int
1592 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1593 {
1594 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1595 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1596 	scsi_qla_host_t *vha = shost_priv(host);
1597 	struct qla_hw_data *ha = vha->hw;
1598 	int rval = 0;
1599 	uint8_t bsg[DMA_POOL_SIZE];
1600 	struct qla_status_reg *sr = (void *)bsg;
1601 	dma_addr_t sfp_dma;
1602 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1603 
1604 	if (!sfp) {
1605 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1606 		    EXT_STATUS_NO_MEMORY;
1607 		goto done;
1608 	}
1609 
1610 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1611 	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1612 
1613 	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1614 	    sr->field_address.device, sr->field_address.offset,
1615 	    sizeof(sr->status_reg), sr->field_address.option);
1616 	sr->status_reg = *sfp;
1617 
1618 	if (rval) {
1619 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1620 		    EXT_STATUS_MAILBOX;
1621 		goto dealloc;
1622 	}
1623 
1624 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1625 	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1626 
1627 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1628 
1629 dealloc:
1630 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1631 
1632 done:
1633 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1634 	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1635 	bsg_reply->result = DID_OK << 16;
1636 	bsg_job_done(bsg_job, bsg_reply->result,
1637 		       bsg_reply->reply_payload_rcv_len);
1638 
1639 	return 0;
1640 }
1641 
1642 static int
1643 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1644 {
1645 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1646 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1647 	scsi_qla_host_t *vha = shost_priv(host);
1648 	struct qla_hw_data *ha = vha->hw;
1649 	int rval = 0;
1650 	uint8_t bsg[DMA_POOL_SIZE];
1651 	struct qla_status_reg *sr = (void *)bsg;
1652 	dma_addr_t sfp_dma;
1653 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1654 
1655 	if (!sfp) {
1656 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1657 		    EXT_STATUS_NO_MEMORY;
1658 		goto done;
1659 	}
1660 
1661 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1662 	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1663 
1664 	*sfp = sr->status_reg;
1665 	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1666 	    sr->field_address.device, sr->field_address.offset,
1667 	    sizeof(sr->status_reg), sr->field_address.option);
1668 
1669 	if (rval) {
1670 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1671 		    EXT_STATUS_MAILBOX;
1672 		goto dealloc;
1673 	}
1674 
1675 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1676 
1677 dealloc:
1678 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1679 
1680 done:
1681 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1682 	bsg_reply->result = DID_OK << 16;
1683 	bsg_job_done(bsg_job, bsg_reply->result,
1684 		       bsg_reply->reply_payload_rcv_len);
1685 
1686 	return 0;
1687 }
1688 
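/*
 * I2C passthrough (this function writes, qla2x00_read_i2c() below
 * reads).  The qla_i2c_access block in the request payload describes
 * the device, offset and length; data is staged in a buffer from the
 * s_dma_pool and moved with the SFP mailbox commands.
 */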
1689 static int
1690 qla2x00_write_i2c(struct bsg_job *bsg_job)
1691 {
1692 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1693 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1694 	scsi_qla_host_t *vha = shost_priv(host);
1695 	struct qla_hw_data *ha = vha->hw;
1696 	int rval = 0;
1697 	uint8_t bsg[DMA_POOL_SIZE];
1698 	struct qla_i2c_access *i2c = (void *)bsg;
1699 	dma_addr_t sfp_dma;
1700 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1701 
1702 	if (!sfp) {
1703 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1704 		    EXT_STATUS_NO_MEMORY;
1705 		goto done;
1706 	}
1707 
1708 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1709 	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1710 
1711 	memcpy(sfp, i2c->buffer, i2c->length);
1712 	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1713 	    i2c->device, i2c->offset, i2c->length, i2c->option);
1714 
1715 	if (rval) {
1716 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1717 		    EXT_STATUS_MAILBOX;
1718 		goto dealloc;
1719 	}
1720 
1721 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1722 
1723 dealloc:
1724 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1725 
1726 done:
1727 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1728 	bsg_reply->result = DID_OK << 16;
1729 	bsg_job_done(bsg_job, bsg_reply->result,
1730 		       bsg_reply->reply_payload_rcv_len);
1731 
1732 	return 0;
1733 }
1734 
1735 static int
1736 qla2x00_read_i2c(struct bsg_job *bsg_job)
1737 {
1738 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1739 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1740 	scsi_qla_host_t *vha = shost_priv(host);
1741 	struct qla_hw_data *ha = vha->hw;
1742 	int rval = 0;
1743 	uint8_t bsg[DMA_POOL_SIZE];
1744 	struct qla_i2c_access *i2c = (void *)bsg;
1745 	dma_addr_t sfp_dma;
1746 	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1747 
1748 	if (!sfp) {
1749 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1750 		    EXT_STATUS_NO_MEMORY;
1751 		goto done;
1752 	}
1753 
1754 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1755 	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1756 
1757 	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1758 		i2c->device, i2c->offset, i2c->length, i2c->option);
1759 
1760 	if (rval) {
1761 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1762 		    EXT_STATUS_MAILBOX;
1763 		goto dealloc;
1764 	}
1765 
1766 	memcpy(i2c->buffer, sfp, i2c->length);
1767 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1768 	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1769 
1770 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1771 
1772 dealloc:
1773 	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1774 
1775 done:
1776 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1777 	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1778 	bsg_reply->result = DID_OK << 16;
1779 	bsg_job_done(bsg_job, bsg_reply->result,
1780 		       bsg_reply->reply_payload_rcv_len);
1781 
1782 	return 0;
1783 }
1784 
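/*
 * Bidirectional command diagnostic.  Requires a BIDI-capable adapter
 * that is online, cabled, attached to a switch (F-port topology) and
 * operating point-to-point.  The first invocation performs a self-login
 * and caches the resulting loop id in vha->self_login_loop_id for later
 * calls.
 */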
1785 static int
1786 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1787 {
1788 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1789 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1790 	scsi_qla_host_t *vha = shost_priv(host);
1791 	struct qla_hw_data *ha = vha->hw;
1792 	uint32_t rval = EXT_STATUS_OK;
1793 	uint16_t req_sg_cnt = 0;
1794 	uint16_t rsp_sg_cnt = 0;
1795 	uint16_t nextlid = 0;
1796 	uint32_t tot_dsds;
1797 	srb_t *sp = NULL;
1798 	uint32_t req_data_len;
1799 	uint32_t rsp_data_len;
1800 
1801 	/* Check that the adapter supports bidirectional commands */
1802 	if (!IS_BIDI_CAPABLE(ha)) {
1803 		ql_log(ql_log_warn, vha, 0x70a0,
1804 			"This adapter does not support bidirectional commands\n");
1805 		rval = EXT_STATUS_NOT_SUPPORTED;
1806 		goto done;
1807 	}
1808 
1809 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1810 		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1811 		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1812 		rval = EXT_STATUS_BUSY;
1813 		goto done;
1814 	}
1815 
1816 	/* Check if host is online */
1817 	if (!vha->flags.online) {
1818 		ql_log(ql_log_warn, vha, 0x70a1,
1819 			"Host is not online\n");
1820 		rval = EXT_STATUS_DEVICE_OFFLINE;
1821 		goto done;
1822 	}
1823 
1824 	/* Check if cable is plugged in or not */
1825 	if (vha->device_flags & DFLG_NO_CABLE) {
1826 		ql_log(ql_log_warn, vha, 0x70a2,
1827 			"Cable is unplugged...\n");
1828 		rval = EXT_STATUS_INVALID_CFG;
1829 		goto done;
1830 	}
1831 
1832 	/* Check if the switch is connected or not */
1833 	if (ha->current_topology != ISP_CFG_F) {
1834 		ql_log(ql_log_warn, vha, 0x70a3,
1835 			"Host is not connected to the switch\n");
1836 		rval = EXT_STATUS_INVALID_CFG;
1837 		goto done;
1838 	}
1839 
1840 	/* Check if operating mode is P2P */
1841 	if (ha->operating_mode != P2P) {
1842 		ql_log(ql_log_warn, vha, 0x70a4,
1843 		    "Host operating mode is not P2P\n");
1844 		rval = EXT_STATUS_INVALID_CFG;
1845 		goto done;
1846 	}
1847 
1848 	mutex_lock(&ha->selflogin_lock);
1849 	if (vha->self_login_loop_id == 0) {
1850 		/* Initialize all required fields of fcport */
1851 		vha->bidir_fcport.vha = vha;
1852 		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1853 		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1854 		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1855 		vha->bidir_fcport.loop_id = vha->loop_id;
1856 
1857 		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1858 			ql_log(ql_log_warn, vha, 0x70a7,
1859 			    "Failed to login port %06X for bidirectional IOCB\n",
1860 			    vha->bidir_fcport.d_id.b24);
1861 			mutex_unlock(&ha->selflogin_lock);
1862 			rval = EXT_STATUS_MAILBOX;
1863 			goto done;
1864 		}
1865 		vha->self_login_loop_id = nextlid - 1;
1866 	}
1867 	mutex_unlock(&ha->selflogin_lock);
1868 
1869 	/* Assign the self login loop id to fcport */
1870 	vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1872 
1873 	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1874 		bsg_job->request_payload.sg_list,
1875 		bsg_job->request_payload.sg_cnt,
1876 		DMA_TO_DEVICE);
1877 
1878 	if (!req_sg_cnt) {
1879 		rval = EXT_STATUS_NO_MEMORY;
1880 		goto done;
1881 	}
1882 
1883 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1884 		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1885 		DMA_FROM_DEVICE);
1886 
1887 	if (!rsp_sg_cnt) {
1888 		rval = EXT_STATUS_NO_MEMORY;
1889 		goto done_unmap_req_sg;
1890 	}
1891 
1892 	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1893 	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1894 		ql_dbg(ql_dbg_user, vha, 0x70a9,
1895 		    "DMA mapping resulted in different sg counts [request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1896 		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
1897 		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1900 		rval = EXT_STATUS_NO_MEMORY;
1901 		goto done_unmap_sg;
1902 	}
1903 
1904 	req_data_len = bsg_job->request_payload.payload_len;
1905 	rsp_data_len = bsg_job->reply_payload.payload_len;
1906 
1907 	if (req_data_len != rsp_data_len) {
1908 		rval = EXT_STATUS_BUSY;
1909 		ql_log(ql_log_warn, vha, 0x70aa,
1910 		    "req_data_len != rsp_data_len\n");
1911 		goto done_unmap_sg;
1912 	}
1913 
1914 	/* Alloc SRB structure */
1915 	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1916 	if (!sp) {
1917 		ql_dbg(ql_dbg_user, vha, 0x70ac,
1918 		    "Alloc SRB structure failed\n");
1919 		rval = EXT_STATUS_NO_MEMORY;
1920 		goto done_unmap_sg;
1921 	}
1922 
1923 	/* Populate srb->ctx with bidir ctx */
1924 	sp->u.bsg_job = bsg_job;
1925 	sp->free = qla2x00_bsg_sp_free;
1926 	sp->type = SRB_BIDI_CMD;
1927 	sp->done = qla2x00_bsg_job_done;
1928 
1929 	/* Add the read and write sg count */
1930 	tot_dsds = rsp_sg_cnt + req_sg_cnt;
1931 
1932 	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1933 	if (rval != EXT_STATUS_OK)
1934 		goto done_free_srb;
1935 	/* The bsg request will be completed in the interrupt handler */
1936 	return rval;
1937 
1938 done_free_srb:
1939 	mempool_free(sp, ha->srb_mempool);
1940 done_unmap_sg:
1941 	dma_unmap_sg(&ha->pdev->dev,
1942 	    bsg_job->reply_payload.sg_list,
1943 	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1944 done_unmap_req_sg:
1945 	dma_unmap_sg(&ha->pdev->dev,
1946 	    bsg_job->request_payload.sg_list,
1947 	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1948 done:
1949 
1950 	/* Return an error vendor specific response
1951 	 * and complete the bsg request
1952 	 */
1953 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1954 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1955 	bsg_reply->reply_payload_rcv_len = 0;
1956 	bsg_reply->result = DID_OK << 16;
1957 	bsg_job_done(bsg_job, bsg_reply->result,
1958 		       bsg_reply->reply_payload_rcv_len);
1959 	/* Always return success, vendor rsp carries correct status */
1960 	return 0;
1961 }
1962 
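/*
 * qlafx00_mgmt_cmd() - service the QL_VND_FX00_MGMT_CMD vendor command
 * for ISPFx00 adapters.
 *
 * Maps the request/reply payloads for DMA as directed by the flags in
 * the embedded qla_mt_iocb_rqst_fx00, allocates a dummy fcport and an
 * SRB of type SRB_FXIOCB_BCMD, and submits it with qla2x00_start_sp().
 */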
1963 static int
1964 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1965 {
1966 	struct fc_bsg_request *bsg_request = bsg_job->request;
1967 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1968 	scsi_qla_host_t *vha = shost_priv(host);
1969 	struct qla_hw_data *ha = vha->hw;
1970 	int rval = (DID_ERROR << 16);
1971 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1972 	srb_t *sp;
1973 	int req_sg_cnt = 0, rsp_sg_cnt = 0;
1974 	struct fc_port *fcport;
1975 	char *type = "FC_BSG_HST_FX_MGMT";
1976 
1977 	/* Copy the IOCB specific information */
1978 	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1979 	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1980 
1981 	/* Dump the vendor information */
1982 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
1983 	    piocb_rqst, sizeof(*piocb_rqst));
1984 
1985 	if (!vha->flags.online) {
1986 		ql_log(ql_log_warn, vha, 0x70d0,
1987 		    "Host is not online.\n");
1988 		rval = -EIO;
1989 		goto done;
1990 	}
1991 
1992 	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1993 		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1994 		    bsg_job->request_payload.sg_list,
1995 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1996 		if (!req_sg_cnt) {
1997 			ql_log(ql_log_warn, vha, 0x70c7,
1998 			    "dma_map_sg returned %d for request\n", req_sg_cnt);
1999 			rval = -ENOMEM;
2000 			goto done;
2001 		}
2002 	}
2003 
2004 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
2005 		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2006 		    bsg_job->reply_payload.sg_list,
2007 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2008 		if (!rsp_sg_cnt) {
2009 			ql_log(ql_log_warn, vha, 0x70c8,
2010 			    "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
2011 			rval = -ENOMEM;
2012 			goto done_unmap_req_sg;
2013 		}
2014 	}
2015 
2016 	ql_dbg(ql_dbg_user, vha, 0x70c9,
2017 	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
2018 	    bsg_job->request_payload.sg_cnt, req_sg_cnt,
2019 	    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2020 
2021 	/* Allocate a dummy fcport structure, since functions preparing the
2022 	 * IOCB and mailbox command retrieve port-specific information
2023 	 * from the fcport structure. For host-based ELS commands there will
2024 	 * be no fcport structure allocated.
2025 	 */
2026 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2027 	if (!fcport) {
2028 		ql_log(ql_log_warn, vha, 0x70ca,
2029 		    "Failed to allocate fcport.\n");
2030 		rval = -ENOMEM;
2031 		goto done_unmap_rsp_sg;
2032 	}
2033 
2034 	/* Alloc SRB structure */
2035 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2036 	if (!sp) {
2037 		ql_log(ql_log_warn, vha, 0x70cb,
2038 		    "qla2x00_get_sp failed.\n");
2039 		rval = -ENOMEM;
2040 		goto done_free_fcport;
2041 	}
2042 
2043 	/* Initialize all required fields of fcport */
2044 	fcport->vha = vha;
2045 	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
2046 
2047 	sp->type = SRB_FXIOCB_BCMD;
2048 	sp->name = "bsg_fx_mgmt";
2049 	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2050 	sp->u.bsg_job = bsg_job;
2051 	sp->free = qla2x00_bsg_sp_free;
2052 	sp->done = qla2x00_bsg_job_done;
2053 
2054 	ql_dbg(ql_dbg_user, vha, 0x70cc,
2055 	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2056 	    type, piocb_rqst->func_type, fcport->loop_id);
2057 
2058 	rval = qla2x00_start_sp(sp);
2059 	if (rval != QLA_SUCCESS) {
2060 		ql_log(ql_log_warn, vha, 0x70cd,
2061 		    "qla2x00_start_sp failed=%d.\n", rval);
2062 		mempool_free(sp, ha->srb_mempool);
2063 		rval = -EIO;
2064 		goto done_free_fcport;
2065 	}
2066 	return rval;
2067 
2068 done_free_fcport:
2069 	qla2x00_free_fcport(fcport);
2070 
2071 done_unmap_rsp_sg:
2072 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2073 		dma_unmap_sg(&ha->pdev->dev,
2074 		    bsg_job->reply_payload.sg_list,
2075 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2076 done_unmap_req_sg:
2077 	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2078 		dma_unmap_sg(&ha->pdev->dev,
2079 		    bsg_job->request_payload.sg_list,
2080 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2081 
2082 done:
2083 	return rval;
2084 }
2085 
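/*
 * qla26xx_serdes_op() - service the QL_VND_SERDES_OP vendor command:
 * read or write a single serdes register word as selected by
 * qla_serdes_reg.cmd.
 */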
2086 static int
2087 qla26xx_serdes_op(struct bsg_job *bsg_job)
2088 {
2089 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2090 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2091 	scsi_qla_host_t *vha = shost_priv(host);
2092 	int rval = 0;
2093 	struct qla_serdes_reg sr;
2094 
2095 	memset(&sr, 0, sizeof(sr));
2096 
2097 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2098 	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2099 
2100 	switch (sr.cmd) {
2101 	case INT_SC_SERDES_WRITE_REG:
2102 		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2103 		bsg_reply->reply_payload_rcv_len = 0;
2104 		break;
2105 	case INT_SC_SERDES_READ_REG:
2106 		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2107 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2108 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2109 		bsg_reply->reply_payload_rcv_len = sizeof(sr);
2110 		break;
2111 	default:
2112 		ql_dbg(ql_dbg_user, vha, 0x708c,
2113 		    "Unknown serdes cmd %x.\n", sr.cmd);
2114 		rval = -EINVAL;
2115 		break;
2116 	}
2117 
2118 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2119 	    rval ? EXT_STATUS_MAILBOX : 0;
2120 
2121 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2122 	bsg_reply->result = DID_OK << 16;
2123 	bsg_job_done(bsg_job, bsg_reply->result,
2124 		       bsg_reply->reply_payload_rcv_len);
2125 	return 0;
2126 }
2127 
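/*
 * qla8044_serdes_op() - service the QL_VND_SERDES_OP_EX vendor command,
 * the extended serdes register read/write used by ISP8044 adapters.
 */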
2128 static int
2129 qla8044_serdes_op(struct bsg_job *bsg_job)
2130 {
2131 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2132 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2133 	scsi_qla_host_t *vha = shost_priv(host);
2134 	int rval = 0;
2135 	struct qla_serdes_reg_ex sr;
2136 
2137 	memset(&sr, 0, sizeof(sr));
2138 
2139 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2140 	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2141 
2142 	switch (sr.cmd) {
2143 	case INT_SC_SERDES_WRITE_REG:
2144 		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2145 		bsg_reply->reply_payload_rcv_len = 0;
2146 		break;
2147 	case INT_SC_SERDES_READ_REG:
2148 		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2149 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2150 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2151 		bsg_reply->reply_payload_rcv_len = sizeof(sr);
2152 		break;
2153 	default:
2154 		ql_dbg(ql_dbg_user, vha, 0x7020,
2155 		    "Unknown serdes cmd %x.\n", sr.cmd);
2156 		rval = -EINVAL;
2157 		break;
2158 	}
2159 
2160 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2161 	    rval ? EXT_STATUS_MAILBOX : 0;
2162 
2163 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2164 	bsg_reply->result = DID_OK << 16;
2165 	bsg_job_done(bsg_job, bsg_reply->result,
2166 		       bsg_reply->reply_payload_rcv_len);
2167 	return 0;
2168 }
2169 
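/*
 * qla27xx_get_flash_upd_cap() - service QL_VND_GET_FLASH_UPDATE_CAPS.
 *
 * Reports the running firmware's attribute words, packed into the
 * 64-bit capabilities field of struct qla_flash_update_caps. Only
 * ISP27xx/28xx adapters are supported.
 */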
2170 static int
2171 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2172 {
2173 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2174 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2175 	scsi_qla_host_t *vha = shost_priv(host);
2176 	struct qla_hw_data *ha = vha->hw;
2177 	struct qla_flash_update_caps cap;
2178 
2179 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2180 		return -EPERM;
2181 
2182 	memset(&cap, 0, sizeof(cap));
2183 	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2184 			   (uint64_t)ha->fw_attributes_ext[0] << 32 |
2185 			   (uint64_t)ha->fw_attributes_h << 16 |
2186 			   (uint64_t)ha->fw_attributes;
2187 
2188 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2189 	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2190 	bsg_reply->reply_payload_rcv_len = sizeof(cap);
2191 
2192 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2193 	    EXT_STATUS_OK;
2194 
2195 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2196 	bsg_reply->result = DID_OK << 16;
2197 	bsg_job_done(bsg_job, bsg_reply->result,
2198 		       bsg_reply->reply_payload_rcv_len);
2199 	return 0;
2200 }
2201 
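/*
 * qla27xx_set_flash_upd_cap() - service QL_VND_SET_FLASH_UPDATE_CAPS.
 *
 * The requested capabilities must match the online firmware attributes
 * and the outage duration must be at least MAX_LOOP_TIMEOUT; otherwise
 * EXT_STATUS_INVALID_PARAM is returned in vendor_rsp[0].
 */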
2202 static int
2203 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2204 {
2205 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2206 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2207 	scsi_qla_host_t *vha = shost_priv(host);
2208 	struct qla_hw_data *ha = vha->hw;
2209 	uint64_t online_fw_attr = 0;
2210 	struct qla_flash_update_caps cap;
2211 
2212 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2213 		return -EPERM;
2214 
2215 	memset(&cap, 0, sizeof(cap));
2216 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2217 	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2218 
2219 	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2220 			 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2221 			 (uint64_t)ha->fw_attributes_h << 16 |
2222 			 (uint64_t)ha->fw_attributes;
2223 
2224 	if (online_fw_attr != cap.capabilities) {
2225 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2226 		    EXT_STATUS_INVALID_PARAM;
2227 		return -EINVAL;
2228 	}
2229 
2230 	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2231 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2232 		    EXT_STATUS_INVALID_PARAM;
2233 		return -EINVAL;
2234 	}
2235 
2236 	bsg_reply->reply_payload_rcv_len = 0;
2237 
2238 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2239 	    EXT_STATUS_OK;
2240 
2241 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2242 	bsg_reply->result = DID_OK << 16;
2243 	bsg_job_done(bsg_job, bsg_reply->result,
2244 		       bsg_reply->reply_payload_rcv_len);
2245 	return 0;
2246 }
2247 
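/*
 * qla27xx_get_bbcr_data() - service QL_VND_GET_BBCR_DATA.
 *
 * Returns buffer-to-buffer credit recovery (BBCR) status: whether BBCR
 * is enabled, the online/offline state derived from the firmware's
 * bbcr word, and the configured/negotiated BB_SC_N values.
 */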
2248 static int
2249 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2250 {
2251 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2252 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2253 	scsi_qla_host_t *vha = shost_priv(host);
2254 	struct qla_hw_data *ha = vha->hw;
2255 	struct qla_bbcr_data bbcr;
2256 	uint16_t loop_id, topo, sw_cap;
2257 	uint8_t domain, area, al_pa, state;
2258 	int rval;
2259 
2260 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2261 		return -EPERM;
2262 
2263 	memset(&bbcr, 0, sizeof(bbcr));
2264 
2265 	if (vha->flags.bbcr_enable)
2266 		bbcr.status = QLA_BBCR_STATUS_ENABLED;
2267 	else
2268 		bbcr.status = QLA_BBCR_STATUS_DISABLED;
2269 
2270 	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2271 		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2272 			&area, &domain, &topo, &sw_cap);
2273 		if (rval != QLA_SUCCESS) {
2274 			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2275 			bbcr.state = QLA_BBCR_STATE_OFFLINE;
2276 			bbcr.mbx1 = loop_id;
2277 			goto done;
2278 		}
2279 
2280 		state = (vha->bbcr >> 12) & 0x1;
2281 
2282 		if (state) {
2283 			bbcr.state = QLA_BBCR_STATE_OFFLINE;
2284 			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2285 		} else {
2286 			bbcr.state = QLA_BBCR_STATE_ONLINE;
2287 			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2288 		}
2289 
2290 		bbcr.configured_bbscn = vha->bbcr & 0xf;
2291 	}
2292 
2293 done:
2294 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2295 		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2296 	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2297 
2298 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2299 
2300 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2301 	bsg_reply->result = DID_OK << 16;
2302 	bsg_job_done(bsg_job, bsg_reply->result,
2303 		       bsg_reply->reply_payload_rcv_len);
2304 	return 0;
2305 }
2306 
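/*
 * qla2x00_get_priv_stats() - service QL_VND_GET_PRIV_STATS and
 * QL_VND_GET_PRIV_STATS_EX.
 *
 * Fetches the ISP link statistics via qla24xx_get_isp_stats() into a
 * coherent DMA buffer and copies them to the reply payload. The _EX
 * variant passes the caller-supplied options word from vendor_cmd[1].
 */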
2307 static int
2308 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2309 {
2310 	struct fc_bsg_request *bsg_request = bsg_job->request;
2311 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2312 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2313 	scsi_qla_host_t *vha = shost_priv(host);
2314 	struct qla_hw_data *ha = vha->hw;
2315 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2316 	struct link_statistics *stats = NULL;
2317 	dma_addr_t stats_dma;
2318 	int rval;
2319 	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2320 	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2321 
2322 	if (test_bit(UNLOADING, &vha->dpc_flags))
2323 		return -ENODEV;
2324 
2325 	if (unlikely(pci_channel_offline(ha->pdev)))
2326 		return -ENODEV;
2327 
2328 	if (qla2x00_reset_active(vha))
2329 		return -EBUSY;
2330 
2331 	if (!IS_FWI2_CAPABLE(ha))
2332 		return -EPERM;
2333 
2334 	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2335 				   GFP_KERNEL);
2336 	if (!stats) {
2337 		ql_log(ql_log_warn, vha, 0x70e2,
2338 		    "Failed to allocate memory for stats.\n");
2339 		return -ENOMEM;
2340 	}
2341 
2342 	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2343 
2344 	if (rval == QLA_SUCCESS) {
2345 		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2346 			stats, sizeof(*stats));
2347 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2348 			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2349 	}
2350 
2351 	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2352 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2353 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2354 
2355 	bsg_job->reply_len = sizeof(*bsg_reply);
2356 	bsg_reply->result = DID_OK << 16;
2357 	bsg_job_done(bsg_job, bsg_reply->result,
2358 		       bsg_reply->reply_payload_rcv_len);
2359 
2360 	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2361 		stats, stats_dma);
2362 
2363 	return 0;
2364 }
2365 
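/*
 * qla2x00_do_dport_diagnostics() - service QL_VND_DPORT_DIAGNOSTICS by
 * running qla26xx_dport_diagnostics() with the caller's options and
 * returning the diagnostic buffer in the reply payload.
 */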
2366 static int
2367 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2368 {
2369 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2370 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2371 	scsi_qla_host_t *vha = shost_priv(host);
2372 	int rval;
2373 	struct qla_dport_diag *dd;
2374 
2375 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2376 	    !IS_QLA28XX(vha->hw))
2377 		return -EPERM;
2378 
2379 	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2380 	if (!dd) {
2381 		ql_log(ql_log_warn, vha, 0x70db,
2382 		    "Failed to allocate memory for dport.\n");
2383 		return -ENOMEM;
2384 	}
2385 
2386 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2387 	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2388 
2389 	rval = qla26xx_dport_diagnostics(
2390 	    vha, dd->buf, sizeof(dd->buf), dd->options);
2391 	if (rval == QLA_SUCCESS) {
2392 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2393 		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2394 	}
2395 
2396 	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2397 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2398 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2399 
2400 	bsg_job->reply_len = sizeof(*bsg_reply);
2401 	bsg_reply->result = DID_OK << 16;
2402 	bsg_job_done(bsg_job, bsg_reply->result,
2403 		       bsg_reply->reply_payload_rcv_len);
2404 
2405 	kfree(dd);
2406 
2407 	return 0;
2408 }
2409 
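/*
 * qla2x00_get_flash_image_status() - service
 * QL_VND_SS_GET_FLASH_IMAGE_STATUS: report which flash regions
 * (firmware image and, on ISP28xx, the auxiliary regions) are active.
 */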
2410 static int
2411 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2412 {
2413 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2414 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2415 	struct qla_hw_data *ha = vha->hw;
2416 	struct qla_active_regions regions = { };
2417 	struct active_regions active_regions = { };
2418 
2419 	qla27xx_get_active_image(vha, &active_regions);
2420 	regions.global_image = active_regions.global;
2421 
2422 	if (IS_QLA28XX(ha)) {
2423 		qla28xx_get_aux_images(vha, &active_regions);
2424 		regions.board_config = active_regions.aux.board_config;
2425 		regions.vpd_nvram = active_regions.aux.vpd_nvram;
2426 		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2427 		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2428 	}
2429 
2430 	ql_dbg(ql_dbg_user, vha, 0x70e1,
2431 	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
2432 	    __func__, vha->host_no, regions.global_image,
2433 	    regions.board_config, regions.vpd_nvram,
2434 	    regions.npiv_config_0_1, regions.npiv_config_2_3);
2435 
2436 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2437 	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2438 
2439 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2440 	bsg_reply->reply_payload_rcv_len = sizeof(regions);
2441 	bsg_reply->result = DID_OK << 16;
2442 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2443 	bsg_job_done(bsg_job, bsg_reply->result,
2444 	    bsg_reply->reply_payload_rcv_len);
2445 
2446 	return 0;
2447 }
2448 
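/*
 * qla2x00_manage_host_stats() - service QL_VND_MANAGE_HOST_STATS:
 * start, stop or clear host statistics collection according to the
 * action in struct ql_vnd_mng_host_stats_param.
 */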
2449 static int
2450 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
2451 {
2452 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2453 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2454 	struct ql_vnd_mng_host_stats_param *req_data;
2455 	struct ql_vnd_mng_host_stats_resp rsp_data;
2456 	u32 req_data_len;
2457 	int ret = 0;
2458 
2459 	if (!vha->flags.online) {
2460 		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2461 		return -EIO;
2462 	}
2463 
2464 	req_data_len = bsg_job->request_payload.payload_len;
2465 
2466 	if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
2467 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2468 		return -EIO;
2469 	}
2470 
2471 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2472 	if (!req_data) {
2473 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2474 		return -ENOMEM;
2475 	}
2476 
2477 	/* Copy the request buffer in req_data */
2478 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2479 			  bsg_job->request_payload.sg_cnt, req_data,
2480 			  req_data_len);
2481 
2482 	switch (req_data->action) {
2483 	case QLA_STOP:
2484 		ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
2485 		break;
2486 	case QLA_START:
2487 		ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
2488 		break;
2489 	case QLA_CLEAR:
2490 		ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
2491 		break;
2492 	default:
2493 		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2494 		ret = -EIO;
2495 		break;
2496 	}
2497 
2498 	kfree(req_data);
2499 
2500 	/* Prepare response */
2501 	rsp_data.status = ret;
2502 	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2503 
2504 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2505 	bsg_reply->reply_payload_rcv_len =
2506 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2507 				    bsg_job->reply_payload.sg_cnt,
2508 				    &rsp_data,
2509 				    sizeof(struct ql_vnd_mng_host_stats_resp));
2510 
2511 	bsg_reply->result = DID_OK;
2512 	bsg_job_done(bsg_job, bsg_reply->result,
2513 		     bsg_reply->reply_payload_rcv_len);
2514 
2515 	return ret;
2516 }
2517 
2518 static int
2519 qla2x00_get_host_stats(struct bsg_job *bsg_job)
2520 {
2521 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2522 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2523 	struct ql_vnd_stats_param *req_data;
2524 	struct ql_vnd_host_stats_resp rsp_data;
2525 	u32 req_data_len;
2526 	int ret = 0;
2527 	u64 ini_entry_count = 0;
2528 	u64 entry_count = 0;
2529 	u64 tgt_num = 0;
2530 	u64 tmp_stat_type = 0;
2531 	u64 response_len = 0;
2532 	void *data;
2533 
2534 	req_data_len = bsg_job->request_payload.payload_len;
2535 
2536 	if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
2537 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2538 		return -EIO;
2539 	}
2540 
2541 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2542 	if (!req_data) {
2543 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2544 		return -ENOMEM;
2545 	}
2546 
2547 	/* Copy the request buffer in req_data */
2548 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2549 			  bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2550 
2551 	/* Copy stat type to work on it */
2552 	tmp_stat_type = req_data->stat_type;
2553 
2554 	if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
2555 		/* Num of tgts connected to this host */
2556 		tgt_num = qla2x00_get_num_tgts(vha);
2557 		/* unset QLA2XX_TGT_SHT_LNK_DOWN (BIT_17) */
2558 		tmp_stat_type &= ~(1 << 17);
2559 	}
2560 
2561 	/* Total ini stats */
2562 	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
2563 
2564 	/* Total number of entries */
2565 	entry_count = ini_entry_count + tgt_num;
2566 
2567 	response_len = sizeof(struct ql_vnd_host_stats_resp) +
2568 		(sizeof(struct ql_vnd_stat_entry) * entry_count);
2569 
2570 	if (response_len > bsg_job->reply_payload.payload_len) {
2571 		rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
2572 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2573 		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2574 
2575 		bsg_reply->reply_payload_rcv_len =
2576 			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2577 					    bsg_job->reply_payload.sg_cnt, &rsp_data,
2578 					    sizeof(struct ql_vnd_mng_host_stats_resp));
2579 
2580 		bsg_reply->result = DID_OK;
2581 		bsg_job_done(bsg_job, bsg_reply->result,
2582 			     bsg_reply->reply_payload_rcv_len);
2583 		goto host_stat_out;
2584 	}
2585 
2586 	data = kzalloc(response_len, GFP_KERNEL);
2587 	if (!data) {
2588 		ret = -ENOMEM;
2589 		goto host_stat_out;
2590 	}
2591 
2592 	ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2593 				    data, response_len);
2594 
2595 	rsp_data.status = EXT_STATUS_OK;
2596 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2597 
2598 	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2599 							       bsg_job->reply_payload.sg_cnt,
2600 							       data, response_len);
2601 	bsg_reply->result = DID_OK;
2602 	bsg_job_done(bsg_job, bsg_reply->result,
2603 		     bsg_reply->reply_payload_rcv_len);
2604 
2605 	kfree(data);
2606 host_stat_out:
2607 	kfree(req_data);
2608 	return ret;
2609 }
2610 
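/* Look up the fc_rport whose rport number matches tgt_num, if any. */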
2611 static struct fc_rport *
2612 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
2613 {
2614 	fc_port_t *fcport = NULL;
2615 
2616 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
2617 		if (fcport->rport->number == tgt_num)
2618 			return fcport->rport;
2619 	}
2620 	return NULL;
2621 }
2622 
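/*
 * qla2x00_get_tgt_stats() - service QL_VND_GET_TGT_STATS: return the
 * statistics entry for the single target identified by tgt_id, or
 * EXT_STATUS_INVALID_PARAM if no matching rport exists.
 */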
2623 static int
2624 qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
2625 {
2626 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2627 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2628 	struct ql_vnd_tgt_stats_param *req_data;
2629 	u32 req_data_len;
2630 	int ret = 0;
2631 	u64 response_len = 0;
2632 	struct ql_vnd_tgt_stats_resp *data = NULL;
2633 	struct fc_rport *rport = NULL;
2634 
2635 	if (!vha->flags.online) {
2636 		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2637 		return -EIO;
2638 	}
2639 
2640 	req_data_len = bsg_job->request_payload.payload_len;
2641 
2642 	if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
2643 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2644 		return -EIO;
2645 	}
2646 
2647 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2648 	if (!req_data) {
2649 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2650 		return -ENOMEM;
2651 	}
2652 
2653 	/* Copy the request buffer in req_data */
2654 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2655 			  bsg_job->request_payload.sg_cnt,
2656 			  req_data, req_data_len);
2657 
2658 	response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
2659 		sizeof(struct ql_vnd_stat_entry);
2660 
2661 	/* structure + size for one entry */
2662 	data = kzalloc(response_len, GFP_KERNEL);
2663 	if (!data) {
2664 		kfree(req_data);
2665 		return -ENOMEM;
2666 	}
2667 
2668 	if (response_len > bsg_job->reply_payload.payload_len) {
2669 		data->status = EXT_STATUS_BUFFER_TOO_SMALL;
2670 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2671 		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2672 
2673 		bsg_reply->reply_payload_rcv_len =
2674 			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2675 					    bsg_job->reply_payload.sg_cnt, data,
2676 					    sizeof(struct ql_vnd_tgt_stats_resp));
2677 
2678 		bsg_reply->result = DID_OK;
2679 		bsg_job_done(bsg_job, bsg_reply->result,
2680 			     bsg_reply->reply_payload_rcv_len);
2681 		goto tgt_stat_out;
2682 	}
2683 
2684 	rport = qla2xxx_find_rport(vha, req_data->tgt_id);
2685 	if (!rport) {
2686 		ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
2687 		ret = EXT_STATUS_INVALID_PARAM;
2688 		data->status = EXT_STATUS_INVALID_PARAM;
2689 		goto reply;
2690 	}
2691 
2692 	ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2693 				    rport, (void *)data, response_len);
2694 
2695 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2696 reply:
2697 	bsg_reply->reply_payload_rcv_len =
2698 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2699 				    bsg_job->reply_payload.sg_cnt, data,
2700 				    response_len);
2701 	bsg_reply->result = DID_OK;
2702 	bsg_job_done(bsg_job, bsg_reply->result,
2703 		     bsg_reply->reply_payload_rcv_len);
2704 
2705 tgt_stat_out:
2706 	kfree(data);
2707 	kfree(req_data);
2708 
2709 	return ret;
2710 }
2711 
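/*
 * qla2x00_manage_host_port() - service QL_VND_MANAGE_HOST_PORT:
 * enable or disable the host port according to the requested action.
 */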
2712 static int
2713 qla2x00_manage_host_port(struct bsg_job *bsg_job)
2714 {
2715 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2716 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2717 	struct ql_vnd_mng_host_port_param *req_data;
2718 	struct ql_vnd_mng_host_port_resp rsp_data;
2719 	u32 req_data_len;
2720 	int ret = 0;
2721 
2722 	req_data_len = bsg_job->request_payload.payload_len;
2723 
2724 	if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
2725 		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2726 		return -EIO;
2727 	}
2728 
2729 	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2730 	if (!req_data) {
2731 		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2732 		return -ENOMEM;
2733 	}
2734 
2735 	/* Copy the request buffer in req_data */
2736 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2737 			  bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2738 
2739 	switch (req_data->action) {
2740 	case QLA_ENABLE:
2741 		ret = qla2xxx_enable_port(vha->host);
2742 		break;
2743 	case QLA_DISABLE:
2744 		ret = qla2xxx_disable_port(vha->host);
2745 		break;
2746 	default:
2747 		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2748 		ret = -EIO;
2749 		break;
2750 	}
2751 
2752 	kfree(req_data);
2753 
2754 	/* Prepare response */
2755 	rsp_data.status = ret;
2756 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2757 	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
2758 
2759 	bsg_reply->reply_payload_rcv_len =
2760 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2761 				    bsg_job->reply_payload.sg_cnt, &rsp_data,
2762 				    sizeof(struct ql_vnd_mng_host_port_resp));
2763 	bsg_reply->result = DID_OK;
2764 	bsg_job_done(bsg_job, bsg_reply->result,
2765 		     bsg_reply->reply_payload_rcv_len);
2766 
2767 	return ret;
2768 }
2769 
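/* Dispatch an FC_BSG_HST_VENDOR request to its vendor command handler. */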
2770 static int
2771 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2772 {
2773 	struct fc_bsg_request *bsg_request = bsg_job->request;
2774 
2775 	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2776 	case QL_VND_LOOPBACK:
2777 		return qla2x00_process_loopback(bsg_job);
2778 
2779 	case QL_VND_A84_RESET:
2780 		return qla84xx_reset(bsg_job);
2781 
2782 	case QL_VND_A84_UPDATE_FW:
2783 		return qla84xx_updatefw(bsg_job);
2784 
2785 	case QL_VND_A84_MGMT_CMD:
2786 		return qla84xx_mgmt_cmd(bsg_job);
2787 
2788 	case QL_VND_IIDMA:
2789 		return qla24xx_iidma(bsg_job);
2790 
2791 	case QL_VND_FCP_PRIO_CFG_CMD:
2792 		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2793 
2794 	case QL_VND_READ_FLASH:
2795 		return qla2x00_read_optrom(bsg_job);
2796 
2797 	case QL_VND_UPDATE_FLASH:
2798 		return qla2x00_update_optrom(bsg_job);
2799 
2800 	case QL_VND_SET_FRU_VERSION:
2801 		return qla2x00_update_fru_versions(bsg_job);
2802 
2803 	case QL_VND_READ_FRU_STATUS:
2804 		return qla2x00_read_fru_status(bsg_job);
2805 
2806 	case QL_VND_WRITE_FRU_STATUS:
2807 		return qla2x00_write_fru_status(bsg_job);
2808 
2809 	case QL_VND_WRITE_I2C:
2810 		return qla2x00_write_i2c(bsg_job);
2811 
2812 	case QL_VND_READ_I2C:
2813 		return qla2x00_read_i2c(bsg_job);
2814 
2815 	case QL_VND_DIAG_IO_CMD:
2816 		return qla24xx_process_bidir_cmd(bsg_job);
2817 
2818 	case QL_VND_FX00_MGMT_CMD:
2819 		return qlafx00_mgmt_cmd(bsg_job);
2820 
2821 	case QL_VND_SERDES_OP:
2822 		return qla26xx_serdes_op(bsg_job);
2823 
2824 	case QL_VND_SERDES_OP_EX:
2825 		return qla8044_serdes_op(bsg_job);
2826 
2827 	case QL_VND_GET_FLASH_UPDATE_CAPS:
2828 		return qla27xx_get_flash_upd_cap(bsg_job);
2829 
2830 	case QL_VND_SET_FLASH_UPDATE_CAPS:
2831 		return qla27xx_set_flash_upd_cap(bsg_job);
2832 
2833 	case QL_VND_GET_BBCR_DATA:
2834 		return qla27xx_get_bbcr_data(bsg_job);
2835 
2836 	case QL_VND_GET_PRIV_STATS:
2837 	case QL_VND_GET_PRIV_STATS_EX:
2838 		return qla2x00_get_priv_stats(bsg_job);
2839 
2840 	case QL_VND_DPORT_DIAGNOSTICS:
2841 		return qla2x00_do_dport_diagnostics(bsg_job);
2842 
2843 	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2844 		return qla2x00_get_flash_image_status(bsg_job);
2845 
2846 	case QL_VND_MANAGE_HOST_STATS:
2847 		return qla2x00_manage_host_stats(bsg_job);
2848 
2849 	case QL_VND_GET_HOST_STATS:
2850 		return qla2x00_get_host_stats(bsg_job);
2851 
2852 	case QL_VND_GET_TGT_STATS:
2853 		return qla2x00_get_tgt_stats(bsg_job);
2854 
2855 	case QL_VND_MANAGE_HOST_PORT:
2856 		return qla2x00_manage_host_port(bsg_job);
2857 
2858 	default:
2859 		return -ENOSYS;
2860 	}
2861 }
2862 
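/*
 * qla24xx_bsg_request() - bsg entry point: resolve the owning host,
 * reject requests while the port is isolated or the chip is down
 * (except the host-port/host-stats commands needed for recovery), and
 * dispatch by msgcode.
 */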
2863 int
2864 qla24xx_bsg_request(struct bsg_job *bsg_job)
2865 {
2866 	struct fc_bsg_request *bsg_request = bsg_job->request;
2867 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2868 	int ret = -EINVAL;
2869 	struct fc_rport *rport;
2870 	struct Scsi_Host *host;
2871 	scsi_qla_host_t *vha;
2872 
2873 	/* In case no data is transferred. */
2874 	bsg_reply->reply_payload_rcv_len = 0;
2875 
2876 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2877 		rport = fc_bsg_to_rport(bsg_job);
2878 		host = rport_to_shost(rport);
2879 		vha = shost_priv(host);
2880 	} else {
2881 		host = fc_bsg_to_shost(bsg_job);
2882 		vha = shost_priv(host);
2883 	}
2884 
2885 	/* Disabling the port will bring down the chip; allow the enable command through */
2886 	if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
2887 	    bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
2888 		goto skip_chip_chk;
2889 
2890 	if (vha->hw->flags.port_isolated) {
2891 		bsg_reply->result = DID_ERROR;
2892 		/* operation not permitted */
2893 		return -EPERM;
2894 	}
2895 
2896 	if (qla2x00_chip_is_down(vha)) {
2897 		ql_dbg(ql_dbg_user, vha, 0x709f,
2898 		    "BSG: ISP abort active/needed -- cmd=%d.\n",
2899 		    bsg_request->msgcode);
2900 		return -EBUSY;
2901 	}
2902 
2903 skip_chip_chk:
2904 	ql_dbg(ql_dbg_user, vha, 0x7000,
2905 	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2906 
2907 	switch (bsg_request->msgcode) {
2908 	case FC_BSG_RPT_ELS:
2909 	case FC_BSG_HST_ELS_NOLOGIN:
2910 		ret = qla2x00_process_els(bsg_job);
2911 		break;
2912 	case FC_BSG_HST_CT:
2913 		ret = qla2x00_process_ct(bsg_job);
2914 		break;
2915 	case FC_BSG_HST_VENDOR:
2916 		ret = qla2x00_process_vendor_specific(bsg_job);
2917 		break;
2918 	case FC_BSG_HST_ADD_RPORT:
2919 	case FC_BSG_HST_DEL_RPORT:
2920 	case FC_BSG_RPT_CT:
2921 	default:
2922 		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2923 		break;
2924 	}
2925 	return ret;
2926 }
2927 
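/*
 * qla24xx_bsg_timeout() - abort the SRB backing a timed-out bsg job.
 * Scans the outstanding commands of every request queue under the
 * hardware lock and issues a mailbox abort for the matching SRB.
 */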
2928 int
2929 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2930 {
2931 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2932 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2933 	struct qla_hw_data *ha = vha->hw;
2934 	srb_t *sp;
2935 	int cnt, que;
2936 	unsigned long flags;
2937 	struct req_que *req;
2938 
2939 	/* Find the bsg job in the active list of commands */
2940 	spin_lock_irqsave(&ha->hardware_lock, flags);
2941 	for (que = 0; que < ha->max_req_queues; que++) {
2942 		req = ha->req_q_map[que];
2943 		if (!req)
2944 			continue;
2945 
2946 		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2947 			sp = req->outstanding_cmds[cnt];
2948 			if (sp) {
2949 				if (((sp->type == SRB_CT_CMD) ||
2950 					(sp->type == SRB_ELS_CMD_HST) ||
2951 					(sp->type == SRB_FXIOCB_BCMD))
2952 					&& (sp->u.bsg_job == bsg_job)) {
2953 					req->outstanding_cmds[cnt] = NULL;
2954 					spin_unlock_irqrestore(&ha->hardware_lock, flags);
2955 					if (ha->isp_ops->abort_command(sp)) {
2956 						ql_log(ql_log_warn, vha, 0x7089,
2957 						ql_log(ql_log_warn, vha, 0x7089,
2958 						    "mbx abort_command failed.\n");
2959 						bsg_reply->result = -EIO;
2960 					} else {
2961 						ql_dbg(ql_dbg_user, vha, 0x708a,
2962 						    "mbx abort_command success.\n");
2965 					}
2966 					spin_lock_irqsave(&ha->hardware_lock, flags);
2967 					goto done;
2968 				}
2969 			}
2970 		}
2971 	}
2972 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2973 	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2974 	bsg_reply->result = -ENXIO;
2975 	return 0;
2976 
2977 done:
2978 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2979 	sp->free(sp);
2980 	return 0;
2981 }
2982