/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	sp->free(sp);
}

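/*
 * Release the resources held by a completed BSG srb: undo the DMA
 * mappings set up for the request/reply payloads (FX00 vendor commands
 * track which directions were mapped via the piocb_rqst flags), queue
 * the dummy fcport allocated for host-based CT/ELS/FX00 commands for
 * freeing on the workqueue, and return the srb itself.
 */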
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

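/*
 * Sanity-check an FCP priority configuration block read from (or about
 * to be written to) flash: an all-ones first word means no data is
 * present, the header must carry the "HQOS" signature, and, when @flag
 * is 1, at least one entry must have FCP_PRIO_ENTRY_TAG_VALID set.
 * Returns 1 if the data is usable, 0 otherwise.
 */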
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

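/*
 * Vendor command handler for the FCP priority sub-commands
 * (enable/disable, get/set config).  The set path copies the caller's
 * buffer into ha->fcp_prio_cfg, validates it, and pushes the new
 * priority data to all logged-in ports.
 */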
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the stale
			 * fcp_prio_cfg is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return ret;
}

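/*
 * Pass an ELS frame through to the fabric.  For FC_BSG_RPT_ELS the
 * destination comes from the rport (a fabric login is attempted if the
 * port is not logged in); for FC_BSG_HST_ELS_NOLOGIN a temporary fcport
 * is fabricated from the port ID supplied in the request.
 */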
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SGs are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
		bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SGs are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* Make sure the rport is logged in;
		 * if not, perform a fabric login.
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since the functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For host
		 * based ELS commands no fcport structure exists beforehand.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
			bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
			bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
			bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
			(fcport->d_id.b.al_pa == 0xFD) ?
			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
		(bsg_request->msgcode == FC_BSG_RPT_ELS ?
		 "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%02x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done_free_fcport:
	/* Only the dummy fcport allocated for the host-based ELS case is
	 * ours to free; an rport's fcport belongs to the FC transport.
	 */
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

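/*
 * A CT command IOCB carries two data segment descriptors; each
 * continuation IOCB carries five more.  For example, 12 DSDs need
 * 1 + (10 / 5) = 3 IOCBs, while 13 DSDs need 4 (the remainder of one
 * adds a final continuation IOCB).
 */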
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

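/*
 * Pass a CT (Generic Services) frame through to the fabric.  The
 * destination well-known address is derived from the routing byte in
 * preamble_word1 (0xFC -> name server, 0xFA -> management server); a
 * dummy fcport carrying that loop ID is allocated for the duration of
 * the command.
 */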
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
		(bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
			>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port-specific information
	 * from the fcport structure. For host-based CT commands no fcport
	 * structure exists beforehand.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else {
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");
		}

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else {
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");
		}

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else {
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
		}
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

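/*
 * Diagnostic echo/loopback vendor command.  Roughly: on a fabric-
 * attached port (or, on 81xx/8031/8044 CNAs, for a max-sized external-
 * loopback ELS frame) the request is serviced as an ECHO command;
 * otherwise the port is switched into internal/external loopback via
 * the port-config mailbox calls, the loopback test is run, and the
 * original configuration is restored.  Note that rsp_data_len is sized
 * from the *request* payload length below.
 */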
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
		&req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
		&rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK) {
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			} else {
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);
			}

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
					response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config.
				 * Also clear internal loopback.
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, rsp_data,
			rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
		req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

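/*
 * Vendor command: reset the ISP84xx co-processor, optionally dropping
 * it into diagnostic firmware (A84_ISSUE_RESET_DIAG_FW).
 */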
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

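/*
 * Vendor command: push a firmware image to the ISP84xx via a
 * VERIFY_CHIP IOCB.  The image is staged in a coherent DMA buffer and
 * the firmware version is taken from the 32-bit word at byte offset 8
 * of the image, per the get_unaligned_le32() below.
 */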
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

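/*
 * Vendor command: ISP84xx management access (read/write memory, get
 * info, change config) via an ACCESS_CHIP IOCB.  The qla_bsg_a84_mgmt
 * block sits immediately after the fc_bsg_request in the BSG buffer,
 * and only the read/write/info paths need a DMA staging buffer.
 */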
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}

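/*
 * Vendor command: get or set the iiDMA speed for a logged-in target
 * port identified by WWPN (port_param->mode selects set vs. get; on a
 * get, the qla_port_param block is returned after the fc_bsg_reply).
 */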
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
			fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
			port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
			&port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
				sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
				sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
				sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

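/*
 * Common setup for the option ROM read/update vendor commands: bounds-
 * check the requested start offset, make sure no other flash access is
 * in flight (optrom_state), size the transfer region, and allocate the
 * staging buffer.  Called with ha->optrom_mutex held by both callers.
 */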
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

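/*
 * Vendor command: read a region of the option ROM into the staging
 * buffer and copy it back to the caller's reply payload.
 */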
static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}

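/*
 * Vendor command: flash a region of the option ROM from the caller's
 * request payload.  Minidump capture is suppressed for the duration
 * (isp82xx_no_md_cap).
 */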
static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return rval;
}

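/*
 * Vendor command: write a list of image version fields (FRU version
 * data) to the attached devices, one qla2x00_write_sfp() call per list
 * entry.  Note the on-stack bsg[DMA_POOL_SIZE] staging buffer, a
 * pattern shared by the FRU/i2c handlers below.
 */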
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

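/*
 * Vendor command: read a single FRU status register via
 * qla2x00_read_sfp() and return the qla_status_reg block in the reply
 * payload.
 */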
static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

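/*
 * Vendor command: write a single FRU status register via
 * qla2x00_write_sfp().
 */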
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

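/*
 * Vendor command: raw i2c write through the SFP access mailbox
 * interface (device/offset/length/option come from the qla_i2c_access
 * block in the request payload).
 */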
static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

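/*
 * Vendor command: raw i2c read; the data read is returned inside the
 * qla_i2c_access block copied back to the reply payload.
 */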
static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
		i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	return 0;
}

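/*
 * Bidirectional (read+write) diagnostic command.  Requires a BIDI-
 * capable ISP in point-to-point mode connected to a switch; the host
 * performs a self-login (cached in self_login_loop_id) so the command
 * can be addressed to the host's own port ID.
 */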
1784 static int
1785 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1786 {
1787 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1788 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1789 	scsi_qla_host_t *vha = shost_priv(host);
1790 	struct qla_hw_data *ha = vha->hw;
1791 	uint32_t rval = EXT_STATUS_OK;
1792 	uint16_t req_sg_cnt = 0;
1793 	uint16_t rsp_sg_cnt = 0;
1794 	uint16_t nextlid = 0;
1795 	uint32_t tot_dsds;
1796 	srb_t *sp = NULL;
1797 	uint32_t req_data_len;
1798 	uint32_t rsp_data_len;
1799 
1800 	/* Check the type of the adapter */
1801 	if (!IS_BIDI_CAPABLE(ha)) {
1802 		ql_log(ql_log_warn, vha, 0x70a0,
1803 			"This adapter is not supported\n");
1804 		rval = EXT_STATUS_NOT_SUPPORTED;
1805 		goto done;
1806 	}
1807 
1808 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1809 		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1810 		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1811 		rval = EXT_STATUS_BUSY;
1812 		goto done;
1813 	}
1814 
1815 	/* Check if host is online */
1816 	if (!vha->flags.online) {
1817 		ql_log(ql_log_warn, vha, 0x70a1,
1818 			"Host is not online\n");
1819 		rval = EXT_STATUS_DEVICE_OFFLINE;
1820 		goto done;
1821 	}
1822 
1823 	/* Check if the cable is plugged in */
1824 	if (vha->device_flags & DFLG_NO_CABLE) {
1825 		ql_log(ql_log_warn, vha, 0x70a2,
1826 			"Cable is unplugged...\n");
1827 		rval = EXT_STATUS_INVALID_CFG;
1828 		goto done;
1829 	}
1830 
1831 	/* Check if the host is connected to a switch */
1832 	if (ha->current_topology != ISP_CFG_F) {
1833 		ql_log(ql_log_warn, vha, 0x70a3,
1834 			"Host is not connected to the switch\n");
1835 		rval = EXT_STATUS_INVALID_CFG;
1836 		goto done;
1837 	}
1838 
1839 	/* Check if operating mode is P2P */
1840 	if (ha->operating_mode != P2P) {
1841 		ql_log(ql_log_warn, vha, 0x70a4,
1842 		    "Host operating mode is not P2P\n");
1843 		rval = EXT_STATUS_INVALID_CFG;
1844 		goto done;
1845 	}
1846 
1847 	mutex_lock(&ha->selflogin_lock);
1848 	if (vha->self_login_loop_id == 0) {
1849 		/* Initialize all required fields of fcport */
1850 		vha->bidir_fcport.vha = vha;
1851 		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1852 		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1853 		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1854 		vha->bidir_fcport.loop_id = vha->loop_id;
1855 
1856 		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1857 			ql_log(ql_log_warn, vha, 0x70a7,
1858 			    "Failed to login port %06X for bidirectional IOCB\n",
1859 			    vha->bidir_fcport.d_id.b24);
1860 			mutex_unlock(&ha->selflogin_lock);
1861 			rval = EXT_STATUS_MAILBOX;
1862 			goto done;
1863 		}
1864 		vha->self_login_loop_id = nextlid - 1;
1865 
1866 	}
1867 	mutex_unlock(&ha->selflogin_lock);
1868 
1869 	/* Assign the self login loop id to fcport */
1870 	vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1871 
1872 	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1873 		bsg_job->request_payload.sg_list,
1874 		bsg_job->request_payload.sg_cnt,
1875 		DMA_TO_DEVICE);
1876 
1877 	if (!req_sg_cnt) {
1878 		rval = EXT_STATUS_NO_MEMORY;
1879 		goto done;
1880 	}
1881 
1882 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1883 		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1884 		DMA_FROM_DEVICE);
1885 
1886 	if (!rsp_sg_cnt) {
1887 		rval = EXT_STATUS_NO_MEMORY;
1888 		goto done_unmap_req_sg;
1889 	}
1890 
1891 	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1892 		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1893 		ql_dbg(ql_dbg_user, vha, 0x70a9,
1894 		    "DMA mapping resulted in different sg counts "
1895 		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1896 		    "%x dma_reply_sg_cnt: %x]\n",
1897 		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
1898 		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1899 		rval = EXT_STATUS_NO_MEMORY;
1900 		goto done_unmap_sg;
1901 	}
1902 
1903 	req_data_len = bsg_job->request_payload.payload_len;
1904 	rsp_data_len = bsg_job->reply_payload.payload_len;
1905 
1906 	if (req_data_len != rsp_data_len) {
1907 		rval = EXT_STATUS_BUSY;
1908 		ql_log(ql_log_warn, vha, 0x70aa,
1909 		    "req_data_len != rsp_data_len\n");
1910 		goto done_unmap_sg;
1911 	}
1912 
1913 	/* Alloc SRB structure */
1914 	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1915 	if (!sp) {
1916 		ql_dbg(ql_dbg_user, vha, 0x70ac,
1917 		    "Alloc SRB structure failed\n");
1918 		rval = EXT_STATUS_NO_MEMORY;
1919 		goto done_unmap_sg;
1920 	}
1921 
1922 	/* Populate srb->ctx with bidir ctx */
1923 	sp->u.bsg_job = bsg_job;
1924 	sp->free = qla2x00_bsg_sp_free;
1925 	sp->type = SRB_BIDI_CMD;
1926 	sp->done = qla2x00_bsg_job_done;
1927 
1928 	/* Add the read and write sg counts */
1929 	tot_dsds = rsp_sg_cnt + req_sg_cnt;
1930 
1931 	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1932 	if (rval != EXT_STATUS_OK)
1933 		goto done_free_srb;
1934 	/* The bsg request will be completed in the interrupt handler */
1935 	return rval;
1936 
1937 done_free_srb:
1938 	mempool_free(sp, ha->srb_mempool);
1939 done_unmap_sg:
1940 	dma_unmap_sg(&ha->pdev->dev,
1941 	    bsg_job->reply_payload.sg_list,
1942 	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1943 done_unmap_req_sg:
1944 	dma_unmap_sg(&ha->pdev->dev,
1945 	    bsg_job->request_payload.sg_list,
1946 	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1947 done:
1948 
1949 	/* Return an error vendor-specific response
1950 	 * and complete the bsg request.
1951 	 */
1952 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1953 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1954 	bsg_reply->reply_payload_rcv_len = 0;
1955 	bsg_reply->result = (DID_OK) << 16;
1956 	bsg_job_done(bsg_job, bsg_reply->result,
1957 		       bsg_reply->reply_payload_rcv_len);
1958 	/* Always return success, vendor rsp carries correct status */
1959 	return 0;
1960 }
1961 
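/*
 * Pass a management IOCB through to ISPFx00 firmware. The vendor
 * command embeds a struct qla_mt_iocb_rqst_fx00 whose flags select
 * which payloads need DMA mapping, and a throwaway fcport carries
 * the loop id for the IOCB builder. On success the job completes
 * asynchronously through the SRB done/free hooks; on failure the
 * mappings are undone here and a negative errno is returned.
 */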
1962 static int
1963 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1964 {
1965 	struct fc_bsg_request *bsg_request = bsg_job->request;
1966 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1967 	scsi_qla_host_t *vha = shost_priv(host);
1968 	struct qla_hw_data *ha = vha->hw;
1969 	int rval = (DID_ERROR << 16);
1970 	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1971 	srb_t *sp;
1972 	int req_sg_cnt = 0, rsp_sg_cnt = 0;
1973 	struct fc_port *fcport;
1974 	char *type = "FC_BSG_HST_FX_MGMT";
1975 
1976 	/* Copy the IOCB specific information */
1977 	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1978 	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
1979 
1980 	/* Dump the vendor information */
1981 	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
1982 	    piocb_rqst, sizeof(*piocb_rqst));
1983 
1984 	if (!vha->flags.online) {
1985 		ql_log(ql_log_warn, vha, 0x70d0,
1986 		    "Host is not online.\n");
1987 		rval = -EIO;
1988 		goto done;
1989 	}
1990 
1991 	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1992 		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1993 		    bsg_job->request_payload.sg_list,
1994 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1995 		if (!req_sg_cnt) {
1996 			ql_log(ql_log_warn, vha, 0x70c7,
1997 			    "dma_map_sg returned %d for request\n", req_sg_cnt);
1998 			rval = -ENOMEM;
1999 			goto done;
2000 		}
2001 	}
2002 
2003 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
2004 		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2005 		    bsg_job->reply_payload.sg_list,
2006 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2007 		if (!rsp_sg_cnt) {
2008 			ql_log(ql_log_warn, vha, 0x70c8,
2009 			    "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
2010 			rval = -ENOMEM;
2011 			goto done_unmap_req_sg;
2012 		}
2013 	}
2014 
2015 	ql_dbg(ql_dbg_user, vha, 0x70c9,
2016 	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: %x "
2017 	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2018 	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2019 
2020 	/* Allocate a dummy fcport structure, since the functions preparing
2021 	 * the IOCB and mailbox command retrieve port-specific information
2022 	 * from the fcport structure. For host-based ELS commands there is
2023 	 * no fcport structure allocated.
2024 	 */
2025 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2026 	if (!fcport) {
2027 		ql_log(ql_log_warn, vha, 0x70ca,
2028 		    "Failed to allocate fcport.\n");
2029 		rval = -ENOMEM;
2030 		goto done_unmap_rsp_sg;
2031 	}
2032 
2033 	/* Alloc SRB structure */
2034 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2035 	if (!sp) {
2036 		ql_log(ql_log_warn, vha, 0x70cb,
2037 		    "qla2x00_get_sp failed.\n");
2038 		rval = -ENOMEM;
2039 		goto done_free_fcport;
2040 	}
2041 
2042 	/* Initialize all required fields of fcport */
2043 	fcport->vha = vha;
2044 	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
2045 
2046 	sp->type = SRB_FXIOCB_BCMD;
2047 	sp->name = "bsg_fx_mgmt";
2048 	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2049 	sp->u.bsg_job = bsg_job;
2050 	sp->free = qla2x00_bsg_sp_free;
2051 	sp->done = qla2x00_bsg_job_done;
2052 
2053 	ql_dbg(ql_dbg_user, vha, 0x70cc,
2054 	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2055 	    type, piocb_rqst->func_type, fcport->loop_id);
2056 
2057 	rval = qla2x00_start_sp(sp);
2058 	if (rval != QLA_SUCCESS) {
2059 		ql_log(ql_log_warn, vha, 0x70cd,
2060 		    "qla2x00_start_sp failed=%d.\n", rval);
2061 		mempool_free(sp, ha->srb_mempool);
2062 		rval = -EIO;
2063 		goto done_free_fcport;
2064 	}
2065 	return rval;
2066 
2067 done_free_fcport:
2068 	qla2x00_free_fcport(fcport);
2069 
2070 done_unmap_rsp_sg:
2071 	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2072 		dma_unmap_sg(&ha->pdev->dev,
2073 		    bsg_job->reply_payload.sg_list,
2074 		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2075 done_unmap_req_sg:
2076 	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2077 		dma_unmap_sg(&ha->pdev->dev,
2078 		    bsg_job->request_payload.sg_list,
2079 		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2080 
2081 done:
2082 	return rval;
2083 }
2084 
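/*
 * Read or write a single SerDes register through the
 * qla2x00_{read,write}_serdes_word() mailbox helpers. The struct
 * qla_serdes_reg request carries the sub-command, address and value;
 * a read hands the updated structure back in the reply payload.
 * Mailbox failure is reported as EXT_STATUS_MAILBOX in vendor_rsp[0].
 */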
2085 static int
2086 qla26xx_serdes_op(struct bsg_job *bsg_job)
2087 {
2088 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2089 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2090 	scsi_qla_host_t *vha = shost_priv(host);
2091 	int rval = 0;
2092 	struct qla_serdes_reg sr;
2093 
2094 	memset(&sr, 0, sizeof(sr));
2095 
2096 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2097 	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2098 
2099 	switch (sr.cmd) {
2100 	case INT_SC_SERDES_WRITE_REG:
2101 		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2102 		bsg_reply->reply_payload_rcv_len = 0;
2103 		break;
2104 	case INT_SC_SERDES_READ_REG:
2105 		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2106 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2107 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2108 		bsg_reply->reply_payload_rcv_len = sizeof(sr);
2109 		break;
2110 	default:
2111 		ql_dbg(ql_dbg_user, vha, 0x708c,
2112 		    "Unknown serdes cmd %x.\n", sr.cmd);
2113 		rval = -EINVAL;
2114 		break;
2115 	}
2116 
2117 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2118 	    rval ? EXT_STATUS_MAILBOX : 0;
2119 
2120 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2121 	bsg_reply->result = DID_OK << 16;
2122 	bsg_job_done(bsg_job, bsg_reply->result,
2123 		       bsg_reply->reply_payload_rcv_len);
2124 	return 0;
2125 }
2126 
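/*
 * ISP8044 variant of the SerDes register accessor: same flow as
 * qla26xx_serdes_op(), but using the extended struct
 * qla_serdes_reg_ex and the qla8044_{read,write}_serdes_word()
 * helpers.
 */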
2127 static int
2128 qla8044_serdes_op(struct bsg_job *bsg_job)
2129 {
2130 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2131 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2132 	scsi_qla_host_t *vha = shost_priv(host);
2133 	int rval = 0;
2134 	struct qla_serdes_reg_ex sr;
2135 
2136 	memset(&sr, 0, sizeof(sr));
2137 
2138 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2139 	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2140 
2141 	switch (sr.cmd) {
2142 	case INT_SC_SERDES_WRITE_REG:
2143 		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2144 		bsg_reply->reply_payload_rcv_len = 0;
2145 		break;
2146 	case INT_SC_SERDES_READ_REG:
2147 		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2148 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2149 		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2150 		bsg_reply->reply_payload_rcv_len = sizeof(sr);
2151 		break;
2152 	default:
2153 		ql_dbg(ql_dbg_user, vha, 0x7020,
2154 		    "Unknown serdes cmd %x.\n", sr.cmd);
2155 		rval = -EINVAL;
2156 		break;
2157 	}
2158 
2159 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2160 	    rval ? EXT_STATUS_MAILBOX : 0;
2161 
2162 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2163 	bsg_reply->result = DID_OK << 16;
2164 	bsg_job_done(bsg_job, bsg_reply->result,
2165 		       bsg_reply->reply_payload_rcv_len);
2166 	return 0;
2167 }
2168 
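/*
 * Report the firmware's flash-update capabilities (ISP27xx/28xx
 * only). The 64-bit capability word is assembled from the firmware
 * attribute fields and returned to the caller in a struct
 * qla_flash_update_caps reply.
 */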
2169 static int
2170 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2171 {
2172 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2173 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2174 	scsi_qla_host_t *vha = shost_priv(host);
2175 	struct qla_hw_data *ha = vha->hw;
2176 	struct qla_flash_update_caps cap;
2177 
2178 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2179 		return -EPERM;
2180 
2181 	memset(&cap, 0, sizeof(cap));
2182 	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2183 			   (uint64_t)ha->fw_attributes_ext[0] << 32 |
2184 			   (uint64_t)ha->fw_attributes_h << 16 |
2185 			   (uint64_t)ha->fw_attributes;
2186 
2187 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2188 	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2189 	bsg_reply->reply_payload_rcv_len = sizeof(cap);
2190 
2191 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2192 	    EXT_STATUS_OK;
2193 
2194 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2195 	bsg_reply->result = DID_OK << 16;
2196 	bsg_job_done(bsg_job, bsg_reply->result,
2197 		       bsg_reply->reply_payload_rcv_len);
2198 	return 0;
2199 }
2200 
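/*
 * Validate a requested flash-update capability set (ISP27xx/28xx
 * only). The caller must echo back the exact capability word the
 * firmware reports and request an outage duration of at least
 * MAX_LOOP_TIMEOUT; anything else is rejected with
 * EXT_STATUS_INVALID_PARAM.
 */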
2201 static int
2202 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2203 {
2204 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2205 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2206 	scsi_qla_host_t *vha = shost_priv(host);
2207 	struct qla_hw_data *ha = vha->hw;
2208 	uint64_t online_fw_attr = 0;
2209 	struct qla_flash_update_caps cap;
2210 
2211 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2212 		return -EPERM;
2213 
2214 	memset(&cap, 0, sizeof(cap));
2215 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2216 	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2217 
2218 	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2219 			 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2220 			 (uint64_t)ha->fw_attributes_h << 16 |
2221 			 (uint64_t)ha->fw_attributes;
2222 
2223 	if (online_fw_attr != cap.capabilities) {
2224 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2225 		    EXT_STATUS_INVALID_PARAM;
2226 		return -EINVAL;
2227 	}
2228 
2229 	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2230 		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2231 		    EXT_STATUS_INVALID_PARAM;
2232 		return -EINVAL;
2233 	}
2234 
2235 	bsg_reply->reply_payload_rcv_len = 0;
2236 
2237 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2238 	    EXT_STATUS_OK;
2239 
2240 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2241 	bsg_reply->result = DID_OK << 16;
2242 	bsg_job_done(bsg_job, bsg_reply->result,
2243 		       bsg_reply->reply_payload_rcv_len);
2244 	return 0;
2245 }
2246 
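/*
 * Return buffer-to-buffer credit recovery (BBCR) state (ISP27xx/28xx
 * only). When BBCR is enabled, qla2x00_get_adapter_id() is issued
 * first; if that fails, the state is reported as unknown/offline
 * with the mailbox value in bbcr.mbx1. Otherwise the online/offline
 * state and the configured and negotiated BB_SC_N values are decoded
 * from vha->bbcr.
 */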
2247 static int
2248 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2249 {
2250 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2251 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2252 	scsi_qla_host_t *vha = shost_priv(host);
2253 	struct qla_hw_data *ha = vha->hw;
2254 	struct qla_bbcr_data bbcr;
2255 	uint16_t loop_id, topo, sw_cap;
2256 	uint8_t domain, area, al_pa, state;
2257 	int rval;
2258 
2259 	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2260 		return -EPERM;
2261 
2262 	memset(&bbcr, 0, sizeof(bbcr));
2263 
2264 	if (vha->flags.bbcr_enable)
2265 		bbcr.status = QLA_BBCR_STATUS_ENABLED;
2266 	else
2267 		bbcr.status = QLA_BBCR_STATUS_DISABLED;
2268 
2269 	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2270 		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2271 			&area, &domain, &topo, &sw_cap);
2272 		if (rval != QLA_SUCCESS) {
2273 			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2274 			bbcr.state = QLA_BBCR_STATE_OFFLINE;
2275 			bbcr.mbx1 = loop_id;
2276 			goto done;
2277 		}
2278 
2279 		state = (vha->bbcr >> 12) & 0x1;
2280 
2281 		if (state) {
2282 			bbcr.state = QLA_BBCR_STATE_OFFLINE;
2283 			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2284 		} else {
2285 			bbcr.state = QLA_BBCR_STATE_ONLINE;
2286 			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2287 		}
2288 
2289 		bbcr.configured_bbscn = vha->bbcr & 0xf;
2290 	}
2291 
2292 done:
2293 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2294 		bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2295 	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2296 
2297 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2298 
2299 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2300 	bsg_reply->result = DID_OK << 16;
2301 	bsg_job_done(bsg_job, bsg_reply->result,
2302 		       bsg_reply->reply_payload_rcv_len);
2303 	return 0;
2304 }
2305 
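/*
 * Fetch the adapter's private link statistics into a DMA-coherent
 * buffer via qla24xx_get_isp_stats() on the base port. For
 * QL_VND_GET_PRIV_STATS_EX the second vendor command word is passed
 * through as an options mask. The raw struct link_statistics is
 * copied to the reply payload on success.
 */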
2306 static int
2307 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2308 {
2309 	struct fc_bsg_request *bsg_request = bsg_job->request;
2310 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2311 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2312 	scsi_qla_host_t *vha = shost_priv(host);
2313 	struct qla_hw_data *ha = vha->hw;
2314 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2315 	struct link_statistics *stats = NULL;
2316 	dma_addr_t stats_dma;
2317 	int rval;
2318 	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2319 	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2320 
2321 	if (test_bit(UNLOADING, &vha->dpc_flags))
2322 		return -ENODEV;
2323 
2324 	if (unlikely(pci_channel_offline(ha->pdev)))
2325 		return -ENODEV;
2326 
2327 	if (qla2x00_reset_active(vha))
2328 		return -EBUSY;
2329 
2330 	if (!IS_FWI2_CAPABLE(ha))
2331 		return -EPERM;
2332 
2333 	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2334 				   GFP_KERNEL);
2335 	if (!stats) {
2336 		ql_log(ql_log_warn, vha, 0x70e2,
2337 		    "Failed to allocate memory for stats.\n");
2338 		return -ENOMEM;
2339 	}
2340 
2341 	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2342 
2343 	if (rval == QLA_SUCCESS) {
2344 		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2345 			stats, sizeof(*stats));
2346 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2347 			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2348 	}
2349 
2350 	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2351 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2352 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2353 
2354 	bsg_job->reply_len = sizeof(*bsg_reply);
2355 	bsg_reply->result = DID_OK << 16;
2356 	bsg_job_done(bsg_job, bsg_reply->result,
2357 		       bsg_reply->reply_payload_rcv_len);
2358 
2359 	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2360 		stats, stats_dma);
2361 
2362 	return 0;
2363 }
2364 
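/*
 * Run D_Port diagnostics (ISP83xx/27xx/28xx only). The options
 * arrive in a struct qla_dport_diag, qla26xx_dport_diagnostics()
 * fills dd->buf, and the same structure is handed back in the reply
 * payload when the mailbox command succeeds.
 */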
2365 static int
2366 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2367 {
2368 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2369 	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2370 	scsi_qla_host_t *vha = shost_priv(host);
2371 	int rval;
2372 	struct qla_dport_diag *dd;
2373 
2374 	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2375 	    !IS_QLA28XX(vha->hw))
2376 		return -EPERM;
2377 
2378 	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2379 	if (!dd) {
2380 		ql_log(ql_log_warn, vha, 0x70db,
2381 		    "Failed to allocate memory for dport.\n");
2382 		return -ENOMEM;
2383 	}
2384 
2385 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2386 	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2387 
2388 	rval = qla26xx_dport_diagnostics(
2389 	    vha, dd->buf, sizeof(dd->buf), dd->options);
2390 	if (rval == QLA_SUCCESS) {
2391 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2392 		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2393 	}
2394 
2395 	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2396 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2397 	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2398 
2399 	bsg_job->reply_len = sizeof(*bsg_reply);
2400 	bsg_reply->result = DID_OK << 16;
2401 	bsg_job_done(bsg_job, bsg_reply->result,
2402 		       bsg_reply->reply_payload_rcv_len);
2403 
2404 	kfree(dd);
2405 
2406 	return 0;
2407 }
2408 
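/*
 * Report which flash image regions are currently active. The global
 * firmware image is always reported; ISP28xx parts also report the
 * active auxiliary regions (board config, VPD/NVRAM and the two
 * NPIV config areas).
 */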
2409 static int
2410 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2411 {
2412 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2413 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2414 	struct qla_hw_data *ha = vha->hw;
2415 	struct qla_active_regions regions = { };
2416 	struct active_regions active_regions = { };
2417 
2418 	qla27xx_get_active_image(vha, &active_regions);
2419 	regions.global_image = active_regions.global;
2420 
2421 	if (IS_QLA28XX(ha)) {
2422 		qla28xx_get_aux_images(vha, &active_regions);
2423 		regions.board_config = active_regions.aux.board_config;
2424 		regions.vpd_nvram = active_regions.aux.vpd_nvram;
2425 		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2426 		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2427 	}
2428 
2429 	ql_dbg(ql_dbg_user, vha, 0x70e1,
2430 	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
2431 	    __func__, vha->host_no, regions.global_image,
2432 	    regions.board_config, regions.vpd_nvram,
2433 	    regions.npiv_config_0_1, regions.npiv_config_2_3);
2434 
2435 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2436 	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2437 
2438 	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2439 	bsg_reply->reply_payload_rcv_len = sizeof(regions);
2440 	bsg_reply->result = DID_OK << 16;
2441 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2442 	bsg_job_done(bsg_job, bsg_reply->result,
2443 	    bsg_reply->reply_payload_rcv_len);
2444 
2445 	return 0;
2446 }
2447 
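/*
 * Dispatch FC_BSG_HST_VENDOR requests by the first vendor command
 * word. Each handler completes the job itself (or arranges for the
 * interrupt handler to do so) and reports detailed status through
 * vendor_rsp[0].
 *
 * For reference, user space reaches these handlers through the
 * fc_host bsg node (e.g. /dev/bsg/fc_host0). A minimal sketch
 * follows; sizing the request buffer for the vendor_cmd words and
 * wiring payloads into dout/din are left to the caller, and the
 * exact ioctl plumbing belongs to the block-layer bsg code:
 *
 *	uint32_t buf[sizeof(struct fc_bsg_request) / 4 + 2];
 *	struct fc_bsg_request *req = memset(buf, 0, sizeof(buf));
 *	struct fc_bsg_reply rsp = { 0 };
 *	struct sg_io_v4 io = {
 *		.guard = 'Q',
 *		.protocol = BSG_PROTOCOL_SCSI,
 *		.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
 *		.request = (uintptr_t)req,
 *		.request_len = sizeof(buf),
 *		.response = (uintptr_t)&rsp,
 *		.max_response_len = sizeof(rsp),
 *	};
 *
 *	req->msgcode = FC_BSG_HST_VENDOR;
 *	req->rqst_data.h_vendor.vendor_cmd[0] = QL_VND_GET_BBCR_DATA;
 *	ioctl(fd, SG_IO, &io);
 */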
2448 static int
2449 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2450 {
2451 	struct fc_bsg_request *bsg_request = bsg_job->request;
2452 
2453 	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2454 	case QL_VND_LOOPBACK:
2455 		return qla2x00_process_loopback(bsg_job);
2456 
2457 	case QL_VND_A84_RESET:
2458 		return qla84xx_reset(bsg_job);
2459 
2460 	case QL_VND_A84_UPDATE_FW:
2461 		return qla84xx_updatefw(bsg_job);
2462 
2463 	case QL_VND_A84_MGMT_CMD:
2464 		return qla84xx_mgmt_cmd(bsg_job);
2465 
2466 	case QL_VND_IIDMA:
2467 		return qla24xx_iidma(bsg_job);
2468 
2469 	case QL_VND_FCP_PRIO_CFG_CMD:
2470 		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2471 
2472 	case QL_VND_READ_FLASH:
2473 		return qla2x00_read_optrom(bsg_job);
2474 
2475 	case QL_VND_UPDATE_FLASH:
2476 		return qla2x00_update_optrom(bsg_job);
2477 
2478 	case QL_VND_SET_FRU_VERSION:
2479 		return qla2x00_update_fru_versions(bsg_job);
2480 
2481 	case QL_VND_READ_FRU_STATUS:
2482 		return qla2x00_read_fru_status(bsg_job);
2483 
2484 	case QL_VND_WRITE_FRU_STATUS:
2485 		return qla2x00_write_fru_status(bsg_job);
2486 
2487 	case QL_VND_WRITE_I2C:
2488 		return qla2x00_write_i2c(bsg_job);
2489 
2490 	case QL_VND_READ_I2C:
2491 		return qla2x00_read_i2c(bsg_job);
2492 
2493 	case QL_VND_DIAG_IO_CMD:
2494 		return qla24xx_process_bidir_cmd(bsg_job);
2495 
2496 	case QL_VND_FX00_MGMT_CMD:
2497 		return qlafx00_mgmt_cmd(bsg_job);
2498 
2499 	case QL_VND_SERDES_OP:
2500 		return qla26xx_serdes_op(bsg_job);
2501 
2502 	case QL_VND_SERDES_OP_EX:
2503 		return qla8044_serdes_op(bsg_job);
2504 
2505 	case QL_VND_GET_FLASH_UPDATE_CAPS:
2506 		return qla27xx_get_flash_upd_cap(bsg_job);
2507 
2508 	case QL_VND_SET_FLASH_UPDATE_CAPS:
2509 		return qla27xx_set_flash_upd_cap(bsg_job);
2510 
2511 	case QL_VND_GET_BBCR_DATA:
2512 		return qla27xx_get_bbcr_data(bsg_job);
2513 
2514 	case QL_VND_GET_PRIV_STATS:
2515 	case QL_VND_GET_PRIV_STATS_EX:
2516 		return qla2x00_get_priv_stats(bsg_job);
2517 
2518 	case QL_VND_DPORT_DIAGNOSTICS:
2519 		return qla2x00_do_dport_diagnostics(bsg_job);
2520 
2521 	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2522 		return qla2x00_get_flash_image_status(bsg_job);
2523 
2524 	default:
2525 		return -ENOSYS;
2526 	}
2527 }
2528 
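/*
 * Common entry point for bsg requests on this host (the driver's
 * bsg_request transport callback). ELS requests addressed to a
 * remote port resolve the vha through the rport; everything else
 * resolves it through the shost. Requests are refused with -EBUSY
 * while the chip is down.
 */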
2529 int
2530 qla24xx_bsg_request(struct bsg_job *bsg_job)
2531 {
2532 	struct fc_bsg_request *bsg_request = bsg_job->request;
2533 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2534 	int ret = -EINVAL;
2535 	struct fc_rport *rport;
2536 	struct Scsi_Host *host;
2537 	scsi_qla_host_t *vha;
2538 
2539 	/* In case no data is transferred. */
2540 	bsg_reply->reply_payload_rcv_len = 0;
2541 
2542 	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2543 		rport = fc_bsg_to_rport(bsg_job);
2544 		host = rport_to_shost(rport);
2545 		vha = shost_priv(host);
2546 	} else {
2547 		host = fc_bsg_to_shost(bsg_job);
2548 		vha = shost_priv(host);
2549 	}
2550 
2551 	if (qla2x00_chip_is_down(vha)) {
2552 		ql_dbg(ql_dbg_user, vha, 0x709f,
2553 		    "BSG: ISP abort active/needed -- cmd=%d.\n",
2554 		    bsg_request->msgcode);
2555 		return -EBUSY;
2556 	}
2557 
2558 	ql_dbg(ql_dbg_user, vha, 0x7000,
2559 	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2560 
2561 	switch (bsg_request->msgcode) {
2562 	case FC_BSG_RPT_ELS:
2563 	case FC_BSG_HST_ELS_NOLOGIN:
2564 		ret = qla2x00_process_els(bsg_job);
2565 		break;
2566 	case FC_BSG_HST_CT:
2567 		ret = qla2x00_process_ct(bsg_job);
2568 		break;
2569 	case FC_BSG_HST_VENDOR:
2570 		ret = qla2x00_process_vendor_specific(bsg_job);
2571 		break;
2572 	case FC_BSG_HST_ADD_RPORT:
2573 	case FC_BSG_HST_DEL_RPORT:
2574 	case FC_BSG_RPT_CT:
2575 	default:
2576 		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2577 		break;
2578 	}
2579 	return ret;
2580 }
2581 
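/*
 * bsg timeout handler: walk every request queue's outstanding
 * commands under hardware_lock looking for the SRB that owns this
 * job, pull it from the table and try a mailbox abort. The lock is
 * dropped around abort_command(), which issues a sleeping mailbox
 * command; the SRB itself is freed outside the lock.
 */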
2582 int
2583 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
2584 {
2585 	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2586 	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2587 	struct qla_hw_data *ha = vha->hw;
2588 	srb_t *sp;
2589 	int cnt, que;
2590 	unsigned long flags;
2591 	struct req_que *req;
2592 
2593 	/* find the bsg job from the active list of commands */
2594 	spin_lock_irqsave(&ha->hardware_lock, flags);
2595 	for (que = 0; que < ha->max_req_queues; que++) {
2596 		req = ha->req_q_map[que];
2597 		if (!req)
2598 			continue;
2599 
2600 		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2601 			sp = req->outstanding_cmds[cnt];
2602 			if (sp) {
2603 				if (((sp->type == SRB_CT_CMD) ||
2604 					(sp->type == SRB_ELS_CMD_HST) ||
2605 					(sp->type == SRB_FXIOCB_BCMD))
2606 					&& (sp->u.bsg_job == bsg_job)) {
2607 					req->outstanding_cmds[cnt] = NULL;
2608 					spin_unlock_irqrestore(&ha->hardware_lock, flags);
2609 					if (ha->isp_ops->abort_command(sp)) {
2610 						ql_log(ql_log_warn, vha, 0x7089,
2611 						    "mbx abort_command "
2612 						    "failed.\n");
2613 						bsg_reply->result = -EIO;
2614 					} else {
2615 						ql_dbg(ql_dbg_user, vha, 0x708a,
2616 						    "mbx abort_command "
2617 						    "success.\n");
2618 						bsg_reply->result = 0;
2619 					}
2620 					spin_lock_irqsave(&ha->hardware_lock, flags);
2621 					goto done;
2622 				}
2623 			}
2624 		}
2625 	}
2626 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2627 	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2628 	bsg_reply->result = -ENXIO;
2629 	return 0;
2630 
2631 done:
2632 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2633 	sp->free(sp);
2634 	return 0;
2635 }
2636