xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_bsg.c (revision 4800cd83)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2010 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 
13 /* BSG support for ELS/CT pass through */
14 inline srb_t *
15 qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
16 {
17 	srb_t *sp;
18 	struct qla_hw_data *ha = vha->hw;
19 	struct srb_ctx *ctx;
20 
21 	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
22 	if (!sp)
23 		goto done;
24 	ctx = kzalloc(size, GFP_KERNEL);
25 	if (!ctx) {
26 		mempool_free(sp, ha->srb_mempool);
27 		sp = NULL;
28 		goto done;
29 	}
30 
31 	memset(sp, 0, sizeof(*sp));
32 	sp->fcport = fcport;
33 	sp->ctx = ctx;
34 done:
35 	return sp;
36 }
37 
38 int
39 qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
40 {
41 	int i, ret, num_valid;
42 	uint8_t *bcode;
43 	struct qla_fcp_prio_entry *pri_entry;
44 	uint32_t *bcode_val_ptr, bcode_val;
45 
46 	ret = 1;
47 	num_valid = 0;
48 	bcode = (uint8_t *)pri_cfg;
49 	bcode_val_ptr = (uint32_t *)pri_cfg;
50 	bcode_val = (uint32_t)(*bcode_val_ptr);
51 
52 	if (bcode_val == 0xFFFFFFFF) {
53 		/* No FCP Priority config data in flash */
54 		DEBUG2(printk(KERN_INFO
55 		    "%s: No FCP priority config data.\n",
56 		    __func__));
57 		return 0;
58 	}
59 
60 	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
61 			bcode[3] != 'S') {
62 		/* Invalid FCP priority data header*/
63 		DEBUG2(printk(KERN_ERR
64 		    "%s: Invalid FCP Priority data header. bcode=0x%x\n",
65 		    __func__, bcode_val));
66 		return 0;
67 	}
68 	if (flag != 1)
69 		return ret;
70 
71 	pri_entry = &pri_cfg->entry[0];
72 	for (i = 0; i < pri_cfg->num_entries; i++) {
73 		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
74 			num_valid++;
75 		pri_entry++;
76 	}
77 
78 	if (num_valid == 0) {
79 		/* No valid FCP priority data entries */
80 		DEBUG2(printk(KERN_ERR
81 		    "%s: No valid FCP Priority data entries.\n",
82 		    __func__));
83 		ret = 0;
84 	} else {
85 		/* FCP priority data is valid */
86 		DEBUG2(printk(KERN_INFO
87 		    "%s: Valid FCP priority data. num entries = %d\n",
88 		    __func__, num_valid));
89 	}
90 
91 	return ret;
92 }
93 
/*
 * Handle the FCP-priority vendor sub-commands delivered via BSG:
 * enable/disable the feature, and get/set the priority configuration
 * blob kept in ha->fcp_prio_cfg.
 *
 * Supported only on ISP24xx/25xx; rejected with -EBUSY while an ISP
 * abort is pending/active.  Always completes the bsg job before
 * returning.  Returns 0 on success, negative errno otherwise.
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	bsg_job->reply->reply_payload_rcv_len = 0;

	/* FCP priority is only implemented on 24xx/25xx hardware. */
	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Don't touch priority state while the ISP is being reset. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ret = -EBUSY;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		/* Disabling only makes sense if currently enabled;
		 * push the new state to all ports.
		 */
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		/* Enabling requires a previously loaded config. */
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		/* Copy up to len bytes of the cached config to user. */
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Lazily allocate the config buffer on first SET. */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				qla_printk(KERN_WARNING, ha,
					"Unable to allocate memory "
					"for fcp prio config data (%x).\n",
					FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		/* NOTE(review): copies FCP_PRIO_CFG_SIZE bytes even when
		 * len is smaller; sg_copy_to_buffer stops at the end of
		 * the scatterlist, and the buffer was zeroed just above.
		 */
		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(
			(struct qla_fcp_prio_cfg *)
			ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If buffer was invalid then
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Mirror the enable attribute into the flag and notify
		 * all ports of the new configuration.
		 */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	bsg_job->job_done(bsg_job);
	return ret;
}
228 static int
229 qla2x00_process_els(struct fc_bsg_job *bsg_job)
230 {
231 	struct fc_rport *rport;
232 	fc_port_t *fcport = NULL;
233 	struct Scsi_Host *host;
234 	scsi_qla_host_t *vha;
235 	struct qla_hw_data *ha;
236 	srb_t *sp;
237 	const char *type;
238 	int req_sg_cnt, rsp_sg_cnt;
239 	int rval =  (DRIVER_ERROR << 16);
240 	uint16_t nextlid = 0;
241 	struct srb_ctx *els;
242 
243 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
244 		rport = bsg_job->rport;
245 		fcport = *(fc_port_t **) rport->dd_data;
246 		host = rport_to_shost(rport);
247 		vha = shost_priv(host);
248 		ha = vha->hw;
249 		type = "FC_BSG_RPT_ELS";
250 	} else {
251 		host = bsg_job->shost;
252 		vha = shost_priv(host);
253 		ha = vha->hw;
254 		type = "FC_BSG_HST_ELS_NOLOGIN";
255 	}
256 
257 	/* pass through is supported only for ISP 4Gb or higher */
258 	if (!IS_FWI2_CAPABLE(ha)) {
259 		DEBUG2(qla_printk(KERN_INFO, ha,
260 		    "scsi(%ld):ELS passthru not supported for ISP23xx based "
261 		    "adapters\n", vha->host_no));
262 		rval = -EPERM;
263 		goto done;
264 	}
265 
266 	/*  Multiple SG's are not supported for ELS requests */
267 	if (bsg_job->request_payload.sg_cnt > 1 ||
268 		bsg_job->reply_payload.sg_cnt > 1) {
269 		DEBUG2(printk(KERN_INFO
270 			"multiple SG's are not supported for ELS requests"
271 			" [request_sg_cnt: %x reply_sg_cnt: %x]\n",
272 			bsg_job->request_payload.sg_cnt,
273 			bsg_job->reply_payload.sg_cnt));
274 		rval = -EPERM;
275 		goto done;
276 	}
277 
278 	/* ELS request for rport */
279 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
280 		/* make sure the rport is logged in,
281 		 * if not perform fabric login
282 		 */
283 		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
284 			DEBUG2(qla_printk(KERN_WARNING, ha,
285 			"failed to login port %06X for ELS passthru\n",
286 			fcport->d_id.b24));
287 			rval = -EIO;
288 			goto done;
289 		}
290 	} else {
291 		/* Allocate a dummy fcport structure, since functions
292 		 * preparing the IOCB and mailbox command retrieves port
293 		 * specific information from fcport structure. For Host based
294 		 * ELS commands there will be no fcport structure allocated
295 		 */
296 		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
297 		if (!fcport) {
298 			rval = -ENOMEM;
299 			goto done;
300 		}
301 
302 		/* Initialize all required  fields of fcport */
303 		fcport->vha = vha;
304 		fcport->vp_idx = vha->vp_idx;
305 		fcport->d_id.b.al_pa =
306 			bsg_job->request->rqst_data.h_els.port_id[0];
307 		fcport->d_id.b.area =
308 			bsg_job->request->rqst_data.h_els.port_id[1];
309 		fcport->d_id.b.domain =
310 			bsg_job->request->rqst_data.h_els.port_id[2];
311 		fcport->loop_id =
312 			(fcport->d_id.b.al_pa == 0xFD) ?
313 			NPH_FABRIC_CONTROLLER : NPH_F_PORT;
314 	}
315 
316 	if (!vha->flags.online) {
317 		DEBUG2(qla_printk(KERN_WARNING, ha,
318 		"host not online\n"));
319 		rval = -EIO;
320 		goto done;
321 	}
322 
323 	req_sg_cnt =
324 		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
325 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
326 	if (!req_sg_cnt) {
327 		rval = -ENOMEM;
328 		goto done_free_fcport;
329 	}
330 
331 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
332 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
333         if (!rsp_sg_cnt) {
334 		rval = -ENOMEM;
335 		goto done_free_fcport;
336 	}
337 
338 	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
339 		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
340 		DEBUG2(printk(KERN_INFO
341 			"dma mapping resulted in different sg counts \
342 			[request_sg_cnt: %x dma_request_sg_cnt: %x\
343 			reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
344 			bsg_job->request_payload.sg_cnt, req_sg_cnt,
345 			bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
346 		rval = -EAGAIN;
347 		goto done_unmap_sg;
348 	}
349 
350 	/* Alloc SRB structure */
351 	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
352 	if (!sp) {
353 		rval = -ENOMEM;
354 		goto done_unmap_sg;
355 	}
356 
357 	els = sp->ctx;
358 	els->type =
359 		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
360 		SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
361 	els->name =
362 		(bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
363 		"bsg_els_rpt" : "bsg_els_hst");
364 	els->u.bsg_job = bsg_job;
365 
366 	DEBUG2(qla_printk(KERN_INFO, ha,
367 		"scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
368 		"portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
369 		bsg_job->request->rqst_data.h_els.command_code,
370 		fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
371 		fcport->d_id.b.al_pa));
372 
373 	rval = qla2x00_start_sp(sp);
374 	if (rval != QLA_SUCCESS) {
375 		kfree(sp->ctx);
376 		mempool_free(sp, ha->srb_mempool);
377 		rval = -EIO;
378 		goto done_unmap_sg;
379 	}
380 	return rval;
381 
382 done_unmap_sg:
383 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
384 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
385 	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
386 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
387 	goto done_free_fcport;
388 
389 done_free_fcport:
390 	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
391 		kfree(fcport);
392 done:
393 	return rval;
394 }
395 
396 static int
397 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
398 {
399 	srb_t *sp;
400 	struct Scsi_Host *host = bsg_job->shost;
401 	scsi_qla_host_t *vha = shost_priv(host);
402 	struct qla_hw_data *ha = vha->hw;
403 	int rval = (DRIVER_ERROR << 16);
404 	int req_sg_cnt, rsp_sg_cnt;
405 	uint16_t loop_id;
406 	struct fc_port *fcport;
407 	char  *type = "FC_BSG_HST_CT";
408 	struct srb_ctx *ct;
409 
410 	req_sg_cnt =
411 		dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
412 			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
413 	if (!req_sg_cnt) {
414 		rval = -ENOMEM;
415 		goto done;
416 	}
417 
418 	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
419 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
420 	if (!rsp_sg_cnt) {
421 		rval = -ENOMEM;
422 		goto done;
423 	}
424 
425 	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
426 	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
427 		DEBUG2(qla_printk(KERN_WARNING, ha,
428 		    "[request_sg_cnt: %x dma_request_sg_cnt: %x\
429 		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
430 		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
431 		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
432 		rval = -EAGAIN;
433 		goto done_unmap_sg;
434 	}
435 
436 	if (!vha->flags.online) {
437 		DEBUG2(qla_printk(KERN_WARNING, ha,
438 			"host not online\n"));
439 		rval = -EIO;
440 		goto done_unmap_sg;
441 	}
442 
443 	loop_id =
444 		(bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
445 			>> 24;
446 	switch (loop_id) {
447 	case 0xFC:
448 		loop_id = cpu_to_le16(NPH_SNS);
449 		break;
450 	case 0xFA:
451 		loop_id = vha->mgmt_svr_loop_id;
452 		break;
453 	default:
454 		DEBUG2(qla_printk(KERN_INFO, ha,
455 		    "Unknown loop id: %x\n", loop_id));
456 		rval = -EINVAL;
457 		goto done_unmap_sg;
458 	}
459 
460 	/* Allocate a dummy fcport structure, since functions preparing the
461 	 * IOCB and mailbox command retrieves port specific information
462 	 * from fcport structure. For Host based ELS commands there will be
463 	 * no fcport structure allocated
464 	 */
465 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
466 	if (!fcport) {
467 		rval = -ENOMEM;
468 		goto done_unmap_sg;
469 	}
470 
471 	/* Initialize all required  fields of fcport */
472 	fcport->vha = vha;
473 	fcport->vp_idx = vha->vp_idx;
474 	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
475 	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
476 	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
477 	fcport->loop_id = loop_id;
478 
479 	/* Alloc SRB structure */
480 	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
481 	if (!sp) {
482 		rval = -ENOMEM;
483 		goto done_free_fcport;
484 	}
485 
486 	ct = sp->ctx;
487 	ct->type = SRB_CT_CMD;
488 	ct->name = "bsg_ct";
489 	ct->u.bsg_job = bsg_job;
490 
491 	DEBUG2(qla_printk(KERN_INFO, ha,
492 		"scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
493 		"portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
494 		(bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
495 		fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
496 		fcport->d_id.b.al_pa));
497 
498 	rval = qla2x00_start_sp(sp);
499 	if (rval != QLA_SUCCESS) {
500 		kfree(sp->ctx);
501 		mempool_free(sp, ha->srb_mempool);
502 		rval = -EIO;
503 		goto done_free_fcport;
504 	}
505 	return rval;
506 
507 done_free_fcport:
508 	kfree(fcport);
509 done_unmap_sg:
510 	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
511 		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
512 	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
513 		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
514 done:
515 	return rval;
516 }
517 
518 /* Set the port configuration to enable the
519  * internal loopback on ISP81XX
520  */
521 static inline int
522 qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
523     uint16_t *new_config)
524 {
525 	int ret = 0;
526 	int rval = 0;
527 	struct qla_hw_data *ha = vha->hw;
528 
529 	if (!IS_QLA81XX(ha))
530 		goto done_set_internal;
531 
532 	new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
533 	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
534 
535 	ha->notify_dcbx_comp = 1;
536 	ret = qla81xx_set_port_config(vha, new_config);
537 	if (ret != QLA_SUCCESS) {
538 		DEBUG2(printk(KERN_ERR
539 		    "%s(%lu): Set port config failed\n",
540 		    __func__, vha->host_no));
541 		ha->notify_dcbx_comp = 0;
542 		rval = -EINVAL;
543 		goto done_set_internal;
544 	}
545 
546 	/* Wait for DCBX complete event */
547 	if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
548 		DEBUG2(qla_printk(KERN_WARNING, ha,
549 		    "State change notificaition not received.\n"));
550 	} else
551 		DEBUG2(qla_printk(KERN_INFO, ha,
552 		    "State change RECEIVED\n"));
553 
554 	ha->notify_dcbx_comp = 0;
555 
556 done_set_internal:
557 	return rval;
558 }
559 
560 /* Set the port configuration to disable the
561  * internal loopback on ISP81XX
562  */
563 static inline int
564 qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
565     int wait)
566 {
567 	int ret = 0;
568 	int rval = 0;
569 	uint16_t new_config[4];
570 	struct qla_hw_data *ha = vha->hw;
571 
572 	if (!IS_QLA81XX(ha))
573 		goto done_reset_internal;
574 
575 	memset(new_config, 0 , sizeof(new_config));
576 	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
577 			ENABLE_INTERNAL_LOOPBACK) {
578 		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
579 		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
580 
581 		ha->notify_dcbx_comp = wait;
582 		ret = qla81xx_set_port_config(vha, new_config);
583 		if (ret != QLA_SUCCESS) {
584 			DEBUG2(printk(KERN_ERR
585 			    "%s(%lu): Set port config failed\n",
586 			     __func__, vha->host_no));
587 			ha->notify_dcbx_comp = 0;
588 			rval = -EINVAL;
589 			goto done_reset_internal;
590 		}
591 
592 		/* Wait for DCBX complete event */
593 		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
594 			(20 * HZ))) {
595 			DEBUG2(qla_printk(KERN_WARNING, ha,
596 			    "State change notificaition not received.\n"));
597 			ha->notify_dcbx_comp = 0;
598 			rval = -EINVAL;
599 			goto done_reset_internal;
600 		} else
601 			DEBUG2(qla_printk(KERN_INFO, ha,
602 			    "State change RECEIVED\n"));
603 
604 		ha->notify_dcbx_comp = 0;
605 	}
606 done_reset_internal:
607 	return rval;
608 }
609 
610 static int
611 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
612 {
613 	struct Scsi_Host *host = bsg_job->shost;
614 	scsi_qla_host_t *vha = shost_priv(host);
615 	struct qla_hw_data *ha = vha->hw;
616 	int rval;
617 	uint8_t command_sent;
618 	char *type;
619 	struct msg_echo_lb elreq;
620 	uint16_t response[MAILBOX_REGISTER_COUNT];
621 	uint16_t config[4], new_config[4];
622 	uint8_t *fw_sts_ptr;
623 	uint8_t *req_data = NULL;
624 	dma_addr_t req_data_dma;
625 	uint32_t req_data_len;
626 	uint8_t *rsp_data = NULL;
627 	dma_addr_t rsp_data_dma;
628 	uint32_t rsp_data_len;
629 
630 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
631 		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
632 		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
633 		return -EBUSY;
634 
635 	if (!vha->flags.online) {
636 		DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
637 		return -EIO;
638 	}
639 
640 	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
641 		bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
642 		DMA_TO_DEVICE);
643 
644 	if (!elreq.req_sg_cnt)
645 		return -ENOMEM;
646 
647 	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
648 		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
649 		DMA_FROM_DEVICE);
650 
651 	if (!elreq.rsp_sg_cnt) {
652 		rval = -ENOMEM;
653 		goto done_unmap_req_sg;
654 	}
655 
656 	if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
657 		(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
658 		DEBUG2(printk(KERN_INFO
659 			"dma mapping resulted in different sg counts "
660 			"[request_sg_cnt: %x dma_request_sg_cnt: %x "
661 			"reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
662 			bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
663 			bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
664 		rval = -EAGAIN;
665 		goto done_unmap_sg;
666 	}
667 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
668 	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
669 		&req_data_dma, GFP_KERNEL);
670 	if (!req_data) {
671 		DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
672 			"failed for host=%lu\n", __func__, vha->host_no));
673 		rval = -ENOMEM;
674 		goto done_unmap_sg;
675 	}
676 
677 	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
678 		&rsp_data_dma, GFP_KERNEL);
679 	if (!rsp_data) {
680 		DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
681 			"failed for host=%lu\n", __func__, vha->host_no));
682 		rval = -ENOMEM;
683 		goto done_free_dma_req;
684 	}
685 
686 	/* Copy the request buffer in req_data now */
687 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
688 		bsg_job->request_payload.sg_cnt, req_data, req_data_len);
689 
690 	elreq.send_dma = req_data_dma;
691 	elreq.rcv_dma = rsp_data_dma;
692 	elreq.transfer_size = req_data_len;
693 
694 	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
695 
696 	if ((ha->current_topology == ISP_CFG_F ||
697 	    (IS_QLA81XX(ha) &&
698 	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
699 	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
700 		elreq.options == EXTERNAL_LOOPBACK) {
701 		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
702 		DEBUG2(qla_printk(KERN_INFO, ha,
703 			"scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
704 		command_sent = INT_DEF_LB_ECHO_CMD;
705 		rval = qla2x00_echo_test(vha, &elreq, response);
706 	} else {
707 		if (IS_QLA81XX(ha)) {
708 			memset(config, 0, sizeof(config));
709 			memset(new_config, 0, sizeof(new_config));
710 			if (qla81xx_get_port_config(vha, config)) {
711 				DEBUG2(printk(KERN_ERR
712 					"%s(%lu): Get port config failed\n",
713 					__func__, vha->host_no));
714 				bsg_job->reply->reply_payload_rcv_len = 0;
715 				bsg_job->reply->result = (DID_ERROR << 16);
716 				rval = -EPERM;
717 				goto done_free_dma_req;
718 			}
719 
720 			if (elreq.options != EXTERNAL_LOOPBACK) {
721 				DEBUG2(qla_printk(KERN_INFO, ha,
722 					"Internal: current port config = %x\n",
723 					config[0]));
724 				if (qla81xx_set_internal_loopback(vha, config,
725 					new_config)) {
726 					bsg_job->reply->reply_payload_rcv_len =
727 						0;
728 					bsg_job->reply->result =
729 						(DID_ERROR << 16);
730 					rval = -EPERM;
731 					goto done_free_dma_req;
732 				}
733 			} else {
734 				/* For external loopback to work
735 				 * ensure internal loopback is disabled
736 				 */
737 				if (qla81xx_reset_internal_loopback(vha,
738 					config, 1)) {
739 					bsg_job->reply->reply_payload_rcv_len =
740 						0;
741 					bsg_job->reply->result =
742 						(DID_ERROR << 16);
743 					rval = -EPERM;
744 					goto done_free_dma_req;
745 				}
746 			}
747 
748 			type = "FC_BSG_HST_VENDOR_LOOPBACK";
749 			DEBUG2(qla_printk(KERN_INFO, ha,
750 				"scsi(%ld) bsg rqst type: %s\n",
751 				vha->host_no, type));
752 
753 			command_sent = INT_DEF_LB_LOOPBACK_CMD;
754 			rval = qla2x00_loopback_test(vha, &elreq, response);
755 
756 			if (new_config[0]) {
757 				/* Revert back to original port config
758 				 * Also clear internal loopback
759 				 */
760 				qla81xx_reset_internal_loopback(vha,
761 				    new_config, 0);
762 			}
763 
764 			if (response[0] == MBS_COMMAND_ERROR &&
765 					response[1] == MBS_LB_RESET) {
766 				DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
767 					"ISP\n", __func__, vha->host_no));
768 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
769 				qla2xxx_wake_dpc(vha);
770 				qla2x00_wait_for_chip_reset(vha);
771 				/* Also reset the MPI */
772 				if (qla81xx_restart_mpi_firmware(vha) !=
773 				    QLA_SUCCESS) {
774 					qla_printk(KERN_INFO, ha,
775 					    "MPI reset failed for host%ld.\n",
776 					    vha->host_no);
777 				}
778 
779 				bsg_job->reply->reply_payload_rcv_len = 0;
780 				bsg_job->reply->result = (DID_ERROR << 16);
781 				rval = -EIO;
782 				goto done_free_dma_req;
783 			}
784 		} else {
785 			type = "FC_BSG_HST_VENDOR_LOOPBACK";
786 			DEBUG2(qla_printk(KERN_INFO, ha,
787 				"scsi(%ld) bsg rqst type: %s\n",
788 				vha->host_no, type));
789 			command_sent = INT_DEF_LB_LOOPBACK_CMD;
790 			rval = qla2x00_loopback_test(vha, &elreq, response);
791 		}
792 	}
793 
794 	if (rval) {
795 		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
796 		    "request %s failed\n", vha->host_no, type));
797 
798 		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
799 		    sizeof(struct fc_bsg_reply);
800 
801 		memcpy(fw_sts_ptr, response, sizeof(response));
802 		fw_sts_ptr += sizeof(response);
803 		*fw_sts_ptr = command_sent;
804 		rval = 0;
805 		bsg_job->reply->reply_payload_rcv_len = 0;
806 		bsg_job->reply->result = (DID_ERROR << 16);
807 	} else {
808 		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
809 			"request %s completed\n", vha->host_no, type));
810 
811 		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
812 			sizeof(response) + sizeof(uint8_t);
813 		bsg_job->reply->reply_payload_rcv_len =
814 			bsg_job->reply_payload.payload_len;
815 		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
816 			sizeof(struct fc_bsg_reply);
817 		memcpy(fw_sts_ptr, response, sizeof(response));
818 		fw_sts_ptr += sizeof(response);
819 		*fw_sts_ptr = command_sent;
820 		bsg_job->reply->result = DID_OK;
821 		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
822 			bsg_job->reply_payload.sg_cnt, rsp_data,
823 			rsp_data_len);
824 	}
825 	bsg_job->job_done(bsg_job);
826 
827 	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
828 		rsp_data, rsp_data_dma);
829 done_free_dma_req:
830 	dma_free_coherent(&ha->pdev->dev, req_data_len,
831 		req_data, req_data_dma);
832 done_unmap_sg:
833 	dma_unmap_sg(&ha->pdev->dev,
834 	    bsg_job->reply_payload.sg_list,
835 	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
836 done_unmap_req_sg:
837 	dma_unmap_sg(&ha->pdev->dev,
838 	    bsg_job->request_payload.sg_list,
839 	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
840 	return rval;
841 }
842 
843 static int
844 qla84xx_reset(struct fc_bsg_job *bsg_job)
845 {
846 	struct Scsi_Host *host = bsg_job->shost;
847 	scsi_qla_host_t *vha = shost_priv(host);
848 	struct qla_hw_data *ha = vha->hw;
849 	int rval = 0;
850 	uint32_t flag;
851 
852 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
853 	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
854 	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
855 		return -EBUSY;
856 
857 	if (!IS_QLA84XX(ha)) {
858 		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
859 		   "exiting.\n", vha->host_no));
860 		return -EINVAL;
861 	}
862 
863 	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
864 
865 	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
866 
867 	if (rval) {
868 		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
869 		    "request 84xx reset failed\n", vha->host_no));
870 		rval = bsg_job->reply->reply_payload_rcv_len = 0;
871 		bsg_job->reply->result = (DID_ERROR << 16);
872 
873 	} else {
874 		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
875 		    "request 84xx reset completed\n", vha->host_no));
876 		bsg_job->reply->result = DID_OK;
877 	}
878 
879 	bsg_job->job_done(bsg_job);
880 	return rval;
881 }
882 
/*
 * Update the ISP84XX firmware from a BSG-supplied image.
 *
 * Copies the firmware payload into a coherent DMA buffer, builds a
 * VERIFY_CHIP IOCB describing it, and issues it with a 120s timeout.
 * Completes the bsg job itself on the issue path; returns 0 on success
 * or a negative errno on setup failure.
 */
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	/* Refuse while an ISP abort is pending or active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
			"exiting.\n", vha->host_no));
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt)
		return -ENOMEM;

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		DEBUG2(printk(KERN_INFO
			"dma mapping resulted in different sg counts "
			"request_sg_cnt: %x dma_request_sg_cnt: %x ",
			bsg_job->request_payload.sg_cnt, sg_cnt));
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Bounce the firmware image into a single coherent buffer. */
	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
			"failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
			"failed for host=%lu\n", __func__, vha->host_no));
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	/* Firmware version lives in the third 32-bit word of the image. */
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	/* NOTE(review): clears sizeof(struct access_chip_84xx) although
	 * mn is a verify_chip_entry_84xx — presumably both fit a pool
	 * entry and share a size; confirm against qla_def.h.
	 */
	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver =  cpu_to_le32(fw_ver);
	mn->fw_size =  cpu_to_le32(data_len);
	mn->fw_seq_size =  cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	/* Firmware verify/update can be slow: allow 120 seconds. */
	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
			"request 84xx updatefw failed\n", vha->host_no));

		/* Failure is conveyed via the reply; bsg rval stays 0. */
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
			"request 84xx updatefw completed\n", vha->host_no));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	bsg_job->job_done(bsg_job);
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	return rval;
}
994 
/*
 * qla84xx_mgmt_cmd() - Service an ISP84xx management pass-through request.
 * @bsg_job: fc_bsg_job whose request buffer carries a struct
 *           qla_bsg_a84_mgmt immediately after the fc_bsg_request header.
 *
 * Builds an ACCESS_CHIP_IOCB for one of four sub-commands (read memory,
 * get info, write memory, change config) and issues it to the firmware.
 * READ_MEM/GET_INFO return data through the bsg reply payload, WRITE_MEM
 * takes data from the request payload, and CHNG_CONFIG carries all its
 * arguments in the IOCB itself (no data buffer, no sg mapping).
 *
 * Returns 0 on success or a negative errno for setup failures; a
 * firmware-level failure is reported via bsg_job->reply->result with a
 * 0 return (standard bsg vendor-command convention in this file).
 */
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	/* Records which payload (if any) was dma_map_sg()'d, so the
	 * shared cleanup path below knows what to unmap. */
	uint32_t dma_direction = DMA_NONE;

	/* Refuse to touch the firmware while an ISP abort/reset is pending. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
		return -EBUSY;

	if (!IS_QLA84XX(ha)) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
			"exiting.\n", vha->host_no));
		return -EINVAL;
	}

	/* The mgmt header lives right behind the generic bsg request
	 * header.  NOTE(review): this pointer arithmetic can never yield
	 * NULL, so the check below is effectively dead code. */
	ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
		sizeof(struct fc_bsg_request));
	if (!ql84_mgmt) {
		DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
			__func__, vha->host_no));
		return -EINVAL;
	}

	/* IOCB for the firmware; must come from the coherent pool. */
	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
			"failed for host=%lu\n", __func__, vha->host_no));
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Data flows chip -> host: map the reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		/* A coalesced mapping would desynchronize the sg walk. */
		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			DEBUG2(printk(KERN_INFO
				"dma mapping resulted in different sg counts "
				"reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
				bsg_job->reply_payload.sg_cnt, sg_cnt));
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		/* Single contiguous bounce buffer the IOCB points at. */
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
				"failed for host=%lu\n",
				__func__, vha->host_no));
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Data flows host -> chip: map the request payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			DEBUG2(printk(KERN_INFO
				"dma mapping resulted in different sg counts "
				"request_sg_cnt: %x dma_request_sg_cnt: %x ",
				bsg_job->request_payload.sg_cnt, sg_cnt));
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
				"failed for host=%lu\n",
				__func__, vha->host_no));
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		/* Stage the caller's data in the bounce buffer. */
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Parameter-only command; no data buffer is needed. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* All data-carrying commands use a single data segment that
	 * points at the bounce buffer allocated above. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
			"request 84xx mgmt failed\n", vha->host_no));

		/* Firmware failure: report via reply->result, return 0. */
		rval = bsg_job->reply->reply_payload_rcv_len = 0;
		bsg_job->reply->result = (DID_ERROR << 16);

	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
			"request 84xx mgmt completed\n", vha->host_no));

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			/* Copy the chip's answer back into the caller's
			 * scatter/gather reply buffer. */
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

	bsg_job->job_done(bsg_job);

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Unmap whichever payload was mapped; DMA_NONE means neither. */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	return rval;
}
1201 
1202 static int
1203 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1204 {
1205 	struct Scsi_Host *host = bsg_job->shost;
1206 	scsi_qla_host_t *vha = shost_priv(host);
1207 	struct qla_hw_data *ha = vha->hw;
1208 	int rval = 0;
1209 	struct qla_port_param *port_param = NULL;
1210 	fc_port_t *fcport = NULL;
1211 	uint16_t mb[MAILBOX_REGISTER_COUNT];
1212 	uint8_t *rsp_ptr = NULL;
1213 
1214 	bsg_job->reply->reply_payload_rcv_len = 0;
1215 
1216 	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1217 		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1218 		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
1219 		return -EBUSY;
1220 
1221 	if (!IS_IIDMA_CAPABLE(vha->hw)) {
1222 		DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
1223 			"supported\n",  __func__, vha->host_no));
1224 		return -EINVAL;
1225 	}
1226 
1227 	port_param = (struct qla_port_param *)((char *)bsg_job->request +
1228 		sizeof(struct fc_bsg_request));
1229 	if (!port_param) {
1230 		DEBUG2(printk("%s(%ld): port_param header not provided, "
1231 			"exiting.\n", __func__, vha->host_no));
1232 		return -EINVAL;
1233 	}
1234 
1235 	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1236 		DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
1237 			__func__, vha->host_no));
1238 		return -EINVAL;
1239 	}
1240 
1241 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1242 		if (fcport->port_type != FCT_TARGET)
1243 			continue;
1244 
1245 		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1246 			fcport->port_name, sizeof(fcport->port_name)))
1247 			continue;
1248 		break;
1249 	}
1250 
1251 	if (!fcport) {
1252 		DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
1253 			__func__, vha->host_no));
1254 		return -EINVAL;
1255 	}
1256 
1257 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
1258 		DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n",
1259 			__func__, vha->host_no));
1260 		return -EINVAL;
1261 	}
1262 
1263 	if (fcport->flags & FCF_LOGIN_NEEDED) {
1264 		DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, "
1265 		    "flags = 0x%x\n",
1266 		    __func__, vha->host_no, fcport->flags));
1267 		return -EINVAL;
1268 	}
1269 
1270 	if (port_param->mode)
1271 		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1272 			port_param->speed, mb);
1273 	else
1274 		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1275 			&port_param->speed, mb);
1276 
1277 	if (rval) {
1278 		DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
1279 			"%02x%02x%02x%02x%02x%02x%02x%02x -- "
1280 			"%04x %x %04x %04x.\n",
1281 			vha->host_no, fcport->port_name[0],
1282 			fcport->port_name[1],
1283 			fcport->port_name[2], fcport->port_name[3],
1284 			fcport->port_name[4], fcport->port_name[5],
1285 			fcport->port_name[6], fcport->port_name[7], rval,
1286 			fcport->fp_speed, mb[0], mb[1]));
1287 		rval = 0;
1288 		bsg_job->reply->result = (DID_ERROR << 16);
1289 
1290 	} else {
1291 		if (!port_param->mode) {
1292 			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1293 				sizeof(struct qla_port_param);
1294 
1295 			rsp_ptr = ((uint8_t *)bsg_job->reply) +
1296 				sizeof(struct fc_bsg_reply);
1297 
1298 			memcpy(rsp_ptr, port_param,
1299 				sizeof(struct qla_port_param));
1300 		}
1301 
1302 		bsg_job->reply->result = DID_OK;
1303 	}
1304 
1305 	bsg_job->job_done(bsg_job);
1306 	return rval;
1307 }
1308 
1309 static int
1310 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
1311 	uint8_t is_update)
1312 {
1313 	uint32_t start = 0;
1314 	int valid = 0;
1315 
1316 	bsg_job->reply->reply_payload_rcv_len = 0;
1317 
1318 	if (unlikely(pci_channel_offline(ha->pdev)))
1319 		return -EINVAL;
1320 
1321 	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1322 	if (start > ha->optrom_size)
1323 		return -EINVAL;
1324 
1325 	if (ha->optrom_state != QLA_SWAITING)
1326 		return -EBUSY;
1327 
1328 	ha->optrom_region_start = start;
1329 
1330 	if (is_update) {
1331 		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1332 			valid = 1;
1333 		else if (start == (ha->flt_region_boot * 4) ||
1334 		    start == (ha->flt_region_fw * 4))
1335 			valid = 1;
1336 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1337 		    IS_QLA8XXX_TYPE(ha))
1338 			valid = 1;
1339 		if (!valid) {
1340 			qla_printk(KERN_WARNING, ha,
1341 			    "Invalid start region 0x%x/0x%x.\n",
1342 			    start, bsg_job->request_payload.payload_len);
1343 			return -EINVAL;
1344 		}
1345 
1346 		ha->optrom_region_size = start +
1347 		    bsg_job->request_payload.payload_len > ha->optrom_size ?
1348 		    ha->optrom_size - start :
1349 		    bsg_job->request_payload.payload_len;
1350 		ha->optrom_state = QLA_SWRITING;
1351 	} else {
1352 		ha->optrom_region_size = start +
1353 		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
1354 		    ha->optrom_size - start :
1355 		    bsg_job->reply_payload.payload_len;
1356 		ha->optrom_state = QLA_SREADING;
1357 	}
1358 
1359 	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1360 	if (!ha->optrom_buffer) {
1361 		qla_printk(KERN_WARNING, ha,
1362 		    "Read: Unable to allocate memory for optrom retrieval "
1363 		    "(%x).\n", ha->optrom_region_size);
1364 
1365 		ha->optrom_state = QLA_SWAITING;
1366 		return -ENOMEM;
1367 	}
1368 
1369 	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1370 	return 0;
1371 }
1372 
1373 static int
1374 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1375 {
1376 	struct Scsi_Host *host = bsg_job->shost;
1377 	scsi_qla_host_t *vha = shost_priv(host);
1378 	struct qla_hw_data *ha = vha->hw;
1379 	int rval = 0;
1380 
1381 	rval = qla2x00_optrom_setup(bsg_job, ha, 0);
1382 	if (rval)
1383 		return rval;
1384 
1385 	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1386 	    ha->optrom_region_start, ha->optrom_region_size);
1387 
1388 	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1389 	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1390 	    ha->optrom_region_size);
1391 
1392 	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1393 	bsg_job->reply->result = DID_OK;
1394 	vfree(ha->optrom_buffer);
1395 	ha->optrom_buffer = NULL;
1396 	ha->optrom_state = QLA_SWAITING;
1397 	bsg_job->job_done(bsg_job);
1398 	return rval;
1399 }
1400 
1401 static int
1402 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1403 {
1404 	struct Scsi_Host *host = bsg_job->shost;
1405 	scsi_qla_host_t *vha = shost_priv(host);
1406 	struct qla_hw_data *ha = vha->hw;
1407 	int rval = 0;
1408 
1409 	rval = qla2x00_optrom_setup(bsg_job, ha, 1);
1410 	if (rval)
1411 		return rval;
1412 
1413 	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1414 	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1415 	    ha->optrom_region_size);
1416 
1417 	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1418 	    ha->optrom_region_start, ha->optrom_region_size);
1419 
1420 	bsg_job->reply->result = DID_OK;
1421 	vfree(ha->optrom_buffer);
1422 	ha->optrom_buffer = NULL;
1423 	ha->optrom_state = QLA_SWAITING;
1424 	bsg_job->job_done(bsg_job);
1425 	return rval;
1426 }
1427 
1428 static int
1429 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1430 {
1431 	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1432 	case QL_VND_LOOPBACK:
1433 		return qla2x00_process_loopback(bsg_job);
1434 
1435 	case QL_VND_A84_RESET:
1436 		return qla84xx_reset(bsg_job);
1437 
1438 	case QL_VND_A84_UPDATE_FW:
1439 		return qla84xx_updatefw(bsg_job);
1440 
1441 	case QL_VND_A84_MGMT_CMD:
1442 		return qla84xx_mgmt_cmd(bsg_job);
1443 
1444 	case QL_VND_IIDMA:
1445 		return qla24xx_iidma(bsg_job);
1446 
1447 	case QL_VND_FCP_PRIO_CFG_CMD:
1448 		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1449 
1450 	case QL_VND_READ_FLASH:
1451 		return qla2x00_read_optrom(bsg_job);
1452 
1453 	case QL_VND_UPDATE_FLASH:
1454 		return qla2x00_update_optrom(bsg_job);
1455 
1456 	default:
1457 		bsg_job->reply->result = (DID_ERROR << 16);
1458 		bsg_job->job_done(bsg_job);
1459 		return -ENOSYS;
1460 	}
1461 }
1462 
1463 int
1464 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1465 {
1466 	int ret = -EINVAL;
1467 
1468 	switch (bsg_job->request->msgcode) {
1469 	case FC_BSG_RPT_ELS:
1470 	case FC_BSG_HST_ELS_NOLOGIN:
1471 		ret = qla2x00_process_els(bsg_job);
1472 		break;
1473 	case FC_BSG_HST_CT:
1474 		ret = qla2x00_process_ct(bsg_job);
1475 		break;
1476 	case FC_BSG_HST_VENDOR:
1477 		ret = qla2x00_process_vendor_specific(bsg_job);
1478 		break;
1479 	case FC_BSG_HST_ADD_RPORT:
1480 	case FC_BSG_HST_DEL_RPORT:
1481 	case FC_BSG_RPT_CT:
1482 	default:
1483 		DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
1484 		break;
1485 	}
1486 	return ret;
1487 }
1488 
/*
 * qla24xx_bsg_timeout() - Abort a bsg job the FC transport has timed out.
 * @bsg_job: the timed-out job.
 *
 * Walks every request queue's outstanding-command array under the
 * hardware lock looking for the srb that carries this bsg_job, then
 * asks the firmware to abort it.  The hardware lock is dropped around
 * the abort_command() call (it issues a mailbox command) and re-taken
 * before leaving the loop.  Always returns 0; the outcome is reported
 * through bsg_job->req->errors / bsg_job->reply->result.
 */
int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;
	struct srb_ctx *sp_bsg;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Slot scan starts at 1; slot 0 appears unused here --
		 * presumably handle 0 is reserved (TODO confirm). */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				sp_bsg = sp->ctx;

				/* Only CT and host-ELS srbs carry a bsg_job;
				 * match on the job pointer itself. */
				if (((sp_bsg->type == SRB_CT_CMD) ||
					(sp_bsg->type == SRB_ELS_CMD_HST))
					&& (sp_bsg->u.bsg_job == bsg_job)) {
					/* abort_command() is a mailbox op;
					 * it must run without the lock. */
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx "
						    "abort_command failed\n",
						    vha->host_no));
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						DEBUG2(qla_printk(KERN_INFO, ha,
						    "scsi(%ld): mbx "
						    "abort_command success\n",
						    vha->host_no));
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	DEBUG2(qla_printk(KERN_INFO, ha,
		"scsi(%ld) SRB not found to abort\n", vha->host_no));
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* CT passthrough jobs own a temporary fcport that must be freed
	 * with the srb; ELS jobs do not (NOTE(review): inferred from the
	 * msgcode test -- verify against the CT setup path). */
	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}
1551