// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

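/*
 * Deferred release of a driver-private fc_port: the final
 * qla2x00_free_fcport() is pushed onto the adapter workqueue rather than
 * being called from the SRB release path directly. Used below by
 * qla2x00_bsg_sp_free() for command types that allocate a dummy fcport.
 */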
static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
	    __func__, sp->handle, res, bsg_job);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
}

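/*
 * Release all DMA resources tied to a completed BSG SRB: either the
 * remapped purex buffers or the mapped request/reply scatterlists, plus
 * the dummy fcport allocated for host-based CT/ELS/FX commands.
 */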
void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		if (sp->remap.remapped) {
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

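/*
 * Validate FCP priority configuration data read from flash: an erased
 * region (first word all 0xFF) or a missing "HQOS" signature means there
 * is no usable data. When @flag is 1, the entries are also scanned for at
 * least one with FCP_PRIO_ENTRY_TAG_VALID set. Returns 1 if the data is
 * valid, 0 otherwise.
 */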
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

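/*
 * BSG vendor sub-command dispatcher for FCP priority configuration:
 * enable or disable the feature, or get/set the config blob. Only the SET
 * operation is allowed before any config memory has been allocated.
 */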
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio config data (%x).\n",
				    FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the stored
			 * fcp_prio_cfg is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

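/*
 * Handle an ELS passthrough request. For rport-directed ELS the existing
 * fcport must already be online; for host-based ELS (no prior login) a
 * dummy fcport is built from the destination port ID in the request, and
 * ELS_AUTH_ELS frames are diverted to the EDIF path.
 */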
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint32_t els_cmd = 0;
	int qla_port_allocated = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		if (!rport) {
			rval = -ENOMEM;
			goto done;
		}
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -ENOBUFS;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* The rport must already be logged in (online);
		 * otherwise the request is rejected.
		 */
		if (atomic_read(&fcport->state) != FCS_ONLINE) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Port %06X is not online for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since the functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For host
		 * based ELS commands there is no fcport structure allocated.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		qla_port_allocated = 1;
		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done_free_fcport:
	if (qla_port_allocated)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

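/*
 * A 24xx CT command IOCB carries two data segment descriptors; each
 * continuation IOCB carries five more. For example, 9 DSDs need
 * 1 + ceil((9 - 2) / 5) = 3 IOCBs.
 */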
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

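/*
 * Handle a CT passthrough request. The destination (SNS or the management
 * server) is derived from the loop id encoded in the CT preamble, and a
 * dummy fcport is allocated to carry the addressing information.
 */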
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg returned %d for request.\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based CT commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for the DCBX complete event, extending the timeout when the
	 * firmware requests it via idc_extend_tmo.
	 */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

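/*
 * Run a diagnostic echo or loopback test, driven by an application
 * through the host's BSG node. On the loopback-capable adapters
 * (81xx/8031/8044) the port is first switched into the requested loopback
 * mode and restored afterwards; the mailbox status registers and the
 * command that was sent are appended to the bsg reply for inspection.
 */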
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK) {
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			} else {
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);
			}

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
			}
		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

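/*
 * ISP84xx vendor command: reset the 84xx chip, bringing up the diagnostic
 * firmware when A84_ISSUE_RESET_DIAG_FW is requested in the vendor data.
 */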
static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

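/*
 * ISP84xx firmware update: the image supplied in the request payload is
 * copied into a coherent DMA buffer and handed to the chip through a
 * VERIFY_CHIP IOCB, with VCO_DIAG_FW set for a diagnostic image.
 */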
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

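/*
 * ISP84xx management passthrough: read/write 84xx memory, query chip
 * info, or change a config parameter through an ACCESS_CHIP IOCB. The
 * DMA direction depends on the sub-command, so the teardown path checks
 * dma_direction before unmapping.
 */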
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

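/*
 * Get or set the iiDMA speed for a logged-in target port identified by
 * WWPN. On a get, the updated qla_port_param is returned in the reply
 * right after the fc_bsg_reply header.
 */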
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in, flags = 0x%x.\n",
		    fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

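/*
 * Validate an option ROM read/update request and stage the driver state:
 * check the start offset against the flash layout, clamp the region size
 * to optrom_size, allocate the staging buffer and set optrom_state.
 */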
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Unable to allocate memory for optrom retrieval (%x).\n",
		    ha->optrom_region_size);
		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set isp82xx_no_md_cap so that no minidump is captured */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

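/*
 * The FRU and I2C helpers below share a pattern: the vendor payload is
 * copied into an on-stack template, each field is transferred through a
 * small buffer from the s_dma_pool using the read/write SFP mailbox
 * commands, and the extended status is returned in vendor_rsp[0].
 */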
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

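/*
 * Bidirectional diagnostic command: requires a bidirectional-capable
 * adapter operating in P2P mode and connected to a switch. The port logs
 * in to itself once (caching the loop id in self_login_loop_id) and then
 * issues a transfer whose request and reply payloads must be equal in
 * length. Status is reported in the vendor specific reply; the bsg
 * result itself is always DID_OK.
 */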
1807 static int
qla24xx_process_bidir_cmd(struct bsg_job * bsg_job)1808 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
1809 {
1810 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1811 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1812 scsi_qla_host_t *vha = shost_priv(host);
1813 struct qla_hw_data *ha = vha->hw;
1814 uint32_t rval = EXT_STATUS_OK;
1815 uint16_t req_sg_cnt = 0;
1816 uint16_t rsp_sg_cnt = 0;
1817 uint16_t nextlid = 0;
1818 uint32_t tot_dsds;
1819 srb_t *sp = NULL;
1820 uint32_t req_data_len;
1821 uint32_t rsp_data_len;
1822
1823 /* Check the type of the adapter */
1824 if (!IS_BIDI_CAPABLE(ha)) {
1825 ql_log(ql_log_warn, vha, 0x70a0,
1826 "This adapter is not supported\n");
1827 rval = EXT_STATUS_NOT_SUPPORTED;
1828 goto done;
1829 }
1830
1831 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1832 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1833 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1834 rval = EXT_STATUS_BUSY;
1835 goto done;
1836 }
1837
1838 /* Check if host is online */
1839 if (!vha->flags.online) {
1840 ql_log(ql_log_warn, vha, 0x70a1,
1841 "Host is not online\n");
1842 rval = EXT_STATUS_DEVICE_OFFLINE;
1843 goto done;
1844 }
1845
1846 /* Check if cable is plugged in or not */
1847 if (vha->device_flags & DFLG_NO_CABLE) {
1848 ql_log(ql_log_warn, vha, 0x70a2,
1849 "Cable is unplugged...\n");
1850 rval = EXT_STATUS_INVALID_CFG;
1851 goto done;
1852 }
1853
1854 /* Check if the switch is connected or not */
1855 if (ha->current_topology != ISP_CFG_F) {
1856 ql_log(ql_log_warn, vha, 0x70a3,
1857 "Host is not connected to the switch\n");
1858 rval = EXT_STATUS_INVALID_CFG;
1859 goto done;
1860 }
1861
1862 /* Check if operating mode is P2P */
1863 if (ha->operating_mode != P2P) {
1864 ql_log(ql_log_warn, vha, 0x70a4,
1865 "Host operating mode is not P2p\n");
1866 rval = EXT_STATUS_INVALID_CFG;
1867 goto done;
1868 }
1869
1870 mutex_lock(&ha->selflogin_lock);
1871 if (vha->self_login_loop_id == 0) {
1872 /* Initialize all required fields of fcport */
1873 vha->bidir_fcport.vha = vha;
1874 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1875 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1876 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1877 vha->bidir_fcport.loop_id = vha->loop_id;
1878
1879 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1880 ql_log(ql_log_warn, vha, 0x70a7,
1881 "Failed to login port %06X for bidirectional IOCB\n",
1882 vha->bidir_fcport.d_id.b24);
1883 mutex_unlock(&ha->selflogin_lock);
1884 rval = EXT_STATUS_MAILBOX;
1885 goto done;
1886 }
1887 vha->self_login_loop_id = nextlid - 1;
1888
1889 }
1890 mutex_unlock(&ha->selflogin_lock);
1891 
1892 /* Assign the self login loop id to fcport */
1893 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1894
1895 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1896 bsg_job->request_payload.sg_list,
1897 bsg_job->request_payload.sg_cnt,
1898 DMA_TO_DEVICE);
1899
1900 if (!req_sg_cnt) {
1901 rval = EXT_STATUS_NO_MEMORY;
1902 goto done;
1903 }
1904
1905 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1906 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1907 DMA_FROM_DEVICE);
1908
1909 if (!rsp_sg_cnt) {
1910 rval = EXT_STATUS_NO_MEMORY;
1911 goto done_unmap_req_sg;
1912 }
1913
1914 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1915 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1916 ql_dbg(ql_dbg_user, vha, 0x70a9,
1917 "Dma mapping resulted in different sg counts "
1918 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1919 "%x dma_reply_sg_cnt: %x]\n",
1920 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1921 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1922 rval = EXT_STATUS_NO_MEMORY;
1923 goto done_unmap_sg;
1924 }
1925
1926 req_data_len = bsg_job->request_payload.payload_len;
1927 rsp_data_len = bsg_job->reply_payload.payload_len;
1928
1929 if (req_data_len != rsp_data_len) {
1930 rval = EXT_STATUS_BUSY;
1931 ql_log(ql_log_warn, vha, 0x70aa,
1932 "req_data_len != rsp_data_len\n");
1933 goto done_unmap_sg;
1934 }
1935
1936 /* Alloc SRB structure */
1937 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1938 if (!sp) {
1939 ql_dbg(ql_dbg_user, vha, 0x70ac,
1940 "Alloc SRB structure failed\n");
1941 rval = EXT_STATUS_NO_MEMORY;
1942 goto done_unmap_sg;
1943 }
1944
1945 /* Populate srb->ctx with bidir ctx */
1946 sp->u.bsg_job = bsg_job;
1947 sp->free = qla2x00_bsg_sp_free;
1948 sp->type = SRB_BIDI_CMD;
1949 sp->done = qla2x00_bsg_job_done;
1950
1951 /* Add the read and write sg count */
1952 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1953
1954 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1955 if (rval != EXT_STATUS_OK)
1956 goto done_free_srb;
1957 /* the bsg request will be completed in the interrupt handler */
1958 return rval;
1959
1960 done_free_srb:
1961 mempool_free(sp, ha->srb_mempool);
1962 done_unmap_sg:
1963 dma_unmap_sg(&ha->pdev->dev,
1964 bsg_job->reply_payload.sg_list,
1965 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1966 done_unmap_req_sg:
1967 dma_unmap_sg(&ha->pdev->dev,
1968 bsg_job->request_payload.sg_list,
1969 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1970 done:
1971
1972 /* Return an error vendor-specific response
1973 * and complete the bsg request.
1974 */
1975 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1976 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1977 bsg_reply->reply_payload_rcv_len = 0;
1978 bsg_reply->result = DID_OK << 16;
1979 bsg_job_done(bsg_job, bsg_reply->result,
1980 bsg_reply->reply_payload_rcv_len);
1981 /* Always return success, vendor rsp carries correct status */
1982 return 0;
1983 }
1984
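/*
 * qlafx00_mgmt_cmd() - service a QL_VND_FX00_MGMT_CMD request for
 * ISPFx00 adapters. The caller's IOCB template sits at vendor_cmd[1];
 * its SRB_FXDISC_{REQ,RESP}_DMA_VALID flags decide which payloads are
 * DMA mapped. A throwaway fcport is allocated because the IOCB helpers
 * read port details from one; it is released again when the SRB is
 * freed after completion.
 */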
1985 static int
1986 qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
1987 {
1988 struct fc_bsg_request *bsg_request = bsg_job->request;
1989 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1990 scsi_qla_host_t *vha = shost_priv(host);
1991 struct qla_hw_data *ha = vha->hw;
1992 int rval = (DID_ERROR << 16);
1993 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1994 srb_t *sp;
1995 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1996 struct fc_port *fcport;
1997 char *type = "FC_BSG_HST_FX_MGMT";
1998
1999 /* Locate the IOCB-specific information within the vendor command */
2000 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
2001 &bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2002
2003 /* Dump the vendor information */
2004 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
2005 piocb_rqst, sizeof(*piocb_rqst));
2006
2007 if (!vha->flags.online) {
2008 ql_log(ql_log_warn, vha, 0x70d0,
2009 "Host is not online.\n");
2010 rval = -EIO;
2011 goto done;
2012 }
2013
2014 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
2015 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
2016 bsg_job->request_payload.sg_list,
2017 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2018 if (!req_sg_cnt) {
2019 ql_log(ql_log_warn, vha, 0x70c7,
2020 "dma_map_sg returned %d for request\n", req_sg_cnt);
2021 rval = -ENOMEM;
2022 goto done;
2023 }
2024 }
2025
2026 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
2027 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2028 bsg_job->reply_payload.sg_list,
2029 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2030 if (!rsp_sg_cnt) {
2031 ql_log(ql_log_warn, vha, 0x70c8,
2032 "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
2033 rval = -ENOMEM;
2034 goto done_unmap_req_sg;
2035 }
2036 }
2037
2038 ql_dbg(ql_dbg_user, vha, 0x70c9,
2039 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
2040 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
2041 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
2042
2043 /* Allocate a dummy fcport structure, since the functions preparing
2044 * the IOCB and mailbox command retrieve port-specific information
2045 * from the fcport structure. For host-based ELS commands there is
2046 * no fcport structure allocated.
2047 */
2048 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2049 if (!fcport) {
2050 ql_log(ql_log_warn, vha, 0x70ca,
2051 "Failed to allocate fcport.\n");
2052 rval = -ENOMEM;
2053 goto done_unmap_rsp_sg;
2054 }
2055
2056 /* Alloc SRB structure */
2057 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2058 if (!sp) {
2059 ql_log(ql_log_warn, vha, 0x70cb,
2060 "qla2x00_get_sp failed.\n");
2061 rval = -ENOMEM;
2062 goto done_free_fcport;
2063 }
2064
2065 /* Initialize all required fields of fcport */
2066 fcport->vha = vha;
2067 fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);
2068
2069 sp->type = SRB_FXIOCB_BCMD;
2070 sp->name = "bsg_fx_mgmt";
2071 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
2072 sp->u.bsg_job = bsg_job;
2073 sp->free = qla2x00_bsg_sp_free;
2074 sp->done = qla2x00_bsg_job_done;
2075
2076 ql_dbg(ql_dbg_user, vha, 0x70cc,
2077 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2078 type, piocb_rqst->func_type, fcport->loop_id);
2079
2080 rval = qla2x00_start_sp(sp);
2081 if (rval != QLA_SUCCESS) {
2082 ql_log(ql_log_warn, vha, 0x70cd,
2083 "qla2x00_start_sp failed=%d.\n", rval);
2084 mempool_free(sp, ha->srb_mempool);
2085 rval = -EIO;
2086 goto done_free_fcport;
2087 }
2088 return rval;
2089
2090 done_free_fcport:
2091 qla2x00_free_fcport(fcport);
2092
2093 done_unmap_rsp_sg:
2094 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2095 dma_unmap_sg(&ha->pdev->dev,
2096 bsg_job->reply_payload.sg_list,
2097 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2098 done_unmap_req_sg:
2099 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2100 dma_unmap_sg(&ha->pdev->dev,
2101 bsg_job->request_payload.sg_list,
2102 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2103
2104 done:
2105 return rval;
2106 }
2107
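/*
 * qla26xx_serdes_op() - QL_VND_SERDES_OP handler: read or write one
 * SerDes register word as selected by the cmd field of the copied-in
 * struct qla_serdes_reg. Reads hand the updated structure back in the
 * reply payload; a mailbox failure is reported as EXT_STATUS_MAILBOX.
 */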
2108 static int
2109 qla26xx_serdes_op(struct bsg_job *bsg_job)
2110 {
2111 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2112 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2113 scsi_qla_host_t *vha = shost_priv(host);
2114 int rval = 0;
2115 struct qla_serdes_reg sr;
2116
2117 memset(&sr, 0, sizeof(sr));
2118
2119 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2120 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2121
2122 switch (sr.cmd) {
2123 case INT_SC_SERDES_WRITE_REG:
2124 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2125 bsg_reply->reply_payload_rcv_len = 0;
2126 break;
2127 case INT_SC_SERDES_READ_REG:
2128 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2129 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2130 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2131 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2132 break;
2133 default:
2134 ql_dbg(ql_dbg_user, vha, 0x708c,
2135 "Unknown serdes cmd %x.\n", sr.cmd);
2136 rval = -EINVAL;
2137 break;
2138 }
2139
2140 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2141 rval ? EXT_STATUS_MAILBOX : 0;
2142
2143 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2144 bsg_reply->result = DID_OK << 16;
2145 bsg_job_done(bsg_job, bsg_reply->result,
2146 bsg_reply->reply_payload_rcv_len);
2147 return 0;
2148 }
2149
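/*
 * qla8044_serdes_op() - QL_VND_SERDES_OP_EX handler; same read/write
 * dispatch as above, but through the ISP8044 accessors and the extended
 * struct qla_serdes_reg_ex layout.
 */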
2150 static int
2151 qla8044_serdes_op(struct bsg_job *bsg_job)
2152 {
2153 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2154 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2155 scsi_qla_host_t *vha = shost_priv(host);
2156 int rval = 0;
2157 struct qla_serdes_reg_ex sr;
2158
2159 memset(&sr, 0, sizeof(sr));
2160
2161 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2162 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2163
2164 switch (sr.cmd) {
2165 case INT_SC_SERDES_WRITE_REG:
2166 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2167 bsg_reply->reply_payload_rcv_len = 0;
2168 break;
2169 case INT_SC_SERDES_READ_REG:
2170 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2171 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2172 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2173 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2174 break;
2175 default:
2176 ql_dbg(ql_dbg_user, vha, 0x7020,
2177 "Unknown serdes cmd %x.\n", sr.cmd);
2178 rval = -EINVAL;
2179 break;
2180 }
2181
2182 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2183 rval ? EXT_STATUS_MAILBOX : 0;
2184
2185 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2186 bsg_reply->result = DID_OK << 16;
2187 bsg_job_done(bsg_job, bsg_reply->result,
2188 bsg_reply->reply_payload_rcv_len);
2189 return 0;
2190 }
2191
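/*
 * qla27xx_get_flash_upd_cap() - report flash-update capabilities on
 * ISP27xx/28xx. The four 16-bit firmware attribute words are packed,
 * high to low, into a single 64-bit value:
 * fw_attributes_ext[1]:fw_attributes_ext[0]:fw_attributes_h:fw_attributes.
 */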
2192 static int
2193 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2194 {
2195 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2196 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2197 scsi_qla_host_t *vha = shost_priv(host);
2198 struct qla_hw_data *ha = vha->hw;
2199 struct qla_flash_update_caps cap;
2200
2201 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2202 return -EPERM;
2203
2204 memset(&cap, 0, sizeof(cap));
2205 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2206 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2207 (uint64_t)ha->fw_attributes_h << 16 |
2208 (uint64_t)ha->fw_attributes;
2209
2210 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2211 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2212 bsg_reply->reply_payload_rcv_len = sizeof(cap);
2213
2214 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2215 EXT_STATUS_OK;
2216
2217 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2218 bsg_reply->result = DID_OK << 16;
2219 bsg_job_done(bsg_job, bsg_reply->result,
2220 bsg_reply->reply_payload_rcv_len);
2221 return 0;
2222 }
2223
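/*
 * qla27xx_set_flash_upd_cap() - validate a capability-set request. The
 * caller must echo back exactly the 64-bit capability word the firmware
 * currently reports and request an outage duration of at least
 * MAX_LOOP_TIMEOUT; anything else fails with EXT_STATUS_INVALID_PARAM.
 */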
2224 static int
2225 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2226 {
2227 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2228 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2229 scsi_qla_host_t *vha = shost_priv(host);
2230 struct qla_hw_data *ha = vha->hw;
2231 uint64_t online_fw_attr = 0;
2232 struct qla_flash_update_caps cap;
2233
2234 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2235 return -EPERM;
2236
2237 memset(&cap, 0, sizeof(cap));
2238 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2239 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2240
2241 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2242 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2243 (uint64_t)ha->fw_attributes_h << 16 |
2244 (uint64_t)ha->fw_attributes;
2245
2246 if (online_fw_attr != cap.capabilities) {
2247 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2248 EXT_STATUS_INVALID_PARAM;
2249 return -EINVAL;
2250 }
2251
2252 if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
2253 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2254 EXT_STATUS_INVALID_PARAM;
2255 return -EINVAL;
2256 }
2257
2258 bsg_reply->reply_payload_rcv_len = 0;
2259
2260 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2261 EXT_STATUS_OK;
2262
2263 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2264 bsg_reply->result = DID_OK << 16;
2265 bsg_job_done(bsg_job, bsg_reply->result,
2266 bsg_reply->reply_payload_rcv_len);
2267 return 0;
2268 }
2269
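/*
 * qla27xx_get_bbcr_data() - report buffer-to-buffer credit recovery
 * state. With BBCR enabled, vha->bbcr decodes as: bit 12 set means the
 * feature went offline after a login reject, bits 11:8 hold the
 * negotiated BB_SC_N and bits 3:0 the configured value. A failed
 * GET_ADAPTER_ID mailbox marks the status unknown and passes mb1 back
 * for debugging.
 */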
2270 static int
2271 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2272 {
2273 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2274 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2275 scsi_qla_host_t *vha = shost_priv(host);
2276 struct qla_hw_data *ha = vha->hw;
2277 struct qla_bbcr_data bbcr;
2278 uint16_t loop_id, topo, sw_cap;
2279 uint8_t domain, area, al_pa, state;
2280 int rval;
2281
2282 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2283 return -EPERM;
2284
2285 memset(&bbcr, 0, sizeof(bbcr));
2286
2287 if (vha->flags.bbcr_enable)
2288 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2289 else
2290 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2291
2292 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2293 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2294 &area, &domain, &topo, &sw_cap);
2295 if (rval != QLA_SUCCESS) {
2296 bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2297 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2298 bbcr.mbx1 = loop_id;
2299 goto done;
2300 }
2301
2302 state = (vha->bbcr >> 12) & 0x1;
2303
2304 if (state) {
2305 bbcr.state = QLA_BBCR_STATE_OFFLINE;
2306 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2307 } else {
2308 bbcr.state = QLA_BBCR_STATE_ONLINE;
2309 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2310 }
2311
2312 bbcr.configured_bbscn = vha->bbcr & 0xf;
2313 }
2314
2315 done:
2316 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2317 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2318 bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2319
2320 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2321
2322 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2323 bsg_reply->result = DID_OK << 16;
2324 bsg_job_done(bsg_job, bsg_reply->result,
2325 bsg_reply->reply_payload_rcv_len);
2326 return 0;
2327 }
2328
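/*
 * qla2x00_get_priv_stats() - QL_VND_GET_PRIV_STATS[_EX] handler; the _EX
 * form carries mailbox options in vendor_cmd[1]. ISP link statistics are
 * gathered through the base vha into a DMA-coherent buffer and copied to
 * the reply payload on success.
 */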
2329 static int
2330 qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2331 {
2332 struct fc_bsg_request *bsg_request = bsg_job->request;
2333 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2334 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2335 scsi_qla_host_t *vha = shost_priv(host);
2336 struct qla_hw_data *ha = vha->hw;
2337 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2338 struct link_statistics *stats = NULL;
2339 dma_addr_t stats_dma;
2340 int rval;
2341 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
2342 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;
2343
2344 if (test_bit(UNLOADING, &vha->dpc_flags))
2345 return -ENODEV;
2346
2347 if (unlikely(pci_channel_offline(ha->pdev)))
2348 return -ENODEV;
2349
2350 if (qla2x00_reset_active(vha))
2351 return -EBUSY;
2352
2353 if (!IS_FWI2_CAPABLE(ha))
2354 return -EPERM;
2355
2356 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2357 GFP_KERNEL);
2358 if (!stats) {
2359 ql_log(ql_log_warn, vha, 0x70e2,
2360 "Failed to allocate memory for stats.\n");
2361 return -ENOMEM;
2362 }
2363
2364 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);
2365
2366 if (rval == QLA_SUCCESS) {
2367 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
2368 stats, sizeof(*stats));
2369 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2370 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
2371 }
2372
2373 bsg_reply->reply_payload_rcv_len = sizeof(*stats);
2374 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2375 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2376
2377 bsg_job->reply_len = sizeof(*bsg_reply);
2378 bsg_reply->result = DID_OK << 16;
2379 bsg_job_done(bsg_job, bsg_reply->result,
2380 bsg_reply->reply_payload_rcv_len);
2381
2382 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2383 stats, stats_dma);
2384
2385 return 0;
2386 }
2387
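/*
 * qla2x00_do_dport_diagnostics() - run the legacy D-Port diagnostic
 * (ISP83xx/27xx/28xx) and return the raw result buffer to the caller.
 */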
2388 static int
2389 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2390 {
2391 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2392 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2393 scsi_qla_host_t *vha = shost_priv(host);
2394 int rval;
2395 struct qla_dport_diag *dd;
2396
2397 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
2398 !IS_QLA28XX(vha->hw))
2399 return -EPERM;
2400
2401 dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2402 if (!dd) {
2403 ql_log(ql_log_warn, vha, 0x70db,
2404 "Failed to allocate memory for dport.\n");
2405 return -ENOMEM;
2406 }
2407
2408 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2409 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2410
2411 rval = qla26xx_dport_diagnostics(
2412 vha, dd->buf, sizeof(dd->buf), dd->options);
2413 if (rval == QLA_SUCCESS) {
2414 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2415 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2416 }
2417
2418 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2419 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2420 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2421
2422 bsg_job->reply_len = sizeof(*bsg_reply);
2423 bsg_reply->result = DID_OK << 16;
2424 bsg_job_done(bsg_job, bsg_reply->result,
2425 bsg_reply->reply_payload_rcv_len);
2426
2427 kfree(dd);
2428
2429 return 0;
2430 }
2431
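/*
 * qla2x00_do_dport_diagnostics_v2() - stateful D-Port test control. A
 * result query while a test is running returns DIAG_IN_PROCESS; a start
 * or restart clears any pending chip-reset marker, while a result query
 * after a chip reset reports DIAG_NOT_RUNNING. Mailbox status words are
 * returned in dd->mbx1/mbx2 on both the success and failure paths.
 */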
2432 static int
2433 qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
2434 {
2435 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2436 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2437 scsi_qla_host_t *vha = shost_priv(host);
2438 int rval;
2439 struct qla_dport_diag_v2 *dd;
2440 mbx_cmd_t mc;
2441 mbx_cmd_t *mcp = &mc;
2442 uint16_t options;
2443
2444 if (!IS_DPORT_CAPABLE(vha->hw))
2445 return -EPERM;
2446
2447 dd = kzalloc(sizeof(*dd), GFP_KERNEL);
2448 if (!dd)
2449 return -ENOMEM;
2450
2451 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2452 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2453
2454 options = dd->options;
2455
2456 /* Fail a result query while a dport test is still running */
2457 if (options == QLA_GET_DPORT_RESULT_V2 &&
2458 vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
2459 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2460 EXT_STATUS_DPORT_DIAG_IN_PROCESS;
2461 goto dportcomplete;
2462 }
2463
2464 /* A start/restart request clears any pending chip-reset marker */
2465 if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
2466 (options == QLA_START_DPORT_TEST_V2 ||
2467 options == QLA_RESTART_DPORT_TEST_V2)) {
2468 vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
2469 }
2470
2471 /* After a chip reset, a result query reports the test as not running */
2472 if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
2473 options == QLA_GET_DPORT_RESULT_V2) {
2474 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2475 EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
2476 goto dportcomplete;
2477 }
2478
2479 rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);
2480
2481 if (rval == QLA_SUCCESS) {
2482 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2483 EXT_STATUS_OK;
2484 if (options == QLA_START_DPORT_TEST_V2 ||
2485 options == QLA_RESTART_DPORT_TEST_V2) {
2486 dd->mbx1 = mcp->mb[0];
2487 dd->mbx2 = mcp->mb[1];
2488 vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
2489 } else if (options == QLA_GET_DPORT_RESULT_V2) {
2490 dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
2491 dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
2492 }
2493 } else {
2494 dd->mbx1 = mcp->mb[0];
2495 dd->mbx2 = mcp->mb[1];
2496 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2497 EXT_STATUS_DPORT_DIAG_ERR;
2498 }
2499
2500 dportcomplete:
2501 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2502 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2503
2504 bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2505 bsg_job->reply_len = sizeof(*bsg_reply);
2506 bsg_reply->result = DID_OK << 16;
2507 bsg_job_done(bsg_job, bsg_reply->result,
2508 bsg_reply->reply_payload_rcv_len);
2509
2510 kfree(dd);
2511
2512 return 0;
2513 }
2514
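/*
 * qla2x00_get_flash_image_status() - report which flash image regions
 * (primary or secondary) the adapter is currently running from. On
 * ISP27xx the NVMe parameters region is hard-coded to the primary
 * image; ISP28xx additionally reports the auxiliary regions.
 */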
2515 static int
2516 qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
2517 {
2518 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2519 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2520 struct qla_hw_data *ha = vha->hw;
2521 struct qla_active_regions regions = { };
2522 struct active_regions active_regions = { };
2523
2524 qla27xx_get_active_image(vha, &active_regions);
2525 regions.global_image = active_regions.global;
2526
2527 if (IS_QLA27XX(ha))
2528 regions.nvme_params = QLA27XX_PRIMARY_IMAGE;
2529
2530 if (IS_QLA28XX(ha)) {
2531 qla28xx_get_aux_images(vha, &active_regions);
2532 regions.board_config = active_regions.aux.board_config;
2533 regions.vpd_nvram = active_regions.aux.vpd_nvram;
2534 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
2535 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
2536 regions.nvme_params = active_regions.aux.nvme_params;
2537 }
2538
2539 ql_dbg(ql_dbg_user, vha, 0x70e1,
2540 "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
2541 __func__, vha->host_no, regions.global_image,
2542 regions.board_config, regions.vpd_nvram,
2543 regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);
2544
2545 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2546 bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
2547
2548 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2549 bsg_reply->reply_payload_rcv_len = sizeof(regions);
2550 bsg_reply->result = DID_OK << 16;
2551 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2552 bsg_job_done(bsg_job, bsg_reply->result,
2553 bsg_reply->reply_payload_rcv_len);
2554
2555 return 0;
2556 }
2557
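/*
 * qla2x00_manage_host_stats() - start, stop or clear host-side
 * statistics collection for the requested stat_type; the helper's
 * return code is passed back verbatim in rsp_data.status.
 */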
2558 static int
2559 qla2x00_manage_host_stats(struct bsg_job *bsg_job)
2560 {
2561 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2562 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2563 struct ql_vnd_mng_host_stats_param *req_data;
2564 struct ql_vnd_mng_host_stats_resp rsp_data;
2565 u32 req_data_len;
2566 int ret = 0;
2567
2568 if (!vha->flags.online) {
2569 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2570 return -EIO;
2571 }
2572
2573 req_data_len = bsg_job->request_payload.payload_len;
2574
2575 if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
2576 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2577 return -EIO;
2578 }
2579
2580 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2581 if (!req_data) {
2582 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2583 return -ENOMEM;
2584 }
2585
2586 /* Copy the request buffer into req_data */
2587 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2588 bsg_job->request_payload.sg_cnt, req_data,
2589 req_data_len);
2590
2591 switch (req_data->action) {
2592 case QLA_STOP:
2593 ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
2594 break;
2595 case QLA_START:
2596 ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
2597 break;
2598 case QLA_CLEAR:
2599 ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
2600 break;
2601 default:
2602 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2603 ret = -EIO;
2604 break;
2605 }
2606
2607 kfree(req_data);
2608
2609 /* Prepare response */
2610 rsp_data.status = ret;
2611 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2612
2613 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2614 bsg_reply->reply_payload_rcv_len =
2615 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2616 bsg_job->reply_payload.sg_cnt,
2617 &rsp_data,
2618 sizeof(struct ql_vnd_mng_host_stats_resp));
2619
2620 bsg_reply->result = DID_OK;
2621 bsg_job_done(bsg_job, bsg_reply->result,
2622 bsg_reply->reply_payload_rcv_len);
2623
2624 return ret;
2625 }
2626
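/*
 * qla2x00_get_host_stats() - return initiator statistics. The reply is
 * sized as one ql_vnd_host_stats_resp header plus one ql_vnd_stat_entry
 * per requested counter: each set bit in stat_type selects an initiator
 * entry, and QLA2XX_TGT_SHT_LNK_DOWN (BIT_17) adds one further entry per
 * discovered target. A reply buffer smaller than that gets only a
 * header carrying EXT_STATUS_BUFFER_TOO_SMALL.
 */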
2627 static int
2628 qla2x00_get_host_stats(struct bsg_job *bsg_job)
2629 {
2630 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2631 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2632 struct ql_vnd_stats_param *req_data;
2633 struct ql_vnd_host_stats_resp rsp_data;
2634 u32 req_data_len;
2635 int ret = 0;
2636 u64 ini_entry_count = 0;
2637 u64 entry_count = 0;
2638 u64 tgt_num = 0;
2639 u64 tmp_stat_type = 0;
2640 u64 response_len = 0;
2641 void *data;
2642
2643 req_data_len = bsg_job->request_payload.payload_len;
2644
2645 if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
2646 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2647 return -EIO;
2648 }
2649
2650 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2651 if (!req_data) {
2652 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2653 return -ENOMEM;
2654 }
2655
2656 /* Copy the request buffer into req_data */
2657 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2658 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2659
2660 /* Copy stat type to work on it */
2661 tmp_stat_type = req_data->stat_type;
2662
2663 if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
2664 /* Num of tgts connected to this host */
2665 tgt_num = qla2x00_get_num_tgts(vha);
2666 /* unset BIT_17 */
2667 tmp_stat_type &= ~(1 << 17);
2668 }
2669
2670 /* Total ini stats */
2671 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
2672
2673 /* Total number of entries */
2674 entry_count = ini_entry_count + tgt_num;
2675
2676 response_len = sizeof(struct ql_vnd_host_stats_resp) +
2677 (sizeof(struct ql_vnd_stat_entry) * entry_count);
2678
2679 if (response_len > bsg_job->reply_payload.payload_len) {
2680 rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
2681 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2682 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2683
2684 bsg_reply->reply_payload_rcv_len =
2685 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2686 bsg_job->reply_payload.sg_cnt, &rsp_data,
2687 sizeof(struct ql_vnd_mng_host_stats_resp));
2688
2689 bsg_reply->result = DID_OK;
2690 bsg_job_done(bsg_job, bsg_reply->result,
2691 bsg_reply->reply_payload_rcv_len);
2692 goto host_stat_out;
2693 }
2694
2695 data = kzalloc(response_len, GFP_KERNEL);
2696 if (!data) {
2697 ret = -ENOMEM;
2698 goto host_stat_out;
2699 }
2700
2701 ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2702 data, response_len);
2703
2704 rsp_data.status = EXT_STATUS_OK;
2705 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2706
2707 bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2708 bsg_job->reply_payload.sg_cnt,
2709 data, response_len);
2710 bsg_reply->result = DID_OK;
2711 bsg_job_done(bsg_job, bsg_reply->result,
2712 bsg_reply->reply_payload_rcv_len);
2713
2714 kfree(data);
2715 host_stat_out:
2716 kfree(req_data);
2717 return ret;
2718 }
2719
2720 static struct fc_rport *
2721 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
2722 {
2723 fc_port_t *fcport = NULL;
2724
2725 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2726 if (fcport->rport && fcport->rport->number == tgt_num)
2727 return fcport->rport;
2728 }
2729 return NULL;
2730 }
2731
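/*
 * qla2x00_get_tgt_stats() - return statistics for one target,
 * identified by the rport number in req_data->tgt_id. The reply buffer
 * holds exactly one ql_vnd_stat_entry; an unknown target is reported as
 * EXT_STATUS_INVALID_PARAM in the response status.
 */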
2732 static int
2733 qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
2734 {
2735 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2736 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2737 struct ql_vnd_tgt_stats_param *req_data;
2738 u32 req_data_len;
2739 int ret = 0;
2740 u64 response_len = 0;
2741 struct ql_vnd_tgt_stats_resp *data = NULL;
2742 struct fc_rport *rport = NULL;
2743
2744 if (!vha->flags.online) {
2745 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
2746 return -EIO;
2747 }
2748
2749 req_data_len = bsg_job->request_payload.payload_len;
2750
2751 if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
2752 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2753 return -EIO;
2754 }
2755
2756 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2757 if (!req_data) {
2758 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2759 return -ENOMEM;
2760 }
2761
2762 /* Copy the request buffer into req_data */
2763 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2764 bsg_job->request_payload.sg_cnt,
2765 req_data, req_data_len);
2766
2767 response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
2768 sizeof(struct ql_vnd_stat_entry);
2769
2770 /* structure + size for one entry */
2771 data = kzalloc(response_len, GFP_KERNEL);
2772 if (!data) {
2773 kfree(req_data);
2774 return -ENOMEM;
2775 }
2776
2777 if (response_len > bsg_job->reply_payload.payload_len) {
2778 data->status = EXT_STATUS_BUFFER_TOO_SMALL;
2779 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
2780 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
2781
2782 bsg_reply->reply_payload_rcv_len =
2783 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2784 bsg_job->reply_payload.sg_cnt, data,
2785 sizeof(struct ql_vnd_tgt_stats_resp));
2786
2787 bsg_reply->result = DID_OK;
2788 bsg_job_done(bsg_job, bsg_reply->result,
2789 bsg_reply->reply_payload_rcv_len);
2790 goto tgt_stat_out;
2791 }
2792
2793 rport = qla2xxx_find_rport(vha, req_data->tgt_id);
2794 if (!rport) {
2795 ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
2796 ret = EXT_STATUS_INVALID_PARAM;
2797 data->status = EXT_STATUS_INVALID_PARAM;
2798 goto reply;
2799 }
2800
2801 ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
2802 rport, (void *)data, response_len);
2803
2804 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2805 reply:
2806 bsg_reply->reply_payload_rcv_len =
2807 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2808 bsg_job->reply_payload.sg_cnt, data,
2809 response_len);
2810 bsg_reply->result = DID_OK;
2811 bsg_job_done(bsg_job, bsg_reply->result,
2812 bsg_reply->reply_payload_rcv_len);
2813
2814 tgt_stat_out:
2815 kfree(data);
2816 kfree(req_data);
2817
2818 return ret;
2819 }
2820
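/*
 * qla2x00_manage_host_port() - enable or disable the physical port on
 * behalf of QL_VND_MANAGE_HOST_PORT; the helper's return code is
 * reported in rsp_data.status.
 */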
2821 static int
2822 qla2x00_manage_host_port(struct bsg_job *bsg_job)
2823 {
2824 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
2825 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2826 struct ql_vnd_mng_host_port_param *req_data;
2827 struct ql_vnd_mng_host_port_resp rsp_data;
2828 u32 req_data_len;
2829 int ret = 0;
2830
2831 req_data_len = bsg_job->request_payload.payload_len;
2832
2833 if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
2834 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
2835 return -EIO;
2836 }
2837
2838 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
2839 if (!req_data) {
2840 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
2841 return -ENOMEM;
2842 }
2843
2844 /* Copy the request buffer into req_data */
2845 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2846 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
2847
2848 switch (req_data->action) {
2849 case QLA_ENABLE:
2850 ret = qla2xxx_enable_port(vha->host);
2851 break;
2852 case QLA_DISABLE:
2853 ret = qla2xxx_disable_port(vha->host);
2854 break;
2855 default:
2856 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
2857 ret = -EIO;
2858 break;
2859 }
2860
2861 kfree(req_data);
2862
2863 /* Prepare response */
2864 rsp_data.status = ret;
2865 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2866 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
2867
2868 bsg_reply->reply_payload_rcv_len =
2869 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2870 bsg_job->reply_payload.sg_cnt, &rsp_data,
2871 sizeof(struct ql_vnd_mng_host_port_resp));
2872 bsg_reply->result = DID_OK;
2873 bsg_job_done(bsg_job, bsg_reply->result,
2874 bsg_reply->reply_payload_rcv_len);
2875
2876 return ret;
2877 }
2878
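/*
 * qla2x00_process_vendor_specific() - demultiplex FC_BSG_HST_VENDOR
 * requests on the opcode in vendor_cmd[0]; unknown opcodes fall through
 * to -ENOSYS. As a rough illustration only (not code from this driver;
 * field names are from the uapi scsi_bsg_fc.h/bsg.h headers), a
 * user-space caller would build such a request along these lines:
 *
 *	struct fc_bsg_request req = { };
 *
 *	req.msgcode = FC_BSG_HST_VENDOR;
 *	req.rqst_data.h_vendor.vendor_cmd[0] = QL_VND_SERDES_OP;
 *	// ...then submit it with the SG_IO ioctl on the host's bsg
 *	// node, via a struct sg_io_v4 whose request field points at
 *	// &req and whose din/dout fields describe the payloads.
 */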
2879 static int
2880 qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
2881 {
2882 struct fc_bsg_request *bsg_request = bsg_job->request;
2883
2884 ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
2885 __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);
2886
2887 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2888 case QL_VND_LOOPBACK:
2889 return qla2x00_process_loopback(bsg_job);
2890
2891 case QL_VND_A84_RESET:
2892 return qla84xx_reset(bsg_job);
2893
2894 case QL_VND_A84_UPDATE_FW:
2895 return qla84xx_updatefw(bsg_job);
2896
2897 case QL_VND_A84_MGMT_CMD:
2898 return qla84xx_mgmt_cmd(bsg_job);
2899
2900 case QL_VND_IIDMA:
2901 return qla24xx_iidma(bsg_job);
2902
2903 case QL_VND_FCP_PRIO_CFG_CMD:
2904 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2905
2906 case QL_VND_READ_FLASH:
2907 return qla2x00_read_optrom(bsg_job);
2908
2909 case QL_VND_UPDATE_FLASH:
2910 return qla2x00_update_optrom(bsg_job);
2911
2912 case QL_VND_SET_FRU_VERSION:
2913 return qla2x00_update_fru_versions(bsg_job);
2914
2915 case QL_VND_READ_FRU_STATUS:
2916 return qla2x00_read_fru_status(bsg_job);
2917
2918 case QL_VND_WRITE_FRU_STATUS:
2919 return qla2x00_write_fru_status(bsg_job);
2920
2921 case QL_VND_WRITE_I2C:
2922 return qla2x00_write_i2c(bsg_job);
2923
2924 case QL_VND_READ_I2C:
2925 return qla2x00_read_i2c(bsg_job);
2926
2927 case QL_VND_DIAG_IO_CMD:
2928 return qla24xx_process_bidir_cmd(bsg_job);
2929
2930 case QL_VND_FX00_MGMT_CMD:
2931 return qlafx00_mgmt_cmd(bsg_job);
2932
2933 case QL_VND_SERDES_OP:
2934 return qla26xx_serdes_op(bsg_job);
2935
2936 case QL_VND_SERDES_OP_EX:
2937 return qla8044_serdes_op(bsg_job);
2938
2939 case QL_VND_GET_FLASH_UPDATE_CAPS:
2940 return qla27xx_get_flash_upd_cap(bsg_job);
2941
2942 case QL_VND_SET_FLASH_UPDATE_CAPS:
2943 return qla27xx_set_flash_upd_cap(bsg_job);
2944
2945 case QL_VND_GET_BBCR_DATA:
2946 return qla27xx_get_bbcr_data(bsg_job);
2947
2948 case QL_VND_GET_PRIV_STATS:
2949 case QL_VND_GET_PRIV_STATS_EX:
2950 return qla2x00_get_priv_stats(bsg_job);
2951
2952 case QL_VND_DPORT_DIAGNOSTICS:
2953 return qla2x00_do_dport_diagnostics(bsg_job);
2954
2955 case QL_VND_DPORT_DIAGNOSTICS_V2:
2956 return qla2x00_do_dport_diagnostics_v2(bsg_job);
2957
2958 case QL_VND_EDIF_MGMT:
2959 return qla_edif_app_mgmt(bsg_job);
2960
2961 case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
2962 return qla2x00_get_flash_image_status(bsg_job);
2963
2964 case QL_VND_MANAGE_HOST_STATS:
2965 return qla2x00_manage_host_stats(bsg_job);
2966
2967 case QL_VND_GET_HOST_STATS:
2968 return qla2x00_get_host_stats(bsg_job);
2969
2970 case QL_VND_GET_TGT_STATS:
2971 return qla2x00_get_tgt_stats(bsg_job);
2972
2973 case QL_VND_MANAGE_HOST_PORT:
2974 return qla2x00_manage_host_port(bsg_job);
2975
2976 case QL_VND_MBX_PASSTHRU:
2977 return qla2x00_mailbox_passthru(bsg_job);
2978
2979 default:
2980 return -ENOSYS;
2981 }
2982 }
2983
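/*
 * qla24xx_bsg_request() - entry point for all FC BSG traffic. ELS jobs
 * resolve the vha through their rport, everything else through the
 * shost. Unless the command is QL_VND_MANAGE_HOST_PORT or
 * QL_VND_GET_HOST_STATS (which must keep working with the port down),
 * the request is refused while the port is isolated, the chip is down,
 * or the driver is being removed.
 */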
2984 int
2985 qla24xx_bsg_request(struct bsg_job *bsg_job)
2986 {
2987 struct fc_bsg_request *bsg_request = bsg_job->request;
2988 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2989 int ret = -EINVAL;
2990 struct fc_rport *rport;
2991 struct Scsi_Host *host;
2992 scsi_qla_host_t *vha;
2993
2994 /* In case no data is transferred. */
2995 bsg_reply->reply_payload_rcv_len = 0;
2996
2997 if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2998 rport = fc_bsg_to_rport(bsg_job);
2999 if (!rport)
3000 return ret;
3001 host = rport_to_shost(rport);
3002 vha = shost_priv(host);
3003 } else {
3004 host = fc_bsg_to_shost(bsg_job);
3005 vha = shost_priv(host);
3006 }
3007
3008 /* Disabling the port brings the chip down; let the enable and host-stats commands bypass the chip-state checks */
3009 if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
3010 bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
3011 goto skip_chip_chk;
3012
3013 if (vha->hw->flags.port_isolated) {
3014 bsg_reply->result = DID_ERROR;
3015 /* operation not permitted */
3016 return -EPERM;
3017 }
3018
3019 if (qla2x00_chip_is_down(vha)) {
3020 ql_dbg(ql_dbg_user, vha, 0x709f,
3021 "BSG: ISP abort active/needed -- cmd=%d.\n",
3022 bsg_request->msgcode);
3023 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
3024 return -EBUSY;
3025 }
3026
3027 if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
3028 SET_DID_STATUS(bsg_reply->result, DID_ERROR);
3029 return -EIO;
3030 }
3031
3032 skip_chip_chk:
3033 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
3034 "Entered %s msgcode=0x%x. bsg ptr %px\n",
3035 __func__, bsg_request->msgcode, bsg_job);
3036
3037 switch (bsg_request->msgcode) {
3038 case FC_BSG_RPT_ELS:
3039 case FC_BSG_HST_ELS_NOLOGIN:
3040 ret = qla2x00_process_els(bsg_job);
3041 break;
3042 case FC_BSG_HST_CT:
3043 ret = qla2x00_process_ct(bsg_job);
3044 break;
3045 case FC_BSG_HST_VENDOR:
3046 ret = qla2x00_process_vendor_specific(vha, bsg_job);
3047 break;
3048 case FC_BSG_HST_ADD_RPORT:
3049 case FC_BSG_HST_DEL_RPORT:
3050 case FC_BSG_RPT_CT:
3051 default:
3052 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
3053 break;
3054 }
3055
3056 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
3057 "%s done with return %x\n", __func__, ret);
3058
3059 return ret;
3060 }
3061
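/*
 * qla_bsg_found() - scan one queue pair's outstanding commands for the
 * given bsg job. When found, the slot is cleared under the qpair lock,
 * the lock is dropped before the mailbox abort is issued, and the SRB's
 * INIT reference is released. Returns true if the job belonged to this
 * queue pair.
 */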
3062 static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
3063 {
3064 bool found = false;
3065 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3066 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3067 struct qla_hw_data *ha = vha->hw;
3068 srb_t *sp = NULL;
3069 int cnt;
3070 unsigned long flags;
3071 struct req_que *req;
3072
3073 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3074 req = qpair->req;
3075
3076 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
3077 sp = req->outstanding_cmds[cnt];
3078 if (sp &&
3079 (sp->type == SRB_CT_CMD ||
3080 sp->type == SRB_ELS_CMD_HST ||
3081 sp->type == SRB_ELS_CMD_HST_NOLOGIN) &&
3082 sp->u.bsg_job == bsg_job) {
3083 req->outstanding_cmds[cnt] = NULL;
3084 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3085
3086 if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
3087 ql_log(ql_log_warn, vha, 0x7089,
3088 "mbx abort_command failed.\n");
3089 bsg_reply->result = -EIO;
3090 } else {
3091 ql_dbg(ql_dbg_user, vha, 0x708a,
3092 "mbx abort_command success.\n");
3093 bsg_reply->result = 0;
3094 }
3095 /* ref: INIT */
3096 kref_put(&sp->cmd_kref, qla2x00_sp_release);
3097
3098 found = true;
3099 goto done;
3100 }
3101 }
3102 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3103
3104 done:
3105 return found;
3106 }
3107
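/*
 * qla24xx_bsg_timeout() - fc_bsg timeout hook. Flags EEH busy on a
 * register disconnect, then tries to abort the job on the base queue
 * pair and each additional qpair; -ENXIO is reported when no matching
 * SRB is still outstanding.
 */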
3108 int
3109 qla24xx_bsg_timeout(struct bsg_job *bsg_job)
3110 {
3111 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3112 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3113 struct qla_hw_data *ha = vha->hw;
3114 int i;
3115 struct qla_qpair *qpair;
3116
3117 ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
3118 __func__, bsg_job);
3119
3120 if (qla2x00_isp_reg_stat(ha)) {
3121 ql_log(ql_log_info, vha, 0x9007,
3122 "PCI/Register disconnect.\n");
3123 qla_pci_set_eeh_busy(vha);
3124 }
3125
3126 if (qla_bsg_found(ha->base_qpair, bsg_job))
3127 goto done;
3128
3129 /* find the bsg job from the active list of commands */
3130 for (i = 0; i < ha->max_qpairs; i++) {
3131 qpair = vha->hw->queue_pair_map[i];
3132 if (!qpair)
3133 continue;
3134 if (qla_bsg_found(qpair, bsg_job))
3135 goto done;
3136 }
3137
3138 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
3139 bsg_reply->result = -ENXIO;
3140
3141 done:
3142 return 0;
3143 }
3144
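/*
 * qla2x00_mailbox_passthru() - QL_VND_MBX_PASSTHRU handler: copy the
 * caller's mailbox registers in, execute them via
 * qla_mailbox_passthru(), and hand the output registers back in the
 * same fixed-size structure.
 */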
3145 int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
3146 {
3147 struct fc_bsg_reply *bsg_reply = bsg_job->reply;
3148 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
3149 int ret = -EINVAL;
3150 int ptsize = sizeof(struct qla_mbx_passthru);
3151 struct qla_mbx_passthru *req_data = NULL;
3152 uint32_t req_data_len;
3153
3154 req_data_len = bsg_job->request_payload.payload_len;
3155 if (req_data_len != ptsize) {
3156 ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
3157 return -EIO;
3158 }
3159 req_data = kzalloc(ptsize, GFP_KERNEL);
3160 if (!req_data) {
3161 ql_log(ql_log_warn, vha, 0xf0a4,
3162 "req_data memory allocation failure.\n");
3163 return -ENOMEM;
3164 }
3165
3166 /* Copy the request buffer into req_data */
3167 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
3168 bsg_job->request_payload.sg_cnt, req_data, ptsize);
3169 ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);
3170
3171 /* Copy the results back into the reply buffer */
3172 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
3173 bsg_job->reply_payload.sg_cnt, req_data, ptsize);
3174
3175 bsg_reply->reply_payload_rcv_len = ptsize;
3176 if (ret == QLA_SUCCESS)
3177 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
3178 else
3179 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;
3180
3181 bsg_job->reply_len = sizeof(*bsg_job->reply);
3182 bsg_reply->result = DID_OK << 16;
3183 bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
3184
3185 kfree(req_data);
3186
3187 return ret;
3188 }
3189