// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
	    __func__, sp->handle, res, bsg_job);

	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
}

void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {

		if (sp->remap.remapped) {
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}

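/*
 * Validate FCP priority configuration data read from flash: check the
 * "HQOS" signature and, when @flag is 1, require at least one entry with
 * FCP_PRIO_ENTRY_TAG_VALID set.  Returns 1 if the data is usable, else 0.
 */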
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

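/*
 * Handle the FCP priority vendor command: enable, disable, fetch or
 * replace the cached priority configuration, pushing any change to the
 * firmware via qla24xx_update_all_fcp_prio().
 */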
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, fcp_prio_cfg
			 * is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

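/*
 * ELS pass-through entry point.  FC_BSG_RPT_ELS requests ride on an
 * existing logged-in rport; FC_BSG_HST_ELS_NOLOGIN requests get a
 * temporary fcport built from the caller-supplied port id.
 */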
static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint32_t els_cmd = 0;
	int qla_port_allocated = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (atomic_read(&fcport->state) != FCS_ONLINE) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Port %06X is not online for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For host
		 * based ELS commands there is no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		qla_port_allocated = 1;
		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (qla_port_allocated)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

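/*
 * CT pass-through entry point: map the request/reply payloads, derive the
 * well-known loop id from the CT preamble and issue the command through a
 * temporary fcport.
 */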
static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

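	/*
	 * The destination is encoded in the top byte of CT preamble word 1:
	 * 0xFC addresses the name server (SNS), 0xFA the management server;
	 * anything else is rejected.
	 */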
	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
		>> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based commands there is
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

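/*
 * Vendor loopback/echo diagnostic.  When the loop is ready and the request
 * qualifies (switched fabric, or an ELS frame on 81xx/8031/8044), an ECHO
 * test is issued; otherwise the port is placed into internal or external
 * loopback before running qla2x00_loopback_test().
 */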
static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

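	/*
	 * Decide between an ELS ECHO diagnostic and a real loopback run,
	 * based on the options word decoded from vendor_cmd[1] above.
	 */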
	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

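			/*
			 * MBS_LB_RESET in the mailbox status means the test
			 * left the firmware in a bad state: abort the ISP
			 * and, on 81xx, restart the MPI firmware too.
			 */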
			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

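/*
 * Push a firmware image to the ISP84xx companion chip: the payload is
 * staged in a coherent DMA buffer and handed to the firmware through a
 * VERIFY_CHIP IOCB (optionally flagged as diagnostic firmware).
 */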
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

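/*
 * ISP84xx management pass-through: wrap READ_MEM/WRITE_MEM/GET_INFO/
 * CHNG_CONFIG requests in an ACCESS_CHIP IOCB, staging payload data in
 * whichever direction the sub-command requires.
 */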
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

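	/*
	 * Every sub-command except CHNG_CONFIG moves data, so describe the
	 * staging buffer in the IOCB's single data segment.
	 */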
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

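/*
 * Get or set the iiDMA port speed of a logged-in target identified by
 * its WWPN.
 */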
static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

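/*
 * Common setup for option-ROM reads and updates: validate the requested
 * flash region, clamp the transfer to the option-ROM size and allocate a
 * staging buffer.  Callers hold ha->optrom_mutex.
 */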
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

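/*
 * Write a caller-supplied list of FRU image version fields to the
 * hardware, one qla2x00_write_sfp() call per list entry.
 */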
static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

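/*
 * Counterpart of qla2x00_read_fru_status(): write a single status
 * register byte through the SFP write path.
 */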
static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

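/*
 * Bidirectional pass-through diagnostic.  Requires a bidi-capable ISP in
 * P2P mode behind a switch; the port logs in to itself once (self login)
 * and the data is then sent out and received back in one command.
 */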
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

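	/* Map both payloads: a bidirectional command both sends and receives. */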
"req_data_len != rsp_data_len\n"); 1929 goto done_unmap_sg; 1930 } 1931 1932 /* Alloc SRB structure */ 1933 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); 1934 if (!sp) { 1935 ql_dbg(ql_dbg_user, vha, 0x70ac, 1936 "Alloc SRB structure failed\n"); 1937 rval = EXT_STATUS_NO_MEMORY; 1938 goto done_unmap_sg; 1939 } 1940 1941 /*Populate srb->ctx with bidir ctx*/ 1942 sp->u.bsg_job = bsg_job; 1943 sp->free = qla2x00_bsg_sp_free; 1944 sp->type = SRB_BIDI_CMD; 1945 sp->done = qla2x00_bsg_job_done; 1946 1947 /* Add the read and write sg count */ 1948 tot_dsds = rsp_sg_cnt + req_sg_cnt; 1949 1950 rval = qla2x00_start_bidir(sp, vha, tot_dsds); 1951 if (rval != EXT_STATUS_OK) 1952 goto done_free_srb; 1953 /* the bsg request will be completed in the interrupt handler */ 1954 return rval; 1955 1956 done_free_srb: 1957 mempool_free(sp, ha->srb_mempool); 1958 done_unmap_sg: 1959 dma_unmap_sg(&ha->pdev->dev, 1960 bsg_job->reply_payload.sg_list, 1961 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1962 done_unmap_req_sg: 1963 dma_unmap_sg(&ha->pdev->dev, 1964 bsg_job->request_payload.sg_list, 1965 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1966 done: 1967 1968 /* Return an error vendor specific response 1969 * and complete the bsg request 1970 */ 1971 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1972 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1973 bsg_reply->reply_payload_rcv_len = 0; 1974 bsg_reply->result = (DID_OK) << 16; 1975 bsg_job_done(bsg_job, bsg_reply->result, 1976 bsg_reply->reply_payload_rcv_len); 1977 /* Always return success, vendor rsp carries correct status */ 1978 return 0; 1979 } 1980 1981 static int 1982 qlafx00_mgmt_cmd(struct bsg_job *bsg_job) 1983 { 1984 struct fc_bsg_request *bsg_request = bsg_job->request; 1985 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1986 scsi_qla_host_t *vha = shost_priv(host); 1987 struct qla_hw_data *ha = vha->hw; 1988 int rval = (DID_ERROR << 16); 1989 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 1990 srb_t *sp; 1991 int req_sg_cnt = 0, rsp_sg_cnt = 0; 1992 struct fc_port *fcport; 1993 char *type = "FC_BSG_HST_FX_MGMT"; 1994 1995 /* Copy the IOCB specific information */ 1996 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 1997 &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1998 1999 /* Dump the vendor information */ 2000 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, 2001 piocb_rqst, sizeof(*piocb_rqst)); 2002 2003 if (!vha->flags.online) { 2004 ql_log(ql_log_warn, vha, 0x70d0, 2005 "Host is not online.\n"); 2006 rval = -EIO; 2007 goto done; 2008 } 2009 2010 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { 2011 req_sg_cnt = dma_map_sg(&ha->pdev->dev, 2012 bsg_job->request_payload.sg_list, 2013 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 2014 if (!req_sg_cnt) { 2015 ql_log(ql_log_warn, vha, 0x70c7, 2016 "dma_map_sg return %d for request\n", req_sg_cnt); 2017 rval = -ENOMEM; 2018 goto done; 2019 } 2020 } 2021 2022 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { 2023 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 2024 bsg_job->reply_payload.sg_list, 2025 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2026 if (!rsp_sg_cnt) { 2027 ql_log(ql_log_warn, vha, 0x70c8, 2028 "dma_map_sg return %d for reply\n", rsp_sg_cnt); 2029 rval = -ENOMEM; 2030 goto done_unmap_req_sg; 2031 } 2032 } 2033 2034 ql_dbg(ql_dbg_user, vha, 0x70c9, 2035 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " 2036 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, 2037 req_sg_cnt, bsg_job->reply_payload.sg_cnt, 
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based ELS commands there will
	 * be no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = le32_to_cpu(piocb_rqst->dataword);

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
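/*
 * Note on qlafx00_mgmt_cmd() (descriptive only): DMA mapping of the
 * request/reply payloads is gated on SRB_FXDISC_REQ_DMA_VALID and
 * SRB_FXDISC_RESP_DMA_VALID in the embedded struct qla_mt_iocb_rqst_fx00,
 * so only the directions the caller declared valid are mapped. The fcport
 * allocated here is a dummy that exists because the IOCB and mailbox
 * helpers read port data from an fc_port; its loop_id is taken from
 * piocb_rqst->dataword.
 */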
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
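/*
 * The 64-bit capabilities word above is assembled from four 16-bit
 * firmware attribute registers, most significant halfword first:
 *
 *	bits 63..48  fw_attributes_ext[1]
 *	bits 47..32  fw_attributes_ext[0]
 *	bits 31..16  fw_attributes_h
 *	bits 15..0   fw_attributes
 *
 * qla27xx_set_flash_upd_cap() below rebuilds the same word and rejects
 * any request whose capabilities field does not match it exactly.
 */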
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
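/*
 * vha->bbcr packs the buffer-to-buffer credit recovery state reported by
 * firmware. qla27xx_get_bbcr_data() above decodes it as (layout inferred
 * from the shifts and masks used in the code):
 *
 *	bit  12      offline due to login reject when set
 *	bits 11..8   negotiated BB-SCN
 *	bits  3..0   configured BB-SCN
 */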
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
		    stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
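/*
 * Usage note for qla2x00_get_priv_stats() (descriptive only): for
 * QL_VND_GET_PRIV_STATS_EX the second vendor command word selects the
 * options passed to qla24xx_get_isp_stats(), while the plain
 * QL_VND_GET_PRIV_STATS variant always uses options == 0. The stats
 * buffer is a coherent DMA allocation since the firmware writes the
 * counters into it directly.
 */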
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}

static int
qla2x00_do_dport_diagnostics_v2(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag_v2 *dd;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint16_t options;

	if (!IS_DPORT_CAPABLE(vha->hw))
		return -EPERM;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	options = dd->options;

	/* Check dport Test in progress */
	if (options == QLA_GET_DPORT_RESULT_V2 &&
	    vha->dport_status & DPORT_DIAG_IN_PROGRESS) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_DPORT_DIAG_IN_PROCESS;
		goto dportcomplete;
	}

	/* Check chip reset in progress and start/restart requests arrive */
	if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
	    (options == QLA_START_DPORT_TEST_V2 ||
	     options == QLA_RESTART_DPORT_TEST_V2)) {
		vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS;
	}

	/* Check chip reset in progress and get result request arrive */
	if (vha->dport_status & DPORT_DIAG_CHIP_RESET_IN_PROGRESS &&
	    options == QLA_GET_DPORT_RESULT_V2) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_DPORT_DIAG_NOT_RUNNING;
		goto dportcomplete;
	}

	rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp);

	if (rval == QLA_SUCCESS) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_OK;
		if (options == QLA_START_DPORT_TEST_V2 ||
		    options == QLA_RESTART_DPORT_TEST_V2) {
			dd->mbx1 = mcp->mb[0];
			dd->mbx2 = mcp->mb[1];
			vha->dport_status |= DPORT_DIAG_IN_PROGRESS;
		} else if (options == QLA_GET_DPORT_RESULT_V2) {
			dd->mbx1 = le16_to_cpu(vha->dport_data[1]);
			dd->mbx2 = le16_to_cpu(vha->dport_data[2]);
		}
	} else {
		dd->mbx1 = mcp->mb[0];
		dd->mbx2 = mcp->mb[1];
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_DPORT_DIAG_ERR;
	}

dportcomplete:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}
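/*
 * D-Port v2 state handling (summary of the checks above): a result query
 * while DPORT_DIAG_IN_PROGRESS is set returns
 * EXT_STATUS_DPORT_DIAG_IN_PROCESS; a start or restart clears a pending
 * DPORT_DIAG_CHIP_RESET_IN_PROGRESS flag, whereas a result query after a
 * chip reset returns EXT_STATUS_DPORT_DIAG_NOT_RUNNING. On a successful
 * start/restart the mailbox status is echoed in dd->mbx1/dd->mbx2 and the
 * in-progress flag is set; results are read back from vha->dport_data[].
 */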
static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_hw_data *ha = vha->hw;
	struct qla_active_regions regions = { };
	struct active_regions active_regions = { };

	qla27xx_get_active_image(vha, &active_regions);
	regions.global_image = active_regions.global;

	if (IS_QLA27XX(ha))
		regions.nvme_params = QLA27XX_PRIMARY_IMAGE;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		regions.board_config = active_regions.aux.board_config;
		regions.vpd_nvram = active_regions.aux.vpd_nvram;
		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
		regions.nvme_params = active_regions.aux.nvme_params;
	}

	ql_dbg(ql_dbg_user, vha, 0x70e1,
	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n",
	    __func__, vha->host_no, regions.global_image,
	    regions.board_config, regions.vpd_nvram,
	    regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len = sizeof(regions);
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_manage_host_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_mng_host_stats_param *req_data;
	struct ql_vnd_mng_host_stats_resp rsp_data;
	u32 req_data_len;
	int ret = 0;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
		return -EIO;
	}

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data,
	    req_data_len);

	switch (req_data->action) {
	case QLA_STOP:
		ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
		break;
	case QLA_START:
		ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
		break;
	case QLA_CLEAR:
		ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
		break;
	default:
		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
		ret = -EIO;
		break;
	}

	kfree(req_data);

	/* Prepare response */
	rsp_data.status = ret;
	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt,
	    &rsp_data,
	    sizeof(struct ql_vnd_mng_host_stats_resp));

	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return ret;
}
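/*
 * qla2x00_manage_host_stats() maps the vendor action codes onto the
 * generic statistics helpers: QLA_STOP -> qla2xxx_stop_stats(),
 * QLA_START -> qla2xxx_start_stats(), QLA_CLEAR -> qla2xxx_reset_stats().
 * The helper's return value is echoed in rsp_data.status, so the caller
 * sees the per-action result even though the bsg result itself is DID_OK.
 */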
static int
qla2x00_get_host_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_stats_param *req_data;
	struct ql_vnd_host_stats_resp rsp_data;
	u32 req_data_len;
	int ret = 0;
	u64 ini_entry_count = 0;
	u64 entry_count = 0;
	u64 tgt_num = 0;
	u64 tmp_stat_type = 0;
	u64 response_len = 0;
	void *data;

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	/* Copy stat type to work on it */
	tmp_stat_type = req_data->stat_type;

	if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
		/* Num of tgts connected to this host */
		tgt_num = qla2x00_get_num_tgts(vha);
		/* unset BIT_17 */
		tmp_stat_type &= ~(1 << 17);
	}

	/* Total ini stats */
	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);

	/* Total number of entries */
	entry_count = ini_entry_count + tgt_num;

	response_len = sizeof(struct ql_vnd_host_stats_resp) +
	    (sizeof(struct ql_vnd_stat_entry) * entry_count);

	if (response_len > bsg_job->reply_payload.payload_len) {
		rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &rsp_data,
		    sizeof(struct ql_vnd_mng_host_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
		goto host_stat_out;
	}

	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto host_stat_out;
	}

	ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
	    data, response_len);

	rsp_data.status = EXT_STATUS_OK;
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt,
	    data, response_len);
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(data);
host_stat_out:
	kfree(req_data);
	return ret;
}

static struct fc_rport *
qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
{
	fc_port_t *fcport = NULL;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport->number == tgt_num)
			return fcport->rport;
	}
	return NULL;
}
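/*
 * Worked sizing example for qla2x00_get_host_stats() (illustrative
 * numbers only): with a stat_type carrying three set bits plus
 * QLA2XX_TGT_SHT_LNK_DOWN (bit 17, per the mask above) and four
 * discovered targets, entry_count = 3 + 4 = 7, so the caller's reply
 * buffer must hold sizeof(struct ql_vnd_host_stats_resp) +
 * 7 * sizeof(struct ql_vnd_stat_entry); anything smaller is answered
 * with EXT_STATUS_BUFFER_TOO_SMALL.
 */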
static int
qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_tgt_stats_param *req_data;
	u32 req_data_len;
	int ret = 0;
	u64 response_len = 0;
	struct ql_vnd_tgt_stats_resp *data = NULL;
	struct fc_rport *rport = NULL;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
		return -EIO;
	}

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    req_data, req_data_len);

	response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
	    sizeof(struct ql_vnd_stat_entry);

	/* structure + size for one entry */
	data = kzalloc(response_len, GFP_KERNEL);
	if (!data) {
		kfree(req_data);
		return -ENOMEM;
	}

	if (response_len > bsg_job->reply_payload.payload_len) {
		data->status = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
		bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);

		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, data,
		    sizeof(struct ql_vnd_tgt_stats_resp));

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
		goto tgt_stat_out;
	}

	rport = qla2xxx_find_rport(vha, req_data->tgt_id);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
		ret = EXT_STATUS_INVALID_PARAM;
		data->status = EXT_STATUS_INVALID_PARAM;
		goto reply;
	}

	ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
	    rport, (void *)data, response_len);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
reply:
	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, data,
	    response_len);
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

tgt_stat_out:
	kfree(data);
	kfree(req_data);

	return ret;
}
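/*
 * qla2x00_get_tgt_stats() resolves the caller-supplied tgt_id to an
 * fc_rport through qla2xxx_find_rport(), a linear walk of vha->vp_fcports
 * matching rport->number. When no such target exists the response body
 * carries EXT_STATUS_INVALID_PARAM; otherwise the reply holds one
 * ql_vnd_tgt_stats_resp header plus a single stat entry.
 */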
static int
qla2x00_manage_host_port(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct ql_vnd_mng_host_port_param *req_data;
	struct ql_vnd_mng_host_port_resp rsp_data;
	u32 req_data_len;
	int ret = 0;

	req_data_len = bsg_job->request_payload.payload_len;

	if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
		return -EIO;
	}

	req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	switch (req_data->action) {
	case QLA_ENABLE:
		ret = qla2xxx_enable_port(vha->host);
		break;
	case QLA_DISABLE:
		ret = qla2xxx_disable_port(vha->host);
		break;
	default:
		ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
		ret = -EIO;
		break;
	}

	kfree(req_data);

	/* Prepare response */
	rsp_data.status = ret;
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);

	bsg_reply->reply_payload_rcv_len =
	    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &rsp_data,
	    sizeof(struct ql_vnd_mng_host_port_resp));
	bsg_reply->result = DID_OK;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return ret;
}

static int
qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n",
	    __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]);

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS_V2:
		return qla2x00_do_dport_diagnostics_v2(bsg_job);

	case QL_VND_EDIF_MGMT:
		return qla_edif_app_mgmt(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	case QL_VND_MANAGE_HOST_STATS:
		return qla2x00_manage_host_stats(bsg_job);

	case QL_VND_GET_HOST_STATS:
		return qla2x00_get_host_stats(bsg_job);

	case QL_VND_GET_TGT_STATS:
		return qla2x00_get_tgt_stats(bsg_job);

	case QL_VND_MANAGE_HOST_PORT:
		return qla2x00_manage_host_port(bsg_job);

	case QL_VND_MBX_PASSTHRU:
		return qla2x00_mailbox_passthru(bsg_job);

	default:
		return -ENOSYS;
	}
}
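/*
 * Dispatch sketch (illustrative only, not driver code): every vendor
 * request arrives as FC_BSG_HST_VENDOR with the sub-opcode in
 * bsg_request->rqst_data.h_vendor.vendor_cmd[0]. A hypothetical caller
 * selecting the serdes read path would prepare something like:
 *
 *	vendor_cmd[0] = QL_VND_SERDES_OP;
 *	struct qla_serdes_reg sr = {
 *		.cmd  = INT_SC_SERDES_READ_REG,
 *		.addr = <register address>,	placeholder value
 *	};
 *
 * place sr in the request payload, and read it back, with sr.val filled
 * in, from the reply payload.
 */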
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	/* Disable port will bring down the chip, allow enable command */
	if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
	    bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
		goto skip_chip_chk;

	if (vha->hw->flags.port_isolated) {
		bsg_reply->result = DID_ERROR;
		/* operation not permitted */
		return -EPERM;
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EBUSY;
	}

	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
		return -EIO;
	}

skip_chip_chk:
	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "Entered %s msgcode=0x%x. bsg ptr %px\n",
	    __func__, bsg_request->msgcode, bsg_job);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(vha, bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}

	ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000,
	    "%s done with return %x\n", __func__, ret);

	return ret;
}
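/*
 * Entry-point ordering in qla24xx_bsg_request() (descriptive only):
 * QL_VND_MANAGE_HOST_PORT and QL_VND_GET_HOST_STATS skip the chip-state
 * checks because disabling a port intentionally brings the chip down and
 * those commands must still be serviceable; every other request is
 * refused while the port is isolated, the chip is down, or the driver is
 * being removed.
 */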
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n",
	    __func__, bsg_job);

	if (qla2x00_isp_reg_stat(ha)) {
		ql_log(ql_log_info, vha, 0x9007,
		    "PCI/Register disconnect.\n");
		qla_pci_set_eeh_busy(vha);
	}

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp &&
			    (sp->type == SRB_CT_CMD ||
			     sp->type == SRB_ELS_CMD_HST ||
			     sp->type == SRB_ELS_CMD_HST_NOLOGIN ||
			     sp->type == SRB_FXIOCB_BCMD) &&
			    sp->u.bsg_job == bsg_job) {
				req->outstanding_cmds[cnt] = NULL;
				spin_unlock_irqrestore(&ha->hardware_lock, flags);

				if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					bsg_reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					bsg_reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;

			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* ref: INIT */
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
	return 0;
}

int qla2x00_mailbox_passthru(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	int ret = -EINVAL;
	int ptsize = sizeof(struct qla_mbx_passthru);
	struct qla_mbx_passthru *req_data = NULL;
	uint32_t req_data_len;

	req_data_len = bsg_job->request_payload.payload_len;
	if (req_data_len != ptsize) {
		ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n");
		return -EIO;
	}
	req_data = kzalloc(ptsize, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0xf0a4,
		    "req_data memory allocation failure.\n");
		return -ENOMEM;
	}

	/* Copy the request buffer in req_data */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, ptsize);
	ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out);

	/* Copy req_data back into the reply buffer */
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, req_data, ptsize);

	bsg_reply->reply_payload_rcv_len = ptsize;
	if (ret == QLA_SUCCESS)
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	else
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR;

	bsg_job->reply_len = sizeof(*bsg_job->reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);

	kfree(req_data);

	return ret;
}