// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

static void qla2xxx_free_fcport_work(struct work_struct *work)
{
	struct fc_port *fcport = container_of(work, typeof(*fcport),
	    free_work);

	qla2x00_free_fcport(fcport);
}

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	ql_dbg(ql_dbg_user, sp->vha, 0x7009,
	    "%s: sp hdl %x, result=%x bsg ptr %p\n",
	    __func__, sp->handle, res, bsg_job);

	sp->free(sp);

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
}

void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {

		if (sp->remap.remapped) {
			dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
			    sp->remap.rsp.dma);
			dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
			    sp->remap.req.dma);
		} else {
			dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

			dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		}
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST) {
		INIT_WORK(&sp->fcport->free_work, qla2xxx_free_fcport_work);
		queue_work(ha->wq, &sp->fcport->free_work);
	}

	qla2x00_rel_sp(sp);
}
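
/*
 * Editorial note (not part of the original source): qla2x00_bsg_sp_free() is
 * the common sp->free handler for the pass-through paths below.  Once
 * qla2x00_start_sp() has accepted an SRB, the request/reply scatterlists
 * mapped with dma_map_sg() are unmapped here at completion time, and any
 * dummy fcport allocated for host-based ELS/CT/FX commands is released via
 * the free_work handler above; the error paths in the submitting functions
 * only clean up what was set up before submission failed.
 */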

int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header*/
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid then
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;
	uint32_t els_cmd = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
		els_cmd = bsg_request->rqst_data.h_els.command_code;
		if (els_cmd == ELS_AUTH_ELS)
			return qla_edif_process_els(vha, bsg_job);
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For host
		 * based ELS commands there is no fcport structure allocated.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (bsg_request->msgcode == FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}
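
/*
 * Editorial note (not part of the original source): the arithmetic in
 * qla24xx_calc_ct_iocbs() above assumes the command IOCB carries two data
 * segment descriptors and each continuation IOCB carries five more, so for
 * example 2 DSDs need 1 IOCB, 7 DSDs need 2 IOCBs and 8 DSDs need 3 IOCBs.
 */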

static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg returned %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based ELS commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s els type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			qla2xxx_dump_fw(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	void *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];

	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
	    get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD &&
	    elreq.options == EXTERNAL_LOOPBACK))) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK)
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			else
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					qla2xxx_dump_fw(vha);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}
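
/*
 * Editorial note (not part of the original source): qla84xx_updatefw() below
 * stages the user-supplied firmware image in a single coherent DMA buffer
 * and describes it to the ISP84xx with one data segment descriptor in a
 * VERIFY_CHIP_IOCB_TYPE IOCB; A84_ISSUE_UPDATE_DIAGFW_CMD selects the
 * diagnostic firmware image via the VCO_DIAG_FW option bit.
 */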

static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	if (rval) {
		bsg_reply->result = -EINVAL;
		rval = -EINVAL;
	} else {
		bsg_reply->result = DID_OK;
	}
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
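
/*
 * Editorial note (not part of the original source): the FRU version, FRU
 * status and I2C helpers above share one pattern: the vendor payload is
 * copied from the bsg request into a small on-stack structure, staged
 * through a buffer from ha->s_dma_pool for the mailbox-driven SFP
 * read/write, and any result is copied back into the bsg reply payload.
 * Mailbox failures are reported through vendor_rsp[0] (EXT_STATUS_MAILBOX)
 * while bsg_reply->result is still completed as DID_OK << 16.
 */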

static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "DMA mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
"req_data_len != rsp_data_len\n"); 1927 goto done_unmap_sg; 1928 } 1929 1930 /* Alloc SRB structure */ 1931 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); 1932 if (!sp) { 1933 ql_dbg(ql_dbg_user, vha, 0x70ac, 1934 "Alloc SRB structure failed\n"); 1935 rval = EXT_STATUS_NO_MEMORY; 1936 goto done_unmap_sg; 1937 } 1938 1939 /*Populate srb->ctx with bidir ctx*/ 1940 sp->u.bsg_job = bsg_job; 1941 sp->free = qla2x00_bsg_sp_free; 1942 sp->type = SRB_BIDI_CMD; 1943 sp->done = qla2x00_bsg_job_done; 1944 1945 /* Add the read and write sg count */ 1946 tot_dsds = rsp_sg_cnt + req_sg_cnt; 1947 1948 rval = qla2x00_start_bidir(sp, vha, tot_dsds); 1949 if (rval != EXT_STATUS_OK) 1950 goto done_free_srb; 1951 /* the bsg request will be completed in the interrupt handler */ 1952 return rval; 1953 1954 done_free_srb: 1955 mempool_free(sp, ha->srb_mempool); 1956 done_unmap_sg: 1957 dma_unmap_sg(&ha->pdev->dev, 1958 bsg_job->reply_payload.sg_list, 1959 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1960 done_unmap_req_sg: 1961 dma_unmap_sg(&ha->pdev->dev, 1962 bsg_job->request_payload.sg_list, 1963 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1964 done: 1965 1966 /* Return an error vendor specific response 1967 * and complete the bsg request 1968 */ 1969 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1970 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1971 bsg_reply->reply_payload_rcv_len = 0; 1972 bsg_reply->result = (DID_OK) << 16; 1973 bsg_job_done(bsg_job, bsg_reply->result, 1974 bsg_reply->reply_payload_rcv_len); 1975 /* Always return success, vendor rsp carries correct status */ 1976 return 0; 1977 } 1978 1979 static int 1980 qlafx00_mgmt_cmd(struct bsg_job *bsg_job) 1981 { 1982 struct fc_bsg_request *bsg_request = bsg_job->request; 1983 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1984 scsi_qla_host_t *vha = shost_priv(host); 1985 struct qla_hw_data *ha = vha->hw; 1986 int rval = (DID_ERROR << 16); 1987 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 1988 srb_t *sp; 1989 int req_sg_cnt = 0, rsp_sg_cnt = 0; 1990 struct fc_port *fcport; 1991 char *type = "FC_BSG_HST_FX_MGMT"; 1992 1993 /* Copy the IOCB specific information */ 1994 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 1995 &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1996 1997 /* Dump the vendor information */ 1998 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, 1999 piocb_rqst, sizeof(*piocb_rqst)); 2000 2001 if (!vha->flags.online) { 2002 ql_log(ql_log_warn, vha, 0x70d0, 2003 "Host is not online.\n"); 2004 rval = -EIO; 2005 goto done; 2006 } 2007 2008 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { 2009 req_sg_cnt = dma_map_sg(&ha->pdev->dev, 2010 bsg_job->request_payload.sg_list, 2011 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 2012 if (!req_sg_cnt) { 2013 ql_log(ql_log_warn, vha, 0x70c7, 2014 "dma_map_sg return %d for request\n", req_sg_cnt); 2015 rval = -ENOMEM; 2016 goto done; 2017 } 2018 } 2019 2020 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { 2021 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 2022 bsg_job->reply_payload.sg_list, 2023 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2024 if (!rsp_sg_cnt) { 2025 ql_log(ql_log_warn, vha, 0x70c8, 2026 "dma_map_sg return %d for reply\n", rsp_sg_cnt); 2027 rval = -ENOMEM; 2028 goto done_unmap_req_sg; 2029 } 2030 } 2031 2032 ql_dbg(ql_dbg_user, vha, 0x70c9, 2033 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " 2034 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, 2035 req_sg_cnt, bsg_job->reply_payload.sg_cnt, 
rsp_sg_cnt); 2036 2037 /* Allocate a dummy fcport structure, since functions preparing the 2038 * IOCB and mailbox command retrieves port specific information 2039 * from fcport structure. For Host based ELS commands there will be 2040 * no fcport structure allocated 2041 */ 2042 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2043 if (!fcport) { 2044 ql_log(ql_log_warn, vha, 0x70ca, 2045 "Failed to allocate fcport.\n"); 2046 rval = -ENOMEM; 2047 goto done_unmap_rsp_sg; 2048 } 2049 2050 /* Alloc SRB structure */ 2051 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2052 if (!sp) { 2053 ql_log(ql_log_warn, vha, 0x70cb, 2054 "qla2x00_get_sp failed.\n"); 2055 rval = -ENOMEM; 2056 goto done_free_fcport; 2057 } 2058 2059 /* Initialize all required fields of fcport */ 2060 fcport->vha = vha; 2061 fcport->loop_id = le32_to_cpu(piocb_rqst->dataword); 2062 2063 sp->type = SRB_FXIOCB_BCMD; 2064 sp->name = "bsg_fx_mgmt"; 2065 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); 2066 sp->u.bsg_job = bsg_job; 2067 sp->free = qla2x00_bsg_sp_free; 2068 sp->done = qla2x00_bsg_job_done; 2069 2070 ql_dbg(ql_dbg_user, vha, 0x70cc, 2071 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n", 2072 type, piocb_rqst->func_type, fcport->loop_id); 2073 2074 rval = qla2x00_start_sp(sp); 2075 if (rval != QLA_SUCCESS) { 2076 ql_log(ql_log_warn, vha, 0x70cd, 2077 "qla2x00_start_sp failed=%d.\n", rval); 2078 mempool_free(sp, ha->srb_mempool); 2079 rval = -EIO; 2080 goto done_free_fcport; 2081 } 2082 return rval; 2083 2084 done_free_fcport: 2085 qla2x00_free_fcport(fcport); 2086 2087 done_unmap_rsp_sg: 2088 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) 2089 dma_unmap_sg(&ha->pdev->dev, 2090 bsg_job->reply_payload.sg_list, 2091 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2092 done_unmap_req_sg: 2093 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) 2094 dma_unmap_sg(&ha->pdev->dev, 2095 bsg_job->request_payload.sg_list, 2096 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 2097 2098 done: 2099 return rval; 2100 } 2101 2102 static int 2103 qla26xx_serdes_op(struct bsg_job *bsg_job) 2104 { 2105 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2106 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2107 scsi_qla_host_t *vha = shost_priv(host); 2108 int rval = 0; 2109 struct qla_serdes_reg sr; 2110 2111 memset(&sr, 0, sizeof(sr)); 2112 2113 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2114 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); 2115 2116 switch (sr.cmd) { 2117 case INT_SC_SERDES_WRITE_REG: 2118 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); 2119 bsg_reply->reply_payload_rcv_len = 0; 2120 break; 2121 case INT_SC_SERDES_READ_REG: 2122 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); 2123 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2124 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); 2125 bsg_reply->reply_payload_rcv_len = sizeof(sr); 2126 break; 2127 default: 2128 ql_dbg(ql_dbg_user, vha, 0x708c, 2129 "Unknown serdes cmd %x.\n", sr.cmd); 2130 rval = -EINVAL; 2131 break; 2132 } 2133 2134 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2135 rval ? 
EXT_STATUS_MAILBOX : 0; 2136 2137 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2138 bsg_reply->result = DID_OK << 16; 2139 bsg_job_done(bsg_job, bsg_reply->result, 2140 bsg_reply->reply_payload_rcv_len); 2141 return 0; 2142 } 2143 2144 static int 2145 qla8044_serdes_op(struct bsg_job *bsg_job) 2146 { 2147 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2148 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2149 scsi_qla_host_t *vha = shost_priv(host); 2150 int rval = 0; 2151 struct qla_serdes_reg_ex sr; 2152 2153 memset(&sr, 0, sizeof(sr)); 2154 2155 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2156 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); 2157 2158 switch (sr.cmd) { 2159 case INT_SC_SERDES_WRITE_REG: 2160 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); 2161 bsg_reply->reply_payload_rcv_len = 0; 2162 break; 2163 case INT_SC_SERDES_READ_REG: 2164 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); 2165 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2166 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); 2167 bsg_reply->reply_payload_rcv_len = sizeof(sr); 2168 break; 2169 default: 2170 ql_dbg(ql_dbg_user, vha, 0x7020, 2171 "Unknown serdes cmd %x.\n", sr.cmd); 2172 rval = -EINVAL; 2173 break; 2174 } 2175 2176 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2177 rval ? EXT_STATUS_MAILBOX : 0; 2178 2179 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2180 bsg_reply->result = DID_OK << 16; 2181 bsg_job_done(bsg_job, bsg_reply->result, 2182 bsg_reply->reply_payload_rcv_len); 2183 return 0; 2184 } 2185 2186 static int 2187 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job) 2188 { 2189 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2190 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2191 scsi_qla_host_t *vha = shost_priv(host); 2192 struct qla_hw_data *ha = vha->hw; 2193 struct qla_flash_update_caps cap; 2194 2195 if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha)) 2196 return -EPERM; 2197 2198 memset(&cap, 0, sizeof(cap)); 2199 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 | 2200 (uint64_t)ha->fw_attributes_ext[0] << 32 | 2201 (uint64_t)ha->fw_attributes_h << 16 | 2202 (uint64_t)ha->fw_attributes; 2203 2204 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2205 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap)); 2206 bsg_reply->reply_payload_rcv_len = sizeof(cap); 2207 2208 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2209 EXT_STATUS_OK; 2210 2211 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2212 bsg_reply->result = DID_OK << 16; 2213 bsg_job_done(bsg_job, bsg_reply->result, 2214 bsg_reply->reply_payload_rcv_len); 2215 return 0; 2216 } 2217 2218 static int 2219 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job) 2220 { 2221 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2222 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2223 scsi_qla_host_t *vha = shost_priv(host); 2224 struct qla_hw_data *ha = vha->hw; 2225 uint64_t online_fw_attr = 0; 2226 struct qla_flash_update_caps cap; 2227 2228 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 2229 return -EPERM; 2230 2231 memset(&cap, 0, sizeof(cap)); 2232 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2233 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap)); 2234 2235 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 | 2236 (uint64_t)ha->fw_attributes_ext[0] << 32 | 2237 (uint64_t)ha->fw_attributes_h << 16 | 2238 (uint64_t)ha->fw_attributes; 2239 2240 if (online_fw_attr != cap.capabilities) { 2241 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2242 EXT_STATUS_INVALID_PARAM; 
2243 return -EINVAL; 2244 } 2245 2246 if (cap.outage_duration < MAX_LOOP_TIMEOUT) { 2247 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2248 EXT_STATUS_INVALID_PARAM; 2249 return -EINVAL; 2250 } 2251 2252 bsg_reply->reply_payload_rcv_len = 0; 2253 2254 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2255 EXT_STATUS_OK; 2256 2257 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2258 bsg_reply->result = DID_OK << 16; 2259 bsg_job_done(bsg_job, bsg_reply->result, 2260 bsg_reply->reply_payload_rcv_len); 2261 return 0; 2262 } 2263 2264 static int 2265 qla27xx_get_bbcr_data(struct bsg_job *bsg_job) 2266 { 2267 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2268 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2269 scsi_qla_host_t *vha = shost_priv(host); 2270 struct qla_hw_data *ha = vha->hw; 2271 struct qla_bbcr_data bbcr; 2272 uint16_t loop_id, topo, sw_cap; 2273 uint8_t domain, area, al_pa, state; 2274 int rval; 2275 2276 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 2277 return -EPERM; 2278 2279 memset(&bbcr, 0, sizeof(bbcr)); 2280 2281 if (vha->flags.bbcr_enable) 2282 bbcr.status = QLA_BBCR_STATUS_ENABLED; 2283 else 2284 bbcr.status = QLA_BBCR_STATUS_DISABLED; 2285 2286 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) { 2287 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, 2288 &area, &domain, &topo, &sw_cap); 2289 if (rval != QLA_SUCCESS) { 2290 bbcr.status = QLA_BBCR_STATUS_UNKNOWN; 2291 bbcr.state = QLA_BBCR_STATE_OFFLINE; 2292 bbcr.mbx1 = loop_id; 2293 goto done; 2294 } 2295 2296 state = (vha->bbcr >> 12) & 0x1; 2297 2298 if (state) { 2299 bbcr.state = QLA_BBCR_STATE_OFFLINE; 2300 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT; 2301 } else { 2302 bbcr.state = QLA_BBCR_STATE_ONLINE; 2303 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf; 2304 } 2305 2306 bbcr.configured_bbscn = vha->bbcr & 0xf; 2307 } 2308 2309 done: 2310 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2311 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr)); 2312 bsg_reply->reply_payload_rcv_len = sizeof(bbcr); 2313 2314 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2315 2316 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2317 bsg_reply->result = DID_OK << 16; 2318 bsg_job_done(bsg_job, bsg_reply->result, 2319 bsg_reply->reply_payload_rcv_len); 2320 return 0; 2321 } 2322 2323 static int 2324 qla2x00_get_priv_stats(struct bsg_job *bsg_job) 2325 { 2326 struct fc_bsg_request *bsg_request = bsg_job->request; 2327 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2328 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2329 scsi_qla_host_t *vha = shost_priv(host); 2330 struct qla_hw_data *ha = vha->hw; 2331 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2332 struct link_statistics *stats = NULL; 2333 dma_addr_t stats_dma; 2334 int rval; 2335 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd; 2336 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? 
cmd[1] : 0; 2337 2338 if (test_bit(UNLOADING, &vha->dpc_flags)) 2339 return -ENODEV; 2340 2341 if (unlikely(pci_channel_offline(ha->pdev))) 2342 return -ENODEV; 2343 2344 if (qla2x00_reset_active(vha)) 2345 return -EBUSY; 2346 2347 if (!IS_FWI2_CAPABLE(ha)) 2348 return -EPERM; 2349 2350 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, 2351 GFP_KERNEL); 2352 if (!stats) { 2353 ql_log(ql_log_warn, vha, 0x70e2, 2354 "Failed to allocate memory for stats.\n"); 2355 return -ENOMEM; 2356 } 2357 2358 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options); 2359 2360 if (rval == QLA_SUCCESS) { 2361 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5, 2362 stats, sizeof(*stats)); 2363 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2364 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); 2365 } 2366 2367 bsg_reply->reply_payload_rcv_len = sizeof(*stats); 2368 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2369 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; 2370 2371 bsg_job->reply_len = sizeof(*bsg_reply); 2372 bsg_reply->result = DID_OK << 16; 2373 bsg_job_done(bsg_job, bsg_reply->result, 2374 bsg_reply->reply_payload_rcv_len); 2375 2376 dma_free_coherent(&ha->pdev->dev, sizeof(*stats), 2377 stats, stats_dma); 2378 2379 return 0; 2380 } 2381 2382 static int 2383 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) 2384 { 2385 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2386 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2387 scsi_qla_host_t *vha = shost_priv(host); 2388 int rval; 2389 struct qla_dport_diag *dd; 2390 2391 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 2392 !IS_QLA28XX(vha->hw)) 2393 return -EPERM; 2394 2395 dd = kmalloc(sizeof(*dd), GFP_KERNEL); 2396 if (!dd) { 2397 ql_log(ql_log_warn, vha, 0x70db, 2398 "Failed to allocate memory for dport.\n"); 2399 return -ENOMEM; 2400 } 2401 2402 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2403 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); 2404 2405 rval = qla26xx_dport_diagnostics( 2406 vha, dd->buf, sizeof(dd->buf), dd->options); 2407 if (rval == QLA_SUCCESS) { 2408 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2409 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); 2410 } 2411 2412 bsg_reply->reply_payload_rcv_len = sizeof(*dd); 2413 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2414 rval ? 
EXT_STATUS_MAILBOX : EXT_STATUS_OK; 2415 2416 bsg_job->reply_len = sizeof(*bsg_reply); 2417 bsg_reply->result = DID_OK << 16; 2418 bsg_job_done(bsg_job, bsg_reply->result, 2419 bsg_reply->reply_payload_rcv_len); 2420 2421 kfree(dd); 2422 2423 return 0; 2424 } 2425 2426 static int 2427 qla2x00_get_flash_image_status(struct bsg_job *bsg_job) 2428 { 2429 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 2430 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2431 struct qla_hw_data *ha = vha->hw; 2432 struct qla_active_regions regions = { }; 2433 struct active_regions active_regions = { }; 2434 2435 qla27xx_get_active_image(vha, &active_regions); 2436 regions.global_image = active_regions.global; 2437 2438 if (IS_QLA28XX(ha)) { 2439 qla28xx_get_aux_images(vha, &active_regions); 2440 regions.board_config = active_regions.aux.board_config; 2441 regions.vpd_nvram = active_regions.aux.vpd_nvram; 2442 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1; 2443 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3; 2444 } 2445 2446 ql_dbg(ql_dbg_user, vha, 0x70e1, 2447 "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n", 2448 __func__, vha->host_no, regions.global_image, 2449 regions.board_config, regions.vpd_nvram, 2450 regions.npiv_config_0_1, regions.npiv_config_2_3); 2451 2452 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2453 bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions)); 2454 2455 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2456 bsg_reply->reply_payload_rcv_len = sizeof(regions); 2457 bsg_reply->result = DID_OK << 16; 2458 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2459 bsg_job_done(bsg_job, bsg_reply->result, 2460 bsg_reply->reply_payload_rcv_len); 2461 2462 return 0; 2463 } 2464 2465 static int 2466 qla2x00_manage_host_stats(struct bsg_job *bsg_job) 2467 { 2468 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 2469 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2470 struct ql_vnd_mng_host_stats_param *req_data; 2471 struct ql_vnd_mng_host_stats_resp rsp_data; 2472 u32 req_data_len; 2473 int ret = 0; 2474 2475 if (!vha->flags.online) { 2476 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n"); 2477 return -EIO; 2478 } 2479 2480 req_data_len = bsg_job->request_payload.payload_len; 2481 2482 if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) { 2483 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); 2484 return -EIO; 2485 } 2486 2487 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); 2488 if (!req_data) { 2489 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); 2490 return -ENOMEM; 2491 } 2492 2493 /* Copy the request buffer in req_data */ 2494 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2495 bsg_job->request_payload.sg_cnt, req_data, 2496 req_data_len); 2497 2498 switch (req_data->action) { 2499 case QLA_STOP: 2500 ret = qla2xxx_stop_stats(vha->host, req_data->stat_type); 2501 break; 2502 case QLA_START: 2503 ret = qla2xxx_start_stats(vha->host, req_data->stat_type); 2504 break; 2505 case QLA_CLEAR: 2506 ret = qla2xxx_reset_stats(vha->host, req_data->stat_type); 2507 break; 2508 default: 2509 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n"); 2510 ret = -EIO; 2511 break; 2512 } 2513 2514 kfree(req_data); 2515 2516 /* Prepare response */ 2517 rsp_data.status = ret; 2518 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); 2519 2520 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2521
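/* Descriptive note (added): the per-action result is carried back to the caller in rsp_data.status via the reply payload copied below; vendor_rsp[0] only indicates that the request itself was processed. */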
bsg_reply->reply_payload_rcv_len = 2522 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2523 bsg_job->reply_payload.sg_cnt, 2524 &rsp_data, 2525 sizeof(struct ql_vnd_mng_host_stats_resp)); 2526 2527 bsg_reply->result = DID_OK; 2528 bsg_job_done(bsg_job, bsg_reply->result, 2529 bsg_reply->reply_payload_rcv_len); 2530 2531 return ret; 2532 } 2533 2534 static int 2535 qla2x00_get_host_stats(struct bsg_job *bsg_job) 2536 { 2537 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 2538 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2539 struct ql_vnd_stats_param *req_data; 2540 struct ql_vnd_host_stats_resp rsp_data; 2541 u32 req_data_len; 2542 int ret = 0; 2543 u64 ini_entry_count = 0; 2544 u64 entry_count = 0; 2545 u64 tgt_num = 0; 2546 u64 tmp_stat_type = 0; 2547 u64 response_len = 0; 2548 void *data; 2549 2550 req_data_len = bsg_job->request_payload.payload_len; 2551 2552 if (req_data_len != sizeof(struct ql_vnd_stats_param)) { 2553 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); 2554 return -EIO; 2555 } 2556 2557 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); 2558 if (!req_data) { 2559 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); 2560 return -ENOMEM; 2561 } 2562 2563 /* Copy the request buffer in req_data */ 2564 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2565 bsg_job->request_payload.sg_cnt, req_data, req_data_len); 2566 2567 /* Copy stat type to work on it */ 2568 tmp_stat_type = req_data->stat_type; 2569 2570 if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) { 2571 /* Num of tgts connected to this host */ 2572 tgt_num = qla2x00_get_num_tgts(vha); 2573 /* unset BIT_17 */ 2574 tmp_stat_type &= ~(1 << 17); 2575 } 2576 2577 /* Total ini stats */ 2578 ini_entry_count = qla2x00_count_set_bits(tmp_stat_type); 2579 2580 /* Total number of entries */ 2581 entry_count = ini_entry_count + tgt_num; 2582 2583 response_len = sizeof(struct ql_vnd_host_stats_resp) + 2584 (sizeof(struct ql_vnd_stat_entry) * entry_count); 2585 2586 if (response_len > bsg_job->reply_payload.payload_len) { 2587 rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL; 2588 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL; 2589 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); 2590 2591 bsg_reply->reply_payload_rcv_len = 2592 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2593 bsg_job->reply_payload.sg_cnt, &rsp_data, 2594 sizeof(struct ql_vnd_mng_host_stats_resp)); 2595 2596 bsg_reply->result = DID_OK; 2597 bsg_job_done(bsg_job, bsg_reply->result, 2598 bsg_reply->reply_payload_rcv_len); 2599 goto host_stat_out; 2600 } 2601 2602 data = kzalloc(response_len, GFP_KERNEL); 2603 if (!data) { 2604 ret = -ENOMEM; 2605 goto host_stat_out; 2606 } 2607 2608 ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type, 2609 data, response_len); 2610 2611 rsp_data.status = EXT_STATUS_OK; 2612 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2613 2614 bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2615 bsg_job->reply_payload.sg_cnt, 2616 data, response_len); 2617 bsg_reply->result = DID_OK; 2618 bsg_job_done(bsg_job, bsg_reply->result, 2619 bsg_reply->reply_payload_rcv_len); 2620 2621 kfree(data); 2622 host_stat_out: 2623 kfree(req_data); 2624 return ret; 2625 } 2626 2627 static struct fc_rport * 2628 qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num) 2629 { 2630 fc_port_t *fcport = NULL; 2631 2632 list_for_each_entry(fcport, 
&vha->vp_fcports, list) { 2633 if (fcport->rport->number == tgt_num) 2634 return fcport->rport; 2635 } 2636 return NULL; 2637 } 2638 2639 static int 2640 qla2x00_get_tgt_stats(struct bsg_job *bsg_job) 2641 { 2642 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 2643 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2644 struct ql_vnd_tgt_stats_param *req_data; 2645 u32 req_data_len; 2646 int ret = 0; 2647 u64 response_len = 0; 2648 struct ql_vnd_tgt_stats_resp *data = NULL; 2649 struct fc_rport *rport = NULL; 2650 2651 if (!vha->flags.online) { 2652 ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n"); 2653 return -EIO; 2654 } 2655 2656 req_data_len = bsg_job->request_payload.payload_len; 2657 2658 if (req_data_len != sizeof(struct ql_vnd_stat_entry)) { 2659 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); 2660 return -EIO; 2661 } 2662 2663 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); 2664 if (!req_data) { 2665 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); 2666 return -ENOMEM; 2667 } 2668 2669 /* Copy the request buffer in req_data */ 2670 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2671 bsg_job->request_payload.sg_cnt, 2672 req_data, req_data_len); 2673 2674 response_len = sizeof(struct ql_vnd_tgt_stats_resp) + 2675 sizeof(struct ql_vnd_stat_entry); 2676 2677 /* structure + size for one entry */ 2678 data = kzalloc(response_len, GFP_KERNEL); 2679 if (!data) { 2680 kfree(req_data); 2681 return -ENOMEM; 2682 } 2683 2684 if (response_len > bsg_job->reply_payload.payload_len) { 2685 data->status = EXT_STATUS_BUFFER_TOO_SMALL; 2686 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL; 2687 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); 2688 2689 bsg_reply->reply_payload_rcv_len = 2690 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2691 bsg_job->reply_payload.sg_cnt, data, 2692 sizeof(struct ql_vnd_tgt_stats_resp)); 2693 2694 bsg_reply->result = DID_OK; 2695 bsg_job_done(bsg_job, bsg_reply->result, 2696 bsg_reply->reply_payload_rcv_len); 2697 goto tgt_stat_out; 2698 } 2699 2700 rport = qla2xxx_find_rport(vha, req_data->tgt_id); 2701 if (!rport) { 2702 ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id); 2703 ret = EXT_STATUS_INVALID_PARAM; 2704 data->status = EXT_STATUS_INVALID_PARAM; 2705 goto reply; 2706 } 2707 2708 ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type, 2709 rport, (void *)data, response_len); 2710 2711 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2712 reply: 2713 bsg_reply->reply_payload_rcv_len = 2714 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2715 bsg_job->reply_payload.sg_cnt, data, 2716 response_len); 2717 bsg_reply->result = DID_OK; 2718 bsg_job_done(bsg_job, bsg_reply->result, 2719 bsg_reply->reply_payload_rcv_len); 2720 2721 tgt_stat_out: 2722 kfree(data); 2723 kfree(req_data); 2724 2725 return ret; 2726 } 2727 2728 static int 2729 qla2x00_manage_host_port(struct bsg_job *bsg_job) 2730 { 2731 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 2732 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2733 struct ql_vnd_mng_host_port_param *req_data; 2734 struct ql_vnd_mng_host_port_resp rsp_data; 2735 u32 req_data_len; 2736 int ret = 0; 2737 2738 req_data_len = bsg_job->request_payload.payload_len; 2739 2740 if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) { 2741 ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); 2742 return -EIO; 
2743 } 2744 2745 req_data = kzalloc(sizeof(*req_data), GFP_KERNEL); 2746 if (!req_data) { 2747 ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); 2748 return -ENOMEM; 2749 } 2750 2751 /* Copy the request buffer in req_data */ 2752 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2753 bsg_job->request_payload.sg_cnt, req_data, req_data_len); 2754 2755 switch (req_data->action) { 2756 case QLA_ENABLE: 2757 ret = qla2xxx_enable_port(vha->host); 2758 break; 2759 case QLA_DISABLE: 2760 ret = qla2xxx_disable_port(vha->host); 2761 break; 2762 default: 2763 ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n"); 2764 ret = -EIO; 2765 break; 2766 } 2767 2768 kfree(req_data); 2769 2770 /* Prepare response */ 2771 rsp_data.status = ret; 2772 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2773 bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp); 2774 2775 bsg_reply->reply_payload_rcv_len = 2776 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2777 bsg_job->reply_payload.sg_cnt, &rsp_data, 2778 sizeof(struct ql_vnd_mng_host_port_resp)); 2779 bsg_reply->result = DID_OK; 2780 bsg_job_done(bsg_job, bsg_reply->result, 2781 bsg_reply->reply_payload_rcv_len); 2782 2783 return ret; 2784 } 2785 2786 static int 2787 qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job) 2788 { 2789 struct fc_bsg_request *bsg_request = bsg_job->request; 2790 2791 ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n", 2792 __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]); 2793 2794 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) { 2795 case QL_VND_LOOPBACK: 2796 return qla2x00_process_loopback(bsg_job); 2797 2798 case QL_VND_A84_RESET: 2799 return qla84xx_reset(bsg_job); 2800 2801 case QL_VND_A84_UPDATE_FW: 2802 return qla84xx_updatefw(bsg_job); 2803 2804 case QL_VND_A84_MGMT_CMD: 2805 return qla84xx_mgmt_cmd(bsg_job); 2806 2807 case QL_VND_IIDMA: 2808 return qla24xx_iidma(bsg_job); 2809 2810 case QL_VND_FCP_PRIO_CFG_CMD: 2811 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); 2812 2813 case QL_VND_READ_FLASH: 2814 return qla2x00_read_optrom(bsg_job); 2815 2816 case QL_VND_UPDATE_FLASH: 2817 return qla2x00_update_optrom(bsg_job); 2818 2819 case QL_VND_SET_FRU_VERSION: 2820 return qla2x00_update_fru_versions(bsg_job); 2821 2822 case QL_VND_READ_FRU_STATUS: 2823 return qla2x00_read_fru_status(bsg_job); 2824 2825 case QL_VND_WRITE_FRU_STATUS: 2826 return qla2x00_write_fru_status(bsg_job); 2827 2828 case QL_VND_WRITE_I2C: 2829 return qla2x00_write_i2c(bsg_job); 2830 2831 case QL_VND_READ_I2C: 2832 return qla2x00_read_i2c(bsg_job); 2833 2834 case QL_VND_DIAG_IO_CMD: 2835 return qla24xx_process_bidir_cmd(bsg_job); 2836 2837 case QL_VND_FX00_MGMT_CMD: 2838 return qlafx00_mgmt_cmd(bsg_job); 2839 2840 case QL_VND_SERDES_OP: 2841 return qla26xx_serdes_op(bsg_job); 2842 2843 case QL_VND_SERDES_OP_EX: 2844 return qla8044_serdes_op(bsg_job); 2845 2846 case QL_VND_GET_FLASH_UPDATE_CAPS: 2847 return qla27xx_get_flash_upd_cap(bsg_job); 2848 2849 case QL_VND_SET_FLASH_UPDATE_CAPS: 2850 return qla27xx_set_flash_upd_cap(bsg_job); 2851 2852 case QL_VND_GET_BBCR_DATA: 2853 return qla27xx_get_bbcr_data(bsg_job); 2854 2855 case QL_VND_GET_PRIV_STATS: 2856 case QL_VND_GET_PRIV_STATS_EX: 2857 return qla2x00_get_priv_stats(bsg_job); 2858 2859 case QL_VND_DPORT_DIAGNOSTICS: 2860 return qla2x00_do_dport_diagnostics(bsg_job); 2861 2862 case QL_VND_EDIF_MGMT: 2863 return qla_edif_app_mgmt(bsg_job); 2864 2865 case 
QL_VND_SS_GET_FLASH_IMAGE_STATUS: 2866 return qla2x00_get_flash_image_status(bsg_job); 2867 2868 case QL_VND_MANAGE_HOST_STATS: 2869 return qla2x00_manage_host_stats(bsg_job); 2870 2871 case QL_VND_GET_HOST_STATS: 2872 return qla2x00_get_host_stats(bsg_job); 2873 2874 case QL_VND_GET_TGT_STATS: 2875 return qla2x00_get_tgt_stats(bsg_job); 2876 2877 case QL_VND_MANAGE_HOST_PORT: 2878 return qla2x00_manage_host_port(bsg_job); 2879 2880 default: 2881 return -ENOSYS; 2882 } 2883 } 2884 2885 int 2886 qla24xx_bsg_request(struct bsg_job *bsg_job) 2887 { 2888 struct fc_bsg_request *bsg_request = bsg_job->request; 2889 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2890 int ret = -EINVAL; 2891 struct fc_rport *rport; 2892 struct Scsi_Host *host; 2893 scsi_qla_host_t *vha; 2894 2895 /* In case no data transferred. */ 2896 bsg_reply->reply_payload_rcv_len = 0; 2897 2898 if (bsg_request->msgcode == FC_BSG_RPT_ELS) { 2899 rport = fc_bsg_to_rport(bsg_job); 2900 host = rport_to_shost(rport); 2901 vha = shost_priv(host); 2902 } else { 2903 host = fc_bsg_to_shost(bsg_job); 2904 vha = shost_priv(host); 2905 } 2906 2907 /* Disable port will bring down the chip, allow enable command */ 2908 if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT || 2909 bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS) 2910 goto skip_chip_chk; 2911 2912 if (vha->hw->flags.port_isolated) { 2913 bsg_reply->result = DID_ERROR; 2914 /* operation not permitted */ 2915 return -EPERM; 2916 } 2917 2918 if (qla2x00_chip_is_down(vha)) { 2919 ql_dbg(ql_dbg_user, vha, 0x709f, 2920 "BSG: ISP abort active/needed -- cmd=%d.\n", 2921 bsg_request->msgcode); 2922 SET_DID_STATUS(bsg_reply->result, DID_ERROR); 2923 return -EBUSY; 2924 } 2925 2926 if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { 2927 SET_DID_STATUS(bsg_reply->result, DID_ERROR); 2928 return -EIO; 2929 } 2930 2931 skip_chip_chk: 2932 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, 2933 "Entered %s msgcode=0x%x. bsg ptr %px\n", 2934 __func__, bsg_request->msgcode, bsg_job); 2935 2936 switch (bsg_request->msgcode) { 2937 case FC_BSG_RPT_ELS: 2938 case FC_BSG_HST_ELS_NOLOGIN: 2939 ret = qla2x00_process_els(bsg_job); 2940 break; 2941 case FC_BSG_HST_CT: 2942 ret = qla2x00_process_ct(bsg_job); 2943 break; 2944 case FC_BSG_HST_VENDOR: 2945 ret = qla2x00_process_vendor_specific(vha, bsg_job); 2946 break; 2947 case FC_BSG_HST_ADD_RPORT: 2948 case FC_BSG_HST_DEL_RPORT: 2949 case FC_BSG_RPT_CT: 2950 default: 2951 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); 2952 break; 2953 } 2954 2955 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, 2956 "%s done with return %x\n", __func__, ret); 2957 2958 return ret; 2959 } 2960 2961 int 2962 qla24xx_bsg_timeout(struct bsg_job *bsg_job) 2963 { 2964 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2965 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 2966 struct qla_hw_data *ha = vha->hw; 2967 srb_t *sp; 2968 int cnt, que; 2969 unsigned long flags; 2970 struct req_que *req; 2971 2972 ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. 
bsg ptr %p.\n", 2973 __func__, bsg_job); 2974 /* find the bsg job from the active list of commands */ 2975 spin_lock_irqsave(&ha->hardware_lock, flags); 2976 for (que = 0; que < ha->max_req_queues; que++) { 2977 req = ha->req_q_map[que]; 2978 if (!req) 2979 continue; 2980 2981 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 2982 sp = req->outstanding_cmds[cnt]; 2983 if (sp && 2984 (sp->type == SRB_CT_CMD || 2985 sp->type == SRB_ELS_CMD_HST || 2986 sp->type == SRB_ELS_CMD_HST_NOLOGIN || 2987 sp->type == SRB_FXIOCB_BCMD) && 2988 sp->u.bsg_job == bsg_job) { 2989 req->outstanding_cmds[cnt] = NULL; 2990 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2991 if (ha->isp_ops->abort_command(sp)) { 2992 ql_log(ql_log_warn, vha, 0x7089, 2993 "mbx abort_command failed.\n"); 2994 bsg_reply->result = -EIO; 2995 } else { 2996 ql_dbg(ql_dbg_user, vha, 0x708a, 2997 "mbx abort_command success.\n"); 2998 bsg_reply->result = 0; 2999 } 3000 spin_lock_irqsave(&ha->hardware_lock, flags); 3001 goto done; 3002 3003 } 3004 } 3005 } 3006 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3007 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); 3008 bsg_reply->result = -ENXIO; 3009 return 0; 3010 3011 done: 3012 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3013 sp->free(sp); 3014 return 0; 3015 } 3016