/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

/* BSG support for ELS/CT pass through */
void
qla2x00_bsg_job_done(void *ptr, int res)
{
	srb_t *sp = ptr;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	sp->free(sp);
}

void
qla2x00_bsg_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(sp);
}

int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *)ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, then
			 * fcp_prio_cfg is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
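	/* Complete the bsg job here only on success; error paths return a
	 * negative errno to the caller instead.
	 */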
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For host
		 * based ELS commands there is no fcport structure allocated.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
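		/* 0xFD is the well-known address of the fabric controller;
		 * pick the matching NPort handle for the dummy port.
		 */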
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done_free_fcport:
	/* Only the dummy fcport allocated for a host based ELS command is
	 * owned here; an rport's fcport must not be freed.
	 */
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		kfree(fcport);
done:
	return rval;
}

/* A CT command IOCB carries two data segment descriptors; each
 * continuation IOCB carries five more.
 */
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg returned %d for request.\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based ELS commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
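		/* Loopback is currently enabled; clear the loopback bits
		 * while preserving the remainder of the port config.
		 */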
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for the DCBX complete event; the firmware may extend the
	 * timeout via idc_extend_tmo while the operation is in progress.
	 */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];
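	/* Use the ECHO diagnostic command when external loopback was
	 * requested and we are either switch attached (F-port topology) or
	 * the payload is a full-size ELS frame; otherwise fall back to the
	 * loopback test path.
	 */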
	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	    (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
	    req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK) {
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			} else {
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);
			}

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	/* Append the mailbox response registers and the command that was
	 * sent after the fc_bsg_reply header.
	 */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
	    sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	/* The firmware version is read from the third dword of the image */
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
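	/* The 84xx management request follows the fc_bsg_request header in
	 * the bsg request buffer.
	 */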
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}
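	/* Issue the Access Chip IOCB and wait for it to complete. */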
	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in, flags = 0x%x.\n",
		    fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);
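			/* For a GET operation, return the retrieved port
			 * parameters right after the fc_bsg_reply header.
			 */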
			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		/* Clamp the region to the end of the option ROM */
		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
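	/* The job always completes with DID_OK; the real status travels in
	 * vendor_rsp[0].
	 */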
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Fetch the payload lengths before comparing them; a bidirectional
	 * command requires the read and write sizes to match.
	 */
	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate srb->ctx with bidir ctx */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}

static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg returned %d for request.\n",
			    req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg returned %d for reply.\n",
			    rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based ELS commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}

static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}
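
	/*
	 * Per the vendor-command convention in this file, the SCSI result
	 * below is DID_OK even when the operation failed; vendor_rsp[0]
	 * carries the real status for the application to check.
	 */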
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	/* Pack the four 16-bit firmware attribute words into one 64-bit
	 * capabilities value, most significant word first.
	 */
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
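
/*
 * The SET side programs nothing by itself: it only checks that the
 * capabilities echoed back by the application match those of the
 * running firmware and that the requested outage duration is at least
 * MAX_LOOP_TIMEOUT, rejecting the request with
 * EXT_STATUS_INVALID_PARAM otherwise.
 */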
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
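
/*
 * QL_VND_GET_PRIV_STATS and QL_VND_GET_PRIV_STATS_EX share this
 * handler; only the _EX variant carries an options word in
 * vendor_cmd[1], which is passed through to the firmware statistics
 * command.
 */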
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
		    stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
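
/*
 * D-Port diagnostics: the option flags and the result buffer travel in
 * a single struct qla_dport_diag, copied in from the request payload
 * and copied back out to the reply payload on success.
 */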
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}

static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_hw_data *ha = vha->hw;
	struct qla_active_regions regions = { };
	struct active_regions active_regions = { };

	qla27xx_get_active_image(vha, &active_regions);
	regions.global_image = active_regions.global;

	if (IS_QLA28XX(ha)) {
		qla28xx_get_aux_images(vha, &active_regions);
		regions.board_config = active_regions.aux.board_config;
		regions.vpd_nvram = active_regions.aux.vpd_nvram;
		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
	}

	ql_dbg(ql_dbg_user, vha, 0x70e1,
	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
	    __func__, vha->host_no, regions.global_image,
	    regions.board_config, regions.vpd_nvram,
	    regions.npiv_config_0_1, regions.npiv_config_2_3);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len = sizeof(regions);
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
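
/*
 * Dispatch table for FC_BSG_HST_VENDOR requests: vendor_cmd[0] selects
 * the sub-command; unknown sub-commands fail with -ENOSYS.
 */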
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	default:
		return -ENOSYS;
	}
}

int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (!sp)
				continue;

			if ((sp->type == SRB_CT_CMD ||
			    sp->type == SRB_ELS_CMD_HST ||
			    sp->type == SRB_FXIOCB_BCMD) &&
			    sp->u.bsg_job == bsg_job) {
				req->outstanding_cmds[cnt] = NULL;
				/* drop the lock across the mailbox abort */
				spin_unlock_irqrestore(&ha->hardware_lock, flags);
				if (ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					bsg_reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					bsg_reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(sp);
	return 0;
}