1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 9 #include <linux/kthread.h> 10 #include <linux/vmalloc.h> 11 #include <linux/delay.h> 12 #include <linux/bsg-lib.h> 13 14 /* BSG support for ELS/CT pass through */ 15 void 16 qla2x00_bsg_job_done(void *ptr, int res) 17 { 18 srb_t *sp = ptr; 19 struct bsg_job *bsg_job = sp->u.bsg_job; 20 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 21 22 bsg_reply->result = res; 23 bsg_job_done(bsg_job, bsg_reply->result, 24 bsg_reply->reply_payload_rcv_len); 25 sp->free(sp); 26 } 27 28 void 29 qla2x00_bsg_sp_free(void *ptr) 30 { 31 srb_t *sp = ptr; 32 struct qla_hw_data *ha = sp->vha->hw; 33 struct bsg_job *bsg_job = sp->u.bsg_job; 34 struct fc_bsg_request *bsg_request = bsg_job->request; 35 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 36 37 if (sp->type == SRB_FXIOCB_BCMD) { 38 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 39 &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 40 41 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) 42 dma_unmap_sg(&ha->pdev->dev, 43 bsg_job->request_payload.sg_list, 44 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 45 46 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) 47 dma_unmap_sg(&ha->pdev->dev, 48 bsg_job->reply_payload.sg_list, 49 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 50 } else { 51 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 52 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 53 54 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 55 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 56 } 57 58 if (sp->type == SRB_CT_CMD || 59 sp->type == SRB_FXIOCB_BCMD || 60 sp->type == SRB_ELS_CMD_HST) 61 kfree(sp->fcport); 62 qla2x00_rel_sp(sp); 63 } 64 65 int 66 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, 67 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) 68 { 69 int i, ret, num_valid; 70 uint8_t *bcode; 71 struct 
qla_fcp_prio_entry *pri_entry; 72 uint32_t *bcode_val_ptr, bcode_val; 73 74 ret = 1; 75 num_valid = 0; 76 bcode = (uint8_t *)pri_cfg; 77 bcode_val_ptr = (uint32_t *)pri_cfg; 78 bcode_val = (uint32_t)(*bcode_val_ptr); 79 80 if (bcode_val == 0xFFFFFFFF) { 81 /* No FCP Priority config data in flash */ 82 ql_dbg(ql_dbg_user, vha, 0x7051, 83 "No FCP Priority config data.\n"); 84 return 0; 85 } 86 87 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' || 88 bcode[3] != 'S') { 89 /* Invalid FCP priority data header*/ 90 ql_dbg(ql_dbg_user, vha, 0x7052, 91 "Invalid FCP Priority data header. bcode=0x%x.\n", 92 bcode_val); 93 return 0; 94 } 95 if (flag != 1) 96 return ret; 97 98 pri_entry = &pri_cfg->entry[0]; 99 for (i = 0; i < pri_cfg->num_entries; i++) { 100 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) 101 num_valid++; 102 pri_entry++; 103 } 104 105 if (num_valid == 0) { 106 /* No valid FCP priority data entries */ 107 ql_dbg(ql_dbg_user, vha, 0x7053, 108 "No valid FCP Priority data entries.\n"); 109 ret = 0; 110 } else { 111 /* FCP priority data is valid */ 112 ql_dbg(ql_dbg_user, vha, 0x7054, 113 "Valid FCP priority data. 
num entries = %d.\n", 114 num_valid); 115 } 116 117 return ret; 118 } 119 120 static int 121 qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job) 122 { 123 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 124 struct fc_bsg_request *bsg_request = bsg_job->request; 125 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 126 scsi_qla_host_t *vha = shost_priv(host); 127 struct qla_hw_data *ha = vha->hw; 128 int ret = 0; 129 uint32_t len; 130 uint32_t oper; 131 132 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) { 133 ret = -EINVAL; 134 goto exit_fcp_prio_cfg; 135 } 136 137 /* Get the sub command */ 138 oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 139 140 /* Only set config is allowed if config memory is not allocated */ 141 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) { 142 ret = -EINVAL; 143 goto exit_fcp_prio_cfg; 144 } 145 switch (oper) { 146 case QLFC_FCP_PRIO_DISABLE: 147 if (ha->flags.fcp_prio_enabled) { 148 ha->flags.fcp_prio_enabled = 0; 149 ha->fcp_prio_cfg->attributes &= 150 ~FCP_PRIO_ATTR_ENABLE; 151 qla24xx_update_all_fcp_prio(vha); 152 bsg_reply->result = DID_OK; 153 } else { 154 ret = -EINVAL; 155 bsg_reply->result = (DID_ERROR << 16); 156 goto exit_fcp_prio_cfg; 157 } 158 break; 159 160 case QLFC_FCP_PRIO_ENABLE: 161 if (!ha->flags.fcp_prio_enabled) { 162 if (ha->fcp_prio_cfg) { 163 ha->flags.fcp_prio_enabled = 1; 164 ha->fcp_prio_cfg->attributes |= 165 FCP_PRIO_ATTR_ENABLE; 166 qla24xx_update_all_fcp_prio(vha); 167 bsg_reply->result = DID_OK; 168 } else { 169 ret = -EINVAL; 170 bsg_reply->result = (DID_ERROR << 16); 171 goto exit_fcp_prio_cfg; 172 } 173 } 174 break; 175 176 case QLFC_FCP_PRIO_GET_CONFIG: 177 len = bsg_job->reply_payload.payload_len; 178 if (!len || len > FCP_PRIO_CFG_SIZE) { 179 ret = -EINVAL; 180 bsg_reply->result = (DID_ERROR << 16); 181 goto exit_fcp_prio_cfg; 182 } 183 184 bsg_reply->result = DID_OK; 185 bsg_reply->reply_payload_rcv_len = 186 sg_copy_from_buffer( 187 
bsg_job->reply_payload.sg_list, 188 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg, 189 len); 190 191 break; 192 193 case QLFC_FCP_PRIO_SET_CONFIG: 194 len = bsg_job->request_payload.payload_len; 195 if (!len || len > FCP_PRIO_CFG_SIZE) { 196 bsg_reply->result = (DID_ERROR << 16); 197 ret = -EINVAL; 198 goto exit_fcp_prio_cfg; 199 } 200 201 if (!ha->fcp_prio_cfg) { 202 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 203 if (!ha->fcp_prio_cfg) { 204 ql_log(ql_log_warn, vha, 0x7050, 205 "Unable to allocate memory for fcp prio " 206 "config data (%x).\n", FCP_PRIO_CFG_SIZE); 207 bsg_reply->result = (DID_ERROR << 16); 208 ret = -ENOMEM; 209 goto exit_fcp_prio_cfg; 210 } 211 } 212 213 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE); 214 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 215 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg, 216 FCP_PRIO_CFG_SIZE); 217 218 /* validate fcp priority data */ 219 220 if (!qla24xx_fcp_prio_cfg_valid(vha, 221 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) { 222 bsg_reply->result = (DID_ERROR << 16); 223 ret = -EINVAL; 224 /* If buffer was invalidatic int 225 * fcp_prio_cfg is of no use 226 */ 227 vfree(ha->fcp_prio_cfg); 228 ha->fcp_prio_cfg = NULL; 229 goto exit_fcp_prio_cfg; 230 } 231 232 ha->flags.fcp_prio_enabled = 0; 233 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE) 234 ha->flags.fcp_prio_enabled = 1; 235 qla24xx_update_all_fcp_prio(vha); 236 bsg_reply->result = DID_OK; 237 break; 238 default: 239 ret = -EINVAL; 240 break; 241 } 242 exit_fcp_prio_cfg: 243 if (!ret) 244 bsg_job_done(bsg_job, bsg_reply->result, 245 bsg_reply->reply_payload_rcv_len); 246 return ret; 247 } 248 249 static int 250 qla2x00_process_els(struct bsg_job *bsg_job) 251 { 252 struct fc_bsg_request *bsg_request = bsg_job->request; 253 struct fc_rport *rport; 254 fc_port_t *fcport = NULL; 255 struct Scsi_Host *host; 256 scsi_qla_host_t *vha; 257 struct qla_hw_data *ha; 258 srb_t *sp; 259 const char *type; 260 int req_sg_cnt, rsp_sg_cnt; 261 
int rval = (DRIVER_ERROR << 16); 262 uint16_t nextlid = 0; 263 264 if (bsg_request->msgcode == FC_BSG_RPT_ELS) { 265 rport = fc_bsg_to_rport(bsg_job); 266 fcport = *(fc_port_t **) rport->dd_data; 267 host = rport_to_shost(rport); 268 vha = shost_priv(host); 269 ha = vha->hw; 270 type = "FC_BSG_RPT_ELS"; 271 } else { 272 host = fc_bsg_to_shost(bsg_job); 273 vha = shost_priv(host); 274 ha = vha->hw; 275 type = "FC_BSG_HST_ELS_NOLOGIN"; 276 } 277 278 if (!vha->flags.online) { 279 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); 280 rval = -EIO; 281 goto done; 282 } 283 284 /* pass through is supported only for ISP 4Gb or higher */ 285 if (!IS_FWI2_CAPABLE(ha)) { 286 ql_dbg(ql_dbg_user, vha, 0x7001, 287 "ELS passthru not supported for ISP23xx based adapters.\n"); 288 rval = -EPERM; 289 goto done; 290 } 291 292 /* Multiple SG's are not supported for ELS requests */ 293 if (bsg_job->request_payload.sg_cnt > 1 || 294 bsg_job->reply_payload.sg_cnt > 1) { 295 ql_dbg(ql_dbg_user, vha, 0x7002, 296 "Multiple SG's are not supported for ELS requests, " 297 "request_sg_cnt=%x reply_sg_cnt=%x.\n", 298 bsg_job->request_payload.sg_cnt, 299 bsg_job->reply_payload.sg_cnt); 300 rval = -EPERM; 301 goto done; 302 } 303 304 /* ELS request for rport */ 305 if (bsg_request->msgcode == FC_BSG_RPT_ELS) { 306 /* make sure the rport is logged in, 307 * if not perform fabric login 308 */ 309 if (qla2x00_fabric_login(vha, fcport, &nextlid)) { 310 ql_dbg(ql_dbg_user, vha, 0x7003, 311 "Failed to login port %06X for ELS passthru.\n", 312 fcport->d_id.b24); 313 rval = -EIO; 314 goto done; 315 } 316 } else { 317 /* Allocate a dummy fcport structure, since functions 318 * preparing the IOCB and mailbox command retrieves port 319 * specific information from fcport structure. 
For Host based 320 * ELS commands there will be no fcport structure allocated 321 */ 322 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 323 if (!fcport) { 324 rval = -ENOMEM; 325 goto done; 326 } 327 328 /* Initialize all required fields of fcport */ 329 fcport->vha = vha; 330 fcport->d_id.b.al_pa = 331 bsg_request->rqst_data.h_els.port_id[0]; 332 fcport->d_id.b.area = 333 bsg_request->rqst_data.h_els.port_id[1]; 334 fcport->d_id.b.domain = 335 bsg_request->rqst_data.h_els.port_id[2]; 336 fcport->loop_id = 337 (fcport->d_id.b.al_pa == 0xFD) ? 338 NPH_FABRIC_CONTROLLER : NPH_F_PORT; 339 } 340 341 req_sg_cnt = 342 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 343 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 344 if (!req_sg_cnt) { 345 rval = -ENOMEM; 346 goto done_free_fcport; 347 } 348 349 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 350 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 351 if (!rsp_sg_cnt) { 352 rval = -ENOMEM; 353 goto done_free_fcport; 354 } 355 356 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 357 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 358 ql_log(ql_log_warn, vha, 0x7008, 359 "dma mapping resulted in different sg counts, " 360 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x " 361 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt, 362 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 363 rval = -EAGAIN; 364 goto done_unmap_sg; 365 } 366 367 /* Alloc SRB structure */ 368 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 369 if (!sp) { 370 rval = -ENOMEM; 371 goto done_unmap_sg; 372 } 373 374 sp->type = 375 (bsg_request->msgcode == FC_BSG_RPT_ELS ? 376 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); 377 sp->name = 378 (bsg_request->msgcode == FC_BSG_RPT_ELS ? 
379 "bsg_els_rpt" : "bsg_els_hst"); 380 sp->u.bsg_job = bsg_job; 381 sp->free = qla2x00_bsg_sp_free; 382 sp->done = qla2x00_bsg_job_done; 383 384 ql_dbg(ql_dbg_user, vha, 0x700a, 385 "bsg rqst type: %s els type: %x - loop-id=%x " 386 "portid=%-2x%02x%02x.\n", type, 387 bsg_request->rqst_data.h_els.command_code, fcport->loop_id, 388 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 389 390 rval = qla2x00_start_sp(sp); 391 if (rval != QLA_SUCCESS) { 392 ql_log(ql_log_warn, vha, 0x700e, 393 "qla2x00_start_sp failed = %d\n", rval); 394 qla2x00_rel_sp(sp); 395 rval = -EIO; 396 goto done_unmap_sg; 397 } 398 return rval; 399 400 done_unmap_sg: 401 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 402 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 403 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 404 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 405 goto done_free_fcport; 406 407 done_free_fcport: 408 if (bsg_request->msgcode == FC_BSG_RPT_ELS) 409 kfree(fcport); 410 done: 411 return rval; 412 } 413 414 static inline uint16_t 415 qla24xx_calc_ct_iocbs(uint16_t dsds) 416 { 417 uint16_t iocbs; 418 419 iocbs = 1; 420 if (dsds > 2) { 421 iocbs += (dsds - 2) / 5; 422 if ((dsds - 2) % 5) 423 iocbs++; 424 } 425 return iocbs; 426 } 427 428 static int 429 qla2x00_process_ct(struct bsg_job *bsg_job) 430 { 431 srb_t *sp; 432 struct fc_bsg_request *bsg_request = bsg_job->request; 433 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 434 scsi_qla_host_t *vha = shost_priv(host); 435 struct qla_hw_data *ha = vha->hw; 436 int rval = (DRIVER_ERROR << 16); 437 int req_sg_cnt, rsp_sg_cnt; 438 uint16_t loop_id; 439 struct fc_port *fcport; 440 char *type = "FC_BSG_HST_CT"; 441 442 req_sg_cnt = 443 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 444 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 445 if (!req_sg_cnt) { 446 ql_log(ql_log_warn, vha, 0x700f, 447 "dma_map_sg return %d for request\n", req_sg_cnt); 448 rval = -ENOMEM; 
449 goto done; 450 } 451 452 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 453 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 454 if (!rsp_sg_cnt) { 455 ql_log(ql_log_warn, vha, 0x7010, 456 "dma_map_sg return %d for reply\n", rsp_sg_cnt); 457 rval = -ENOMEM; 458 goto done; 459 } 460 461 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 462 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 463 ql_log(ql_log_warn, vha, 0x7011, 464 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " 465 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, 466 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 467 rval = -EAGAIN; 468 goto done_unmap_sg; 469 } 470 471 if (!vha->flags.online) { 472 ql_log(ql_log_warn, vha, 0x7012, 473 "Host is not online.\n"); 474 rval = -EIO; 475 goto done_unmap_sg; 476 } 477 478 loop_id = 479 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000) 480 >> 24; 481 switch (loop_id) { 482 case 0xFC: 483 loop_id = cpu_to_le16(NPH_SNS); 484 break; 485 case 0xFA: 486 loop_id = vha->mgmt_svr_loop_id; 487 break; 488 default: 489 ql_dbg(ql_dbg_user, vha, 0x7013, 490 "Unknown loop id: %x.\n", loop_id); 491 rval = -EINVAL; 492 goto done_unmap_sg; 493 } 494 495 /* Allocate a dummy fcport structure, since functions preparing the 496 * IOCB and mailbox command retrieves port specific information 497 * from fcport structure. 
For Host based ELS commands there will be 498 * no fcport structure allocated 499 */ 500 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 501 if (!fcport) { 502 ql_log(ql_log_warn, vha, 0x7014, 503 "Failed to allocate fcport.\n"); 504 rval = -ENOMEM; 505 goto done_unmap_sg; 506 } 507 508 /* Initialize all required fields of fcport */ 509 fcport->vha = vha; 510 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0]; 511 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1]; 512 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2]; 513 fcport->loop_id = loop_id; 514 515 /* Alloc SRB structure */ 516 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 517 if (!sp) { 518 ql_log(ql_log_warn, vha, 0x7015, 519 "qla2x00_get_sp failed.\n"); 520 rval = -ENOMEM; 521 goto done_free_fcport; 522 } 523 524 sp->type = SRB_CT_CMD; 525 sp->name = "bsg_ct"; 526 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); 527 sp->u.bsg_job = bsg_job; 528 sp->free = qla2x00_bsg_sp_free; 529 sp->done = qla2x00_bsg_job_done; 530 531 ql_dbg(ql_dbg_user, vha, 0x7016, 532 "bsg rqst type: %s else type: %x - " 533 "loop-id=%x portid=%02x%02x%02x.\n", type, 534 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16), 535 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 536 fcport->d_id.b.al_pa); 537 538 rval = qla2x00_start_sp(sp); 539 if (rval != QLA_SUCCESS) { 540 ql_log(ql_log_warn, vha, 0x7017, 541 "qla2x00_start_sp failed=%d.\n", rval); 542 qla2x00_rel_sp(sp); 543 rval = -EIO; 544 goto done_free_fcport; 545 } 546 return rval; 547 548 done_free_fcport: 549 kfree(fcport); 550 done_unmap_sg: 551 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 552 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 553 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 554 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 555 done: 556 return rval; 557 } 558 559 /* Disable loopback mode */ 560 static inline int 561 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, 
uint16_t *config, 562 int wait, int wait2) 563 { 564 int ret = 0; 565 int rval = 0; 566 uint16_t new_config[4]; 567 struct qla_hw_data *ha = vha->hw; 568 569 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) 570 goto done_reset_internal; 571 572 memset(new_config, 0 , sizeof(new_config)); 573 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == 574 ENABLE_INTERNAL_LOOPBACK || 575 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == 576 ENABLE_EXTERNAL_LOOPBACK) { 577 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; 578 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n", 579 (new_config[0] & INTERNAL_LOOPBACK_MASK)); 580 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; 581 582 ha->notify_dcbx_comp = wait; 583 ha->notify_lb_portup_comp = wait2; 584 585 ret = qla81xx_set_port_config(vha, new_config); 586 if (ret != QLA_SUCCESS) { 587 ql_log(ql_log_warn, vha, 0x7025, 588 "Set port config failed.\n"); 589 ha->notify_dcbx_comp = 0; 590 ha->notify_lb_portup_comp = 0; 591 rval = -EINVAL; 592 goto done_reset_internal; 593 } 594 595 /* Wait for DCBX complete event */ 596 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, 597 (DCBX_COMP_TIMEOUT * HZ))) { 598 ql_dbg(ql_dbg_user, vha, 0x7026, 599 "DCBX completion not received.\n"); 600 ha->notify_dcbx_comp = 0; 601 ha->notify_lb_portup_comp = 0; 602 rval = -EINVAL; 603 goto done_reset_internal; 604 } else 605 ql_dbg(ql_dbg_user, vha, 0x7027, 606 "DCBX completion received.\n"); 607 608 if (wait2 && 609 !wait_for_completion_timeout(&ha->lb_portup_comp, 610 (LB_PORTUP_COMP_TIMEOUT * HZ))) { 611 ql_dbg(ql_dbg_user, vha, 0x70c5, 612 "Port up completion not received.\n"); 613 ha->notify_lb_portup_comp = 0; 614 rval = -EINVAL; 615 goto done_reset_internal; 616 } else 617 ql_dbg(ql_dbg_user, vha, 0x70c6, 618 "Port up completion received.\n"); 619 620 ha->notify_dcbx_comp = 0; 621 ha->notify_lb_portup_comp = 0; 622 } 623 done_reset_internal: 624 return rval; 625 } 626 627 /* 628 * Set the port configuration to 
enable the internal or external loopback 629 * depending on the loopback mode. 630 */ 631 static inline int 632 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, 633 uint16_t *new_config, uint16_t mode) 634 { 635 int ret = 0; 636 int rval = 0; 637 unsigned long rem_tmo = 0, current_tmo = 0; 638 struct qla_hw_data *ha = vha->hw; 639 640 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) 641 goto done_set_internal; 642 643 if (mode == INTERNAL_LOOPBACK) 644 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 645 else if (mode == EXTERNAL_LOOPBACK) 646 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1); 647 ql_dbg(ql_dbg_user, vha, 0x70be, 648 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK)); 649 650 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3); 651 652 ha->notify_dcbx_comp = 1; 653 ret = qla81xx_set_port_config(vha, new_config); 654 if (ret != QLA_SUCCESS) { 655 ql_log(ql_log_warn, vha, 0x7021, 656 "set port config failed.\n"); 657 ha->notify_dcbx_comp = 0; 658 rval = -EINVAL; 659 goto done_set_internal; 660 } 661 662 /* Wait for DCBX complete event */ 663 current_tmo = DCBX_COMP_TIMEOUT * HZ; 664 while (1) { 665 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp, 666 current_tmo); 667 if (!ha->idc_extend_tmo || rem_tmo) { 668 ha->idc_extend_tmo = 0; 669 break; 670 } 671 current_tmo = ha->idc_extend_tmo * HZ; 672 ha->idc_extend_tmo = 0; 673 } 674 675 if (!rem_tmo) { 676 ql_dbg(ql_dbg_user, vha, 0x7022, 677 "DCBX completion not received.\n"); 678 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); 679 /* 680 * If the reset of the loopback mode doesn't work take a FCoE 681 * dump and reset the chip. 
682 */ 683 if (ret) { 684 ha->isp_ops->fw_dump(vha, 0); 685 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 686 } 687 rval = -EINVAL; 688 } else { 689 if (ha->flags.idc_compl_status) { 690 ql_dbg(ql_dbg_user, vha, 0x70c3, 691 "Bad status in IDC Completion AEN\n"); 692 rval = -EINVAL; 693 ha->flags.idc_compl_status = 0; 694 } else 695 ql_dbg(ql_dbg_user, vha, 0x7023, 696 "DCBX completion received.\n"); 697 } 698 699 ha->notify_dcbx_comp = 0; 700 ha->idc_extend_tmo = 0; 701 702 done_set_internal: 703 return rval; 704 } 705 706 static int 707 qla2x00_process_loopback(struct bsg_job *bsg_job) 708 { 709 struct fc_bsg_request *bsg_request = bsg_job->request; 710 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 711 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 712 scsi_qla_host_t *vha = shost_priv(host); 713 struct qla_hw_data *ha = vha->hw; 714 int rval; 715 uint8_t command_sent; 716 char *type; 717 struct msg_echo_lb elreq; 718 uint16_t response[MAILBOX_REGISTER_COUNT]; 719 uint16_t config[4], new_config[4]; 720 uint8_t *fw_sts_ptr; 721 uint8_t *req_data = NULL; 722 dma_addr_t req_data_dma; 723 uint32_t req_data_len; 724 uint8_t *rsp_data = NULL; 725 dma_addr_t rsp_data_dma; 726 uint32_t rsp_data_len; 727 728 if (!vha->flags.online) { 729 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n"); 730 return -EIO; 731 } 732 733 memset(&elreq, 0, sizeof(elreq)); 734 735 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, 736 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 737 DMA_TO_DEVICE); 738 739 if (!elreq.req_sg_cnt) { 740 ql_log(ql_log_warn, vha, 0x701a, 741 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt); 742 return -ENOMEM; 743 } 744 745 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 746 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 747 DMA_FROM_DEVICE); 748 749 if (!elreq.rsp_sg_cnt) { 750 ql_log(ql_log_warn, vha, 0x701b, 751 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt); 752 rval = -ENOMEM; 753 goto 
done_unmap_req_sg; 754 } 755 756 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || 757 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 758 ql_log(ql_log_warn, vha, 0x701c, 759 "dma mapping resulted in different sg counts, " 760 "request_sg_cnt: %x dma_request_sg_cnt: %x " 761 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", 762 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, 763 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt); 764 rval = -EAGAIN; 765 goto done_unmap_sg; 766 } 767 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 768 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, 769 &req_data_dma, GFP_KERNEL); 770 if (!req_data) { 771 ql_log(ql_log_warn, vha, 0x701d, 772 "dma alloc failed for req_data.\n"); 773 rval = -ENOMEM; 774 goto done_unmap_sg; 775 } 776 777 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, 778 &rsp_data_dma, GFP_KERNEL); 779 if (!rsp_data) { 780 ql_log(ql_log_warn, vha, 0x7004, 781 "dma alloc failed for rsp_data.\n"); 782 rval = -ENOMEM; 783 goto done_free_dma_req; 784 } 785 786 /* Copy the request buffer in req_data now */ 787 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 788 bsg_job->request_payload.sg_cnt, req_data, req_data_len); 789 790 elreq.send_dma = req_data_dma; 791 elreq.rcv_dma = rsp_data_dma; 792 elreq.transfer_size = req_data_len; 793 794 elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 795 elreq.iteration_count = 796 bsg_request->rqst_data.h_vendor.vendor_cmd[2]; 797 798 if (atomic_read(&vha->loop_state) == LOOP_READY && 799 (ha->current_topology == ISP_CFG_F || 800 (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && 801 req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 802 elreq.options == EXTERNAL_LOOPBACK) { 803 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 804 ql_dbg(ql_dbg_user, vha, 0x701e, 805 "BSG request type: %s.\n", type); 806 command_sent = INT_DEF_LB_ECHO_CMD; 807 rval = qla2x00_echo_test(vha, &elreq, response); 808 } else { 809 if 
(IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) { 810 memset(config, 0, sizeof(config)); 811 memset(new_config, 0, sizeof(new_config)); 812 813 if (qla81xx_get_port_config(vha, config)) { 814 ql_log(ql_log_warn, vha, 0x701f, 815 "Get port config failed.\n"); 816 rval = -EPERM; 817 goto done_free_dma_rsp; 818 } 819 820 if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) { 821 ql_dbg(ql_dbg_user, vha, 0x70c4, 822 "Loopback operation already in " 823 "progress.\n"); 824 rval = -EAGAIN; 825 goto done_free_dma_rsp; 826 } 827 828 ql_dbg(ql_dbg_user, vha, 0x70c0, 829 "elreq.options=%04x\n", elreq.options); 830 831 if (elreq.options == EXTERNAL_LOOPBACK) 832 if (IS_QLA8031(ha) || IS_QLA8044(ha)) 833 rval = qla81xx_set_loopback_mode(vha, 834 config, new_config, elreq.options); 835 else 836 rval = qla81xx_reset_loopback_mode(vha, 837 config, 1, 0); 838 else 839 rval = qla81xx_set_loopback_mode(vha, config, 840 new_config, elreq.options); 841 842 if (rval) { 843 rval = -EPERM; 844 goto done_free_dma_rsp; 845 } 846 847 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 848 ql_dbg(ql_dbg_user, vha, 0x7028, 849 "BSG request type: %s.\n", type); 850 851 command_sent = INT_DEF_LB_LOOPBACK_CMD; 852 rval = qla2x00_loopback_test(vha, &elreq, response); 853 854 if (response[0] == MBS_COMMAND_ERROR && 855 response[1] == MBS_LB_RESET) { 856 ql_log(ql_log_warn, vha, 0x7029, 857 "MBX command error, Aborting ISP.\n"); 858 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 859 qla2xxx_wake_dpc(vha); 860 qla2x00_wait_for_chip_reset(vha); 861 /* Also reset the MPI */ 862 if (IS_QLA81XX(ha)) { 863 if (qla81xx_restart_mpi_firmware(vha) != 864 QLA_SUCCESS) { 865 ql_log(ql_log_warn, vha, 0x702a, 866 "MPI reset failed.\n"); 867 } 868 } 869 870 rval = -EIO; 871 goto done_free_dma_rsp; 872 } 873 874 if (new_config[0]) { 875 int ret; 876 877 /* Revert back to original port config 878 * Also clear internal loopback 879 */ 880 ret = qla81xx_reset_loopback_mode(vha, 881 new_config, 0, 1); 882 if (ret) { 883 /* 884 * If the 
reset of the loopback mode 885 * doesn't work take FCoE dump and then 886 * reset the chip. 887 */ 888 ha->isp_ops->fw_dump(vha, 0); 889 set_bit(ISP_ABORT_NEEDED, 890 &vha->dpc_flags); 891 } 892 893 } 894 895 } else { 896 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 897 ql_dbg(ql_dbg_user, vha, 0x702b, 898 "BSG request type: %s.\n", type); 899 command_sent = INT_DEF_LB_LOOPBACK_CMD; 900 rval = qla2x00_loopback_test(vha, &elreq, response); 901 } 902 } 903 904 if (rval) { 905 ql_log(ql_log_warn, vha, 0x702c, 906 "Vendor request %s failed.\n", type); 907 908 rval = 0; 909 bsg_reply->result = (DID_ERROR << 16); 910 bsg_reply->reply_payload_rcv_len = 0; 911 } else { 912 ql_dbg(ql_dbg_user, vha, 0x702d, 913 "Vendor request %s completed.\n", type); 914 bsg_reply->result = (DID_OK << 16); 915 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 916 bsg_job->reply_payload.sg_cnt, rsp_data, 917 rsp_data_len); 918 } 919 920 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 921 sizeof(response) + sizeof(uint8_t); 922 fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) + 923 sizeof(struct fc_bsg_reply); 924 memcpy(fw_sts_ptr, response, sizeof(response)); 925 fw_sts_ptr += sizeof(response); 926 *fw_sts_ptr = command_sent; 927 928 done_free_dma_rsp: 929 dma_free_coherent(&ha->pdev->dev, rsp_data_len, 930 rsp_data, rsp_data_dma); 931 done_free_dma_req: 932 dma_free_coherent(&ha->pdev->dev, req_data_len, 933 req_data, req_data_dma); 934 done_unmap_sg: 935 dma_unmap_sg(&ha->pdev->dev, 936 bsg_job->reply_payload.sg_list, 937 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 938 done_unmap_req_sg: 939 dma_unmap_sg(&ha->pdev->dev, 940 bsg_job->request_payload.sg_list, 941 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 942 if (!rval) 943 bsg_job_done(bsg_job, bsg_reply->result, 944 bsg_reply->reply_payload_rcv_len); 945 return rval; 946 } 947 948 static int 949 qla84xx_reset(struct bsg_job *bsg_job) 950 { 951 struct fc_bsg_request *bsg_request = bsg_job->request; 952 struct Scsi_Host *host 
= fc_bsg_to_shost(bsg_job); 953 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 954 scsi_qla_host_t *vha = shost_priv(host); 955 struct qla_hw_data *ha = vha->hw; 956 int rval = 0; 957 uint32_t flag; 958 959 if (!IS_QLA84XX(ha)) { 960 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n"); 961 return -EINVAL; 962 } 963 964 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 965 966 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); 967 968 if (rval) { 969 ql_log(ql_log_warn, vha, 0x7030, 970 "Vendor request 84xx reset failed.\n"); 971 rval = (DID_ERROR << 16); 972 973 } else { 974 ql_dbg(ql_dbg_user, vha, 0x7031, 975 "Vendor request 84xx reset completed.\n"); 976 bsg_reply->result = DID_OK; 977 bsg_job_done(bsg_job, bsg_reply->result, 978 bsg_reply->reply_payload_rcv_len); 979 } 980 981 return rval; 982 } 983 984 static int 985 qla84xx_updatefw(struct bsg_job *bsg_job) 986 { 987 struct fc_bsg_request *bsg_request = bsg_job->request; 988 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 989 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 990 scsi_qla_host_t *vha = shost_priv(host); 991 struct qla_hw_data *ha = vha->hw; 992 struct verify_chip_entry_84xx *mn = NULL; 993 dma_addr_t mn_dma, fw_dma; 994 void *fw_buf = NULL; 995 int rval = 0; 996 uint32_t sg_cnt; 997 uint32_t data_len; 998 uint16_t options; 999 uint32_t flag; 1000 uint32_t fw_ver; 1001 1002 if (!IS_QLA84XX(ha)) { 1003 ql_dbg(ql_dbg_user, vha, 0x7032, 1004 "Not 84xx, exiting.\n"); 1005 return -EINVAL; 1006 } 1007 1008 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1009 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1010 if (!sg_cnt) { 1011 ql_log(ql_log_warn, vha, 0x7033, 1012 "dma_map_sg returned %d for request.\n", sg_cnt); 1013 return -ENOMEM; 1014 } 1015 1016 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1017 ql_log(ql_log_warn, vha, 0x7034, 1018 "DMA mapping resulted in different sg counts, " 1019 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", 1020 
bsg_job->request_payload.sg_cnt, sg_cnt); 1021 rval = -EAGAIN; 1022 goto done_unmap_sg; 1023 } 1024 1025 data_len = bsg_job->request_payload.payload_len; 1026 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, 1027 &fw_dma, GFP_KERNEL); 1028 if (!fw_buf) { 1029 ql_log(ql_log_warn, vha, 0x7035, 1030 "DMA alloc failed for fw_buf.\n"); 1031 rval = -ENOMEM; 1032 goto done_unmap_sg; 1033 } 1034 1035 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1036 bsg_job->request_payload.sg_cnt, fw_buf, data_len); 1037 1038 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1039 if (!mn) { 1040 ql_log(ql_log_warn, vha, 0x7036, 1041 "DMA alloc failed for fw buffer.\n"); 1042 rval = -ENOMEM; 1043 goto done_free_fw_buf; 1044 } 1045 1046 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1047 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2))); 1048 1049 memset(mn, 0, sizeof(struct access_chip_84xx)); 1050 mn->entry_type = VERIFY_CHIP_IOCB_TYPE; 1051 mn->entry_count = 1; 1052 1053 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA; 1054 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD) 1055 options |= VCO_DIAG_FW; 1056 1057 mn->options = cpu_to_le16(options); 1058 mn->fw_ver = cpu_to_le32(fw_ver); 1059 mn->fw_size = cpu_to_le32(data_len); 1060 mn->fw_seq_size = cpu_to_le32(data_len); 1061 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma)); 1062 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma)); 1063 mn->dseg_length = cpu_to_le32(data_len); 1064 mn->data_seg_cnt = cpu_to_le16(1); 1065 1066 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 1067 1068 if (rval) { 1069 ql_log(ql_log_warn, vha, 0x7037, 1070 "Vendor request 84xx updatefw failed.\n"); 1071 1072 rval = (DID_ERROR << 16); 1073 } else { 1074 ql_dbg(ql_dbg_user, vha, 0x7038, 1075 "Vendor request 84xx updatefw completed.\n"); 1076 1077 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1078 bsg_reply->result = DID_OK; 1079 } 1080 1081 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 1082 1083 done_free_fw_buf: 1084 
dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma); 1085 1086 done_unmap_sg: 1087 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1088 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1089 1090 if (!rval) 1091 bsg_job_done(bsg_job, bsg_reply->result, 1092 bsg_reply->reply_payload_rcv_len); 1093 return rval; 1094 } 1095 1096 static int 1097 qla84xx_mgmt_cmd(struct bsg_job *bsg_job) 1098 { 1099 struct fc_bsg_request *bsg_request = bsg_job->request; 1100 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1101 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1102 scsi_qla_host_t *vha = shost_priv(host); 1103 struct qla_hw_data *ha = vha->hw; 1104 struct access_chip_84xx *mn = NULL; 1105 dma_addr_t mn_dma, mgmt_dma; 1106 void *mgmt_b = NULL; 1107 int rval = 0; 1108 struct qla_bsg_a84_mgmt *ql84_mgmt; 1109 uint32_t sg_cnt; 1110 uint32_t data_len = 0; 1111 uint32_t dma_direction = DMA_NONE; 1112 1113 if (!IS_QLA84XX(ha)) { 1114 ql_log(ql_log_warn, vha, 0x703a, 1115 "Not 84xx, exiting.\n"); 1116 return -EINVAL; 1117 } 1118 1119 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1120 if (!mn) { 1121 ql_log(ql_log_warn, vha, 0x703c, 1122 "DMA alloc failed for fw buffer.\n"); 1123 return -ENOMEM; 1124 } 1125 1126 memset(mn, 0, sizeof(struct access_chip_84xx)); 1127 mn->entry_type = ACCESS_CHIP_IOCB_TYPE; 1128 mn->entry_count = 1; 1129 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request); 1130 switch (ql84_mgmt->mgmt.cmd) { 1131 case QLA84_MGMT_READ_MEM: 1132 case QLA84_MGMT_GET_INFO: 1133 sg_cnt = dma_map_sg(&ha->pdev->dev, 1134 bsg_job->reply_payload.sg_list, 1135 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1136 if (!sg_cnt) { 1137 ql_log(ql_log_warn, vha, 0x703d, 1138 "dma_map_sg returned %d for reply.\n", sg_cnt); 1139 rval = -ENOMEM; 1140 goto exit_mgmt; 1141 } 1142 1143 dma_direction = DMA_FROM_DEVICE; 1144 1145 if (sg_cnt != bsg_job->reply_payload.sg_cnt) { 1146 ql_log(ql_log_warn, vha, 0x703e, 1147 "DMA mapping resulted in 
different sg counts, " 1148 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", 1149 bsg_job->reply_payload.sg_cnt, sg_cnt); 1150 rval = -EAGAIN; 1151 goto done_unmap_sg; 1152 } 1153 1154 data_len = bsg_job->reply_payload.payload_len; 1155 1156 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1157 &mgmt_dma, GFP_KERNEL); 1158 if (!mgmt_b) { 1159 ql_log(ql_log_warn, vha, 0x703f, 1160 "DMA alloc failed for mgmt_b.\n"); 1161 rval = -ENOMEM; 1162 goto done_unmap_sg; 1163 } 1164 1165 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) { 1166 mn->options = cpu_to_le16(ACO_DUMP_MEMORY); 1167 mn->parameter1 = 1168 cpu_to_le32( 1169 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); 1170 1171 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) { 1172 mn->options = cpu_to_le16(ACO_REQUEST_INFO); 1173 mn->parameter1 = 1174 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type); 1175 1176 mn->parameter2 = 1177 cpu_to_le32( 1178 ql84_mgmt->mgmt.mgmtp.u.info.context); 1179 } 1180 break; 1181 1182 case QLA84_MGMT_WRITE_MEM: 1183 sg_cnt = dma_map_sg(&ha->pdev->dev, 1184 bsg_job->request_payload.sg_list, 1185 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1186 1187 if (!sg_cnt) { 1188 ql_log(ql_log_warn, vha, 0x7040, 1189 "dma_map_sg returned %d.\n", sg_cnt); 1190 rval = -ENOMEM; 1191 goto exit_mgmt; 1192 } 1193 1194 dma_direction = DMA_TO_DEVICE; 1195 1196 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1197 ql_log(ql_log_warn, vha, 0x7041, 1198 "DMA mapping resulted in different sg counts, " 1199 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", 1200 bsg_job->request_payload.sg_cnt, sg_cnt); 1201 rval = -EAGAIN; 1202 goto done_unmap_sg; 1203 } 1204 1205 data_len = bsg_job->request_payload.payload_len; 1206 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1207 &mgmt_dma, GFP_KERNEL); 1208 if (!mgmt_b) { 1209 ql_log(ql_log_warn, vha, 0x7042, 1210 "DMA alloc failed for mgmt_b.\n"); 1211 rval = -ENOMEM; 1212 goto done_unmap_sg; 1213 } 1214 1215 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 
1216 bsg_job->request_payload.sg_cnt, mgmt_b, data_len); 1217 1218 mn->options = cpu_to_le16(ACO_LOAD_MEMORY); 1219 mn->parameter1 = 1220 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); 1221 break; 1222 1223 case QLA84_MGMT_CHNG_CONFIG: 1224 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM); 1225 mn->parameter1 = 1226 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id); 1227 1228 mn->parameter2 = 1229 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0); 1230 1231 mn->parameter3 = 1232 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1); 1233 break; 1234 1235 default: 1236 rval = -EIO; 1237 goto exit_mgmt; 1238 } 1239 1240 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) { 1241 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len); 1242 mn->dseg_count = cpu_to_le16(1); 1243 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma)); 1244 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma)); 1245 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len); 1246 } 1247 1248 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); 1249 1250 if (rval) { 1251 ql_log(ql_log_warn, vha, 0x7043, 1252 "Vendor request 84xx mgmt failed.\n"); 1253 1254 rval = (DID_ERROR << 16); 1255 1256 } else { 1257 ql_dbg(ql_dbg_user, vha, 0x7044, 1258 "Vendor request 84xx mgmt completed.\n"); 1259 1260 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1261 bsg_reply->result = DID_OK; 1262 1263 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) || 1264 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) { 1265 bsg_reply->reply_payload_rcv_len = 1266 bsg_job->reply_payload.payload_len; 1267 1268 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1269 bsg_job->reply_payload.sg_cnt, mgmt_b, 1270 data_len); 1271 } 1272 } 1273 1274 done_unmap_sg: 1275 if (mgmt_b) 1276 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma); 1277 1278 if (dma_direction == DMA_TO_DEVICE) 1279 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1280 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1281 else if (dma_direction == 
DMA_FROM_DEVICE) 1282 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 1283 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1284 1285 exit_mgmt: 1286 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 1287 1288 if (!rval) 1289 bsg_job_done(bsg_job, bsg_reply->result, 1290 bsg_reply->reply_payload_rcv_len); 1291 return rval; 1292 } 1293 1294 static int 1295 qla24xx_iidma(struct bsg_job *bsg_job) 1296 { 1297 struct fc_bsg_request *bsg_request = bsg_job->request; 1298 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1299 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1300 scsi_qla_host_t *vha = shost_priv(host); 1301 int rval = 0; 1302 struct qla_port_param *port_param = NULL; 1303 fc_port_t *fcport = NULL; 1304 int found = 0; 1305 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1306 uint8_t *rsp_ptr = NULL; 1307 1308 if (!IS_IIDMA_CAPABLE(vha->hw)) { 1309 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); 1310 return -EINVAL; 1311 } 1312 1313 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request); 1314 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { 1315 ql_log(ql_log_warn, vha, 0x7048, 1316 "Invalid destination type.\n"); 1317 return -EINVAL; 1318 } 1319 1320 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1321 if (fcport->port_type != FCT_TARGET) 1322 continue; 1323 1324 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn, 1325 fcport->port_name, sizeof(fcport->port_name))) 1326 continue; 1327 1328 found = 1; 1329 break; 1330 } 1331 1332 if (!found) { 1333 ql_log(ql_log_warn, vha, 0x7049, 1334 "Failed to find port.\n"); 1335 return -EINVAL; 1336 } 1337 1338 if (atomic_read(&fcport->state) != FCS_ONLINE) { 1339 ql_log(ql_log_warn, vha, 0x704a, 1340 "Port is not online.\n"); 1341 return -EINVAL; 1342 } 1343 1344 if (fcport->flags & FCF_LOGIN_NEEDED) { 1345 ql_log(ql_log_warn, vha, 0x704b, 1346 "Remote port not logged in flags = 0x%x.\n", fcport->flags); 1347 return -EINVAL; 1348 } 1349 1350 if (port_param->mode) 1351 rval = 
qla2x00_set_idma_speed(vha, fcport->loop_id, 1352 port_param->speed, mb); 1353 else 1354 rval = qla2x00_get_idma_speed(vha, fcport->loop_id, 1355 &port_param->speed, mb); 1356 1357 if (rval) { 1358 ql_log(ql_log_warn, vha, 0x704c, 1359 "iIDMA cmd failed for %8phN -- " 1360 "%04x %x %04x %04x.\n", fcport->port_name, 1361 rval, fcport->fp_speed, mb[0], mb[1]); 1362 rval = (DID_ERROR << 16); 1363 } else { 1364 if (!port_param->mode) { 1365 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 1366 sizeof(struct qla_port_param); 1367 1368 rsp_ptr = ((uint8_t *)bsg_reply) + 1369 sizeof(struct fc_bsg_reply); 1370 1371 memcpy(rsp_ptr, port_param, 1372 sizeof(struct qla_port_param)); 1373 } 1374 1375 bsg_reply->result = DID_OK; 1376 bsg_job_done(bsg_job, bsg_reply->result, 1377 bsg_reply->reply_payload_rcv_len); 1378 } 1379 1380 return rval; 1381 } 1382 1383 static int 1384 qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha, 1385 uint8_t is_update) 1386 { 1387 struct fc_bsg_request *bsg_request = bsg_job->request; 1388 uint32_t start = 0; 1389 int valid = 0; 1390 struct qla_hw_data *ha = vha->hw; 1391 1392 if (unlikely(pci_channel_offline(ha->pdev))) 1393 return -EINVAL; 1394 1395 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1396 if (start > ha->optrom_size) { 1397 ql_log(ql_log_warn, vha, 0x7055, 1398 "start %d > optrom_size %d.\n", start, ha->optrom_size); 1399 return -EINVAL; 1400 } 1401 1402 if (ha->optrom_state != QLA_SWAITING) { 1403 ql_log(ql_log_info, vha, 0x7056, 1404 "optrom_state %d.\n", ha->optrom_state); 1405 return -EBUSY; 1406 } 1407 1408 ha->optrom_region_start = start; 1409 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update); 1410 if (is_update) { 1411 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 1412 valid = 1; 1413 else if (start == (ha->flt_region_boot * 4) || 1414 start == (ha->flt_region_fw * 4)) 1415 valid = 1; 1416 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 1417 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) 
|| IS_QLA27XX(ha)) 1418 valid = 1; 1419 if (!valid) { 1420 ql_log(ql_log_warn, vha, 0x7058, 1421 "Invalid start region 0x%x/0x%x.\n", start, 1422 bsg_job->request_payload.payload_len); 1423 return -EINVAL; 1424 } 1425 1426 ha->optrom_region_size = start + 1427 bsg_job->request_payload.payload_len > ha->optrom_size ? 1428 ha->optrom_size - start : 1429 bsg_job->request_payload.payload_len; 1430 ha->optrom_state = QLA_SWRITING; 1431 } else { 1432 ha->optrom_region_size = start + 1433 bsg_job->reply_payload.payload_len > ha->optrom_size ? 1434 ha->optrom_size - start : 1435 bsg_job->reply_payload.payload_len; 1436 ha->optrom_state = QLA_SREADING; 1437 } 1438 1439 ha->optrom_buffer = vmalloc(ha->optrom_region_size); 1440 if (!ha->optrom_buffer) { 1441 ql_log(ql_log_warn, vha, 0x7059, 1442 "Read: Unable to allocate memory for optrom retrieval " 1443 "(%x)\n", ha->optrom_region_size); 1444 1445 ha->optrom_state = QLA_SWAITING; 1446 return -ENOMEM; 1447 } 1448 1449 memset(ha->optrom_buffer, 0, ha->optrom_region_size); 1450 return 0; 1451 } 1452 1453 static int 1454 qla2x00_read_optrom(struct bsg_job *bsg_job) 1455 { 1456 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1457 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1458 scsi_qla_host_t *vha = shost_priv(host); 1459 struct qla_hw_data *ha = vha->hw; 1460 int rval = 0; 1461 1462 if (ha->flags.nic_core_reset_hdlr_active) 1463 return -EBUSY; 1464 1465 mutex_lock(&ha->optrom_mutex); 1466 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1467 if (rval) { 1468 mutex_unlock(&ha->optrom_mutex); 1469 return rval; 1470 } 1471 1472 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 1473 ha->optrom_region_start, ha->optrom_region_size); 1474 1475 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1476 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, 1477 ha->optrom_region_size); 1478 1479 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size; 1480 bsg_reply->result = DID_OK; 1481 vfree(ha->optrom_buffer); 1482 
ha->optrom_buffer = NULL; 1483 ha->optrom_state = QLA_SWAITING; 1484 mutex_unlock(&ha->optrom_mutex); 1485 bsg_job_done(bsg_job, bsg_reply->result, 1486 bsg_reply->reply_payload_rcv_len); 1487 return rval; 1488 } 1489 1490 static int 1491 qla2x00_update_optrom(struct bsg_job *bsg_job) 1492 { 1493 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1494 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1495 scsi_qla_host_t *vha = shost_priv(host); 1496 struct qla_hw_data *ha = vha->hw; 1497 int rval = 0; 1498 1499 mutex_lock(&ha->optrom_mutex); 1500 rval = qla2x00_optrom_setup(bsg_job, vha, 1); 1501 if (rval) { 1502 mutex_unlock(&ha->optrom_mutex); 1503 return rval; 1504 } 1505 1506 /* Set the isp82xx_no_md_cap not to capture minidump */ 1507 ha->flags.isp82xx_no_md_cap = 1; 1508 1509 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1510 bsg_job->request_payload.sg_cnt, ha->optrom_buffer, 1511 ha->optrom_region_size); 1512 1513 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 1514 ha->optrom_region_start, ha->optrom_region_size); 1515 1516 bsg_reply->result = DID_OK; 1517 vfree(ha->optrom_buffer); 1518 ha->optrom_buffer = NULL; 1519 ha->optrom_state = QLA_SWAITING; 1520 mutex_unlock(&ha->optrom_mutex); 1521 bsg_job_done(bsg_job, bsg_reply->result, 1522 bsg_reply->reply_payload_rcv_len); 1523 return rval; 1524 } 1525 1526 static int 1527 qla2x00_update_fru_versions(struct bsg_job *bsg_job) 1528 { 1529 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1530 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1531 scsi_qla_host_t *vha = shost_priv(host); 1532 struct qla_hw_data *ha = vha->hw; 1533 int rval = 0; 1534 uint8_t bsg[DMA_POOL_SIZE]; 1535 struct qla_image_version_list *list = (void *)bsg; 1536 struct qla_image_version *image; 1537 uint32_t count; 1538 dma_addr_t sfp_dma; 1539 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1540 if (!sfp) { 1541 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1542 EXT_STATUS_NO_MEMORY; 1543 goto done; 1544 
} 1545 1546 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1547 bsg_job->request_payload.sg_cnt, list, sizeof(bsg)); 1548 1549 image = list->version; 1550 count = list->count; 1551 while (count--) { 1552 memcpy(sfp, &image->field_info, sizeof(image->field_info)); 1553 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1554 image->field_address.device, image->field_address.offset, 1555 sizeof(image->field_info), image->field_address.option); 1556 if (rval) { 1557 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1558 EXT_STATUS_MAILBOX; 1559 goto dealloc; 1560 } 1561 image++; 1562 } 1563 1564 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1565 1566 dealloc: 1567 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1568 1569 done: 1570 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1571 bsg_reply->result = DID_OK << 16; 1572 bsg_job_done(bsg_job, bsg_reply->result, 1573 bsg_reply->reply_payload_rcv_len); 1574 1575 return 0; 1576 } 1577 1578 static int 1579 qla2x00_read_fru_status(struct bsg_job *bsg_job) 1580 { 1581 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1582 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1583 scsi_qla_host_t *vha = shost_priv(host); 1584 struct qla_hw_data *ha = vha->hw; 1585 int rval = 0; 1586 uint8_t bsg[DMA_POOL_SIZE]; 1587 struct qla_status_reg *sr = (void *)bsg; 1588 dma_addr_t sfp_dma; 1589 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1590 if (!sfp) { 1591 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1592 EXT_STATUS_NO_MEMORY; 1593 goto done; 1594 } 1595 1596 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1597 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); 1598 1599 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 1600 sr->field_address.device, sr->field_address.offset, 1601 sizeof(sr->status_reg), sr->field_address.option); 1602 sr->status_reg = *sfp; 1603 1604 if (rval) { 1605 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1606 EXT_STATUS_MAILBOX; 1607 goto dealloc; 1608 } 1609 1610 
sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1611 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); 1612 1613 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1614 1615 dealloc: 1616 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1617 1618 done: 1619 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1620 bsg_reply->reply_payload_rcv_len = sizeof(*sr); 1621 bsg_reply->result = DID_OK << 16; 1622 bsg_job_done(bsg_job, bsg_reply->result, 1623 bsg_reply->reply_payload_rcv_len); 1624 1625 return 0; 1626 } 1627 1628 static int 1629 qla2x00_write_fru_status(struct bsg_job *bsg_job) 1630 { 1631 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1632 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1633 scsi_qla_host_t *vha = shost_priv(host); 1634 struct qla_hw_data *ha = vha->hw; 1635 int rval = 0; 1636 uint8_t bsg[DMA_POOL_SIZE]; 1637 struct qla_status_reg *sr = (void *)bsg; 1638 dma_addr_t sfp_dma; 1639 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1640 if (!sfp) { 1641 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1642 EXT_STATUS_NO_MEMORY; 1643 goto done; 1644 } 1645 1646 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1647 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); 1648 1649 *sfp = sr->status_reg; 1650 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1651 sr->field_address.device, sr->field_address.offset, 1652 sizeof(sr->status_reg), sr->field_address.option); 1653 1654 if (rval) { 1655 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1656 EXT_STATUS_MAILBOX; 1657 goto dealloc; 1658 } 1659 1660 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1661 1662 dealloc: 1663 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1664 1665 done: 1666 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1667 bsg_reply->result = DID_OK << 16; 1668 bsg_job_done(bsg_job, bsg_reply->result, 1669 bsg_reply->reply_payload_rcv_len); 1670 1671 return 0; 1672 } 1673 1674 static int 1675 qla2x00_write_i2c(struct bsg_job *bsg_job) 1676 { 1677 struct 
fc_bsg_reply *bsg_reply = bsg_job->reply; 1678 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1679 scsi_qla_host_t *vha = shost_priv(host); 1680 struct qla_hw_data *ha = vha->hw; 1681 int rval = 0; 1682 uint8_t bsg[DMA_POOL_SIZE]; 1683 struct qla_i2c_access *i2c = (void *)bsg; 1684 dma_addr_t sfp_dma; 1685 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1686 if (!sfp) { 1687 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1688 EXT_STATUS_NO_MEMORY; 1689 goto done; 1690 } 1691 1692 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1693 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); 1694 1695 memcpy(sfp, i2c->buffer, i2c->length); 1696 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1697 i2c->device, i2c->offset, i2c->length, i2c->option); 1698 1699 if (rval) { 1700 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1701 EXT_STATUS_MAILBOX; 1702 goto dealloc; 1703 } 1704 1705 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1706 1707 dealloc: 1708 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1709 1710 done: 1711 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1712 bsg_reply->result = DID_OK << 16; 1713 bsg_job_done(bsg_job, bsg_reply->result, 1714 bsg_reply->reply_payload_rcv_len); 1715 1716 return 0; 1717 } 1718 1719 static int 1720 qla2x00_read_i2c(struct bsg_job *bsg_job) 1721 { 1722 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1723 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1724 scsi_qla_host_t *vha = shost_priv(host); 1725 struct qla_hw_data *ha = vha->hw; 1726 int rval = 0; 1727 uint8_t bsg[DMA_POOL_SIZE]; 1728 struct qla_i2c_access *i2c = (void *)bsg; 1729 dma_addr_t sfp_dma; 1730 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1731 if (!sfp) { 1732 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1733 EXT_STATUS_NO_MEMORY; 1734 goto done; 1735 } 1736 1737 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1738 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); 1739 1740 rval = 
qla2x00_read_sfp(vha, sfp_dma, sfp, 1741 i2c->device, i2c->offset, i2c->length, i2c->option); 1742 1743 if (rval) { 1744 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1745 EXT_STATUS_MAILBOX; 1746 goto dealloc; 1747 } 1748 1749 memcpy(i2c->buffer, sfp, i2c->length); 1750 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1751 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c)); 1752 1753 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1754 1755 dealloc: 1756 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1757 1758 done: 1759 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1760 bsg_reply->reply_payload_rcv_len = sizeof(*i2c); 1761 bsg_reply->result = DID_OK << 16; 1762 bsg_job_done(bsg_job, bsg_reply->result, 1763 bsg_reply->reply_payload_rcv_len); 1764 1765 return 0; 1766 } 1767 1768 static int 1769 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) 1770 { 1771 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1772 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1773 scsi_qla_host_t *vha = shost_priv(host); 1774 struct qla_hw_data *ha = vha->hw; 1775 uint32_t rval = EXT_STATUS_OK; 1776 uint16_t req_sg_cnt = 0; 1777 uint16_t rsp_sg_cnt = 0; 1778 uint16_t nextlid = 0; 1779 uint32_t tot_dsds; 1780 srb_t *sp = NULL; 1781 uint32_t req_data_len = 0; 1782 uint32_t rsp_data_len = 0; 1783 1784 /* Check the type of the adapter */ 1785 if (!IS_BIDI_CAPABLE(ha)) { 1786 ql_log(ql_log_warn, vha, 0x70a0, 1787 "This adapter is not supported\n"); 1788 rval = EXT_STATUS_NOT_SUPPORTED; 1789 goto done; 1790 } 1791 1792 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1793 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1794 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 1795 rval = EXT_STATUS_BUSY; 1796 goto done; 1797 } 1798 1799 /* Check if host is online */ 1800 if (!vha->flags.online) { 1801 ql_log(ql_log_warn, vha, 0x70a1, 1802 "Host is not online\n"); 1803 rval = EXT_STATUS_DEVICE_OFFLINE; 1804 goto done; 1805 } 1806 1807 /* Check if cable is plugged in or not */ 
1808 if (vha->device_flags & DFLG_NO_CABLE) { 1809 ql_log(ql_log_warn, vha, 0x70a2, 1810 "Cable is unplugged...\n"); 1811 rval = EXT_STATUS_INVALID_CFG; 1812 goto done; 1813 } 1814 1815 /* Check if the switch is connected or not */ 1816 if (ha->current_topology != ISP_CFG_F) { 1817 ql_log(ql_log_warn, vha, 0x70a3, 1818 "Host is not connected to the switch\n"); 1819 rval = EXT_STATUS_INVALID_CFG; 1820 goto done; 1821 } 1822 1823 /* Check if operating mode is P2P */ 1824 if (ha->operating_mode != P2P) { 1825 ql_log(ql_log_warn, vha, 0x70a4, 1826 "Host operating mode is not P2p\n"); 1827 rval = EXT_STATUS_INVALID_CFG; 1828 goto done; 1829 } 1830 1831 mutex_lock(&ha->selflogin_lock); 1832 if (vha->self_login_loop_id == 0) { 1833 /* Initialize all required fields of fcport */ 1834 vha->bidir_fcport.vha = vha; 1835 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa; 1836 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area; 1837 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain; 1838 vha->bidir_fcport.loop_id = vha->loop_id; 1839 1840 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) { 1841 ql_log(ql_log_warn, vha, 0x70a7, 1842 "Failed to login port %06X for bidirectional IOCB\n", 1843 vha->bidir_fcport.d_id.b24); 1844 mutex_unlock(&ha->selflogin_lock); 1845 rval = EXT_STATUS_MAILBOX; 1846 goto done; 1847 } 1848 vha->self_login_loop_id = nextlid - 1; 1849 1850 } 1851 /* Assign the self login loop id to fcport */ 1852 mutex_unlock(&ha->selflogin_lock); 1853 1854 vha->bidir_fcport.loop_id = vha->self_login_loop_id; 1855 1856 req_sg_cnt = dma_map_sg(&ha->pdev->dev, 1857 bsg_job->request_payload.sg_list, 1858 bsg_job->request_payload.sg_cnt, 1859 DMA_TO_DEVICE); 1860 1861 if (!req_sg_cnt) { 1862 rval = EXT_STATUS_NO_MEMORY; 1863 goto done; 1864 } 1865 1866 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 1867 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 1868 DMA_FROM_DEVICE); 1869 1870 if (!rsp_sg_cnt) { 1871 rval = EXT_STATUS_NO_MEMORY; 1872 goto 
done_unmap_req_sg; 1873 } 1874 1875 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 1876 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 1877 ql_dbg(ql_dbg_user, vha, 0x70a9, 1878 "Dma mapping resulted in different sg counts " 1879 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: " 1880 "%x dma_reply_sg_cnt: %x]\n", 1881 bsg_job->request_payload.sg_cnt, req_sg_cnt, 1882 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 1883 rval = EXT_STATUS_NO_MEMORY; 1884 goto done_unmap_sg; 1885 } 1886 1887 if (req_data_len != rsp_data_len) { 1888 rval = EXT_STATUS_BUSY; 1889 ql_log(ql_log_warn, vha, 0x70aa, 1890 "req_data_len != rsp_data_len\n"); 1891 goto done_unmap_sg; 1892 } 1893 1894 req_data_len = bsg_job->request_payload.payload_len; 1895 rsp_data_len = bsg_job->reply_payload.payload_len; 1896 1897 1898 /* Alloc SRB structure */ 1899 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); 1900 if (!sp) { 1901 ql_dbg(ql_dbg_user, vha, 0x70ac, 1902 "Alloc SRB structure failed\n"); 1903 rval = EXT_STATUS_NO_MEMORY; 1904 goto done_unmap_sg; 1905 } 1906 1907 /*Populate srb->ctx with bidir ctx*/ 1908 sp->u.bsg_job = bsg_job; 1909 sp->free = qla2x00_bsg_sp_free; 1910 sp->type = SRB_BIDI_CMD; 1911 sp->done = qla2x00_bsg_job_done; 1912 1913 /* Add the read and write sg count */ 1914 tot_dsds = rsp_sg_cnt + req_sg_cnt; 1915 1916 rval = qla2x00_start_bidir(sp, vha, tot_dsds); 1917 if (rval != EXT_STATUS_OK) 1918 goto done_free_srb; 1919 /* the bsg request will be completed in the interrupt handler */ 1920 return rval; 1921 1922 done_free_srb: 1923 mempool_free(sp, ha->srb_mempool); 1924 done_unmap_sg: 1925 dma_unmap_sg(&ha->pdev->dev, 1926 bsg_job->reply_payload.sg_list, 1927 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1928 done_unmap_req_sg: 1929 dma_unmap_sg(&ha->pdev->dev, 1930 bsg_job->request_payload.sg_list, 1931 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1932 done: 1933 1934 /* Return an error vendor specific response 1935 * and complete the bsg 
request 1936 */ 1937 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1938 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1939 bsg_reply->reply_payload_rcv_len = 0; 1940 bsg_reply->result = (DID_OK) << 16; 1941 bsg_job_done(bsg_job, bsg_reply->result, 1942 bsg_reply->reply_payload_rcv_len); 1943 /* Always return success, vendor rsp carries correct status */ 1944 return 0; 1945 } 1946 1947 static int 1948 qlafx00_mgmt_cmd(struct bsg_job *bsg_job) 1949 { 1950 struct fc_bsg_request *bsg_request = bsg_job->request; 1951 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1952 scsi_qla_host_t *vha = shost_priv(host); 1953 struct qla_hw_data *ha = vha->hw; 1954 int rval = (DRIVER_ERROR << 16); 1955 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 1956 srb_t *sp; 1957 int req_sg_cnt = 0, rsp_sg_cnt = 0; 1958 struct fc_port *fcport; 1959 char *type = "FC_BSG_HST_FX_MGMT"; 1960 1961 /* Copy the IOCB specific information */ 1962 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 1963 &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1964 1965 /* Dump the vendor information */ 1966 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, 1967 (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00)); 1968 1969 if (!vha->flags.online) { 1970 ql_log(ql_log_warn, vha, 0x70d0, 1971 "Host is not online.\n"); 1972 rval = -EIO; 1973 goto done; 1974 } 1975 1976 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { 1977 req_sg_cnt = dma_map_sg(&ha->pdev->dev, 1978 bsg_job->request_payload.sg_list, 1979 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1980 if (!req_sg_cnt) { 1981 ql_log(ql_log_warn, vha, 0x70c7, 1982 "dma_map_sg return %d for request\n", req_sg_cnt); 1983 rval = -ENOMEM; 1984 goto done; 1985 } 1986 } 1987 1988 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { 1989 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 1990 bsg_job->reply_payload.sg_list, 1991 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1992 if (!rsp_sg_cnt) { 1993 ql_log(ql_log_warn, vha, 0x70c8, 1994 
"dma_map_sg return %d for reply\n", rsp_sg_cnt); 1995 rval = -ENOMEM; 1996 goto done_unmap_req_sg; 1997 } 1998 } 1999 2000 ql_dbg(ql_dbg_user, vha, 0x70c9, 2001 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " 2002 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, 2003 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 2004 2005 /* Allocate a dummy fcport structure, since functions preparing the 2006 * IOCB and mailbox command retrieves port specific information 2007 * from fcport structure. For Host based ELS commands there will be 2008 * no fcport structure allocated 2009 */ 2010 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2011 if (!fcport) { 2012 ql_log(ql_log_warn, vha, 0x70ca, 2013 "Failed to allocate fcport.\n"); 2014 rval = -ENOMEM; 2015 goto done_unmap_rsp_sg; 2016 } 2017 2018 /* Alloc SRB structure */ 2019 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2020 if (!sp) { 2021 ql_log(ql_log_warn, vha, 0x70cb, 2022 "qla2x00_get_sp failed.\n"); 2023 rval = -ENOMEM; 2024 goto done_free_fcport; 2025 } 2026 2027 /* Initialize all required fields of fcport */ 2028 fcport->vha = vha; 2029 fcport->loop_id = piocb_rqst->dataword; 2030 2031 sp->type = SRB_FXIOCB_BCMD; 2032 sp->name = "bsg_fx_mgmt"; 2033 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); 2034 sp->u.bsg_job = bsg_job; 2035 sp->free = qla2x00_bsg_sp_free; 2036 sp->done = qla2x00_bsg_job_done; 2037 2038 ql_dbg(ql_dbg_user, vha, 0x70cc, 2039 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n", 2040 type, piocb_rqst->func_type, fcport->loop_id); 2041 2042 rval = qla2x00_start_sp(sp); 2043 if (rval != QLA_SUCCESS) { 2044 ql_log(ql_log_warn, vha, 0x70cd, 2045 "qla2x00_start_sp failed=%d.\n", rval); 2046 mempool_free(sp, ha->srb_mempool); 2047 rval = -EIO; 2048 goto done_free_fcport; 2049 } 2050 return rval; 2051 2052 done_free_fcport: 2053 kfree(fcport); 2054 2055 done_unmap_rsp_sg: 2056 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) 2057 dma_unmap_sg(&ha->pdev->dev, 2058 
bsg_job->reply_payload.sg_list, 2059 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2060 done_unmap_req_sg: 2061 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) 2062 dma_unmap_sg(&ha->pdev->dev, 2063 bsg_job->request_payload.sg_list, 2064 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 2065 2066 done: 2067 return rval; 2068 } 2069 2070 static int 2071 qla26xx_serdes_op(struct bsg_job *bsg_job) 2072 { 2073 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2074 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2075 scsi_qla_host_t *vha = shost_priv(host); 2076 int rval = 0; 2077 struct qla_serdes_reg sr; 2078 2079 memset(&sr, 0, sizeof(sr)); 2080 2081 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2082 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); 2083 2084 switch (sr.cmd) { 2085 case INT_SC_SERDES_WRITE_REG: 2086 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); 2087 bsg_reply->reply_payload_rcv_len = 0; 2088 break; 2089 case INT_SC_SERDES_READ_REG: 2090 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); 2091 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2092 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); 2093 bsg_reply->reply_payload_rcv_len = sizeof(sr); 2094 break; 2095 default: 2096 ql_dbg(ql_dbg_user, vha, 0x708c, 2097 "Unknown serdes cmd %x.\n", sr.cmd); 2098 rval = -EINVAL; 2099 break; 2100 } 2101 2102 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2103 rval ? 
EXT_STATUS_MAILBOX : 0; /* tail of qla26xx_serdes_op; start of function is above this chunk */

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

/*
 * Vendor command: read/write an ISP8044 SerDes register on behalf of
 * userspace.  The request payload carries a struct qla_serdes_reg_ex
 * (cmd/addr/val); on a read, the same struct is returned in the reply
 * payload with .val filled in.  Always completes the bsg job and
 * returns 0; the mailbox outcome is reported in vendor_rsp[0].
 */
static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	/* Pull the caller's register-access request out of the sg list. */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		/* Echo the whole struct back with the value read. */
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	/* Any failure (mailbox or bad cmd) is surfaced as EXT_STATUS_MAILBOX. */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

/*
 * Vendor command: report the 27xx firmware's flash-update capabilities.
 * Packs the four fw_attributes* words into a single 64-bit capabilities
 * value and copies a struct qla_flash_update_caps to the reply payload.
 * -EPERM on non-27xx adapters (bsg core completes the job on error).
 */
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	/* Layout: ext[1]:ext[0]:attributes_h:attributes, high to low 16 bits. */
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

/*
 * Vendor command: validate the caller's view of flash-update capabilities
 * before an update.  The request must echo back exactly the capabilities
 * the online firmware reports, and must declare an outage_duration of at
 * least MAX_LOOP_TIMEOUT; otherwise EXT_STATUS_INVALID_PARAM / -EINVAL.
 *
 * NOTE(review): the error paths return -EINVAL without calling
 * bsg_job_done(); presumably the bsg/FC transport completes the job when
 * the handler returns non-zero -- confirm against qla24xx_bsg_request().
 */
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	/* Same packing as qla27xx_get_flash_upd_cap(). */
	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	/* Caller must agree with the running firmware's capabilities. */
	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	/*
	 * NOTE(review): rejects durations *below* MAX_LOOP_TIMEOUT --
	 * presumably the update outage must cover at least one loop
	 * timeout; verify the intended comparison direction.
	 */
	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

/*
 * Vendor command: report Buffer-to-Buffer Credit Recovery (BBCR) state
 * on 27xx adapters.  Fills a struct qla_bbcr_data from vha->flags and
 * the cached vha->bbcr word (bit 12 = offline, bits 11:8 = negotiated
 * BB-SC-N, bits 3:0 = configured BB-SC-N) and returns it in the reply
 * payload.  Always completes the job with EXT_STATUS_OK.
 */
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!(IS_QLA27XX(ha)))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			/*
			 * NOTE(review): on failure loop_id presumably holds
			 * the mailbox status word from the failed
			 * GET_ADAPTER_ID -- confirm in qla2x00_get_adapter_id.
			 */
			bbcr.mbx1 = loop_id;
			goto done;
		}

		/* Bit 12 of the cached BBCR word: 1 => logged-out/offline. */
		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

/*
 * Vendor command: fetch raw ISP link statistics via a DMA-coherent
 * buffer.  For QL_VND_GET_PRIV_STATS_EX, vendor_cmd[1] supplies option
 * flags to the GET_LINK_PRIV_STATS mailbox; the plain variant uses 0.
 * Fails early (-ENODEV/-EBUSY/-EPERM) if the adapter is unloading,
 * offlined, resetting, or pre-FWI2.
 */
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	/* Firmware DMA-writes the stats, so a coherent buffer is required. */
	stats = dma_alloc_coherent(&ha->pdev->dev,
	    sizeof(*stats), &stats_dma, GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	memset(stats, 0, sizeof(*stats));

	/* Stats are collected on the physical (base) host, not the vport. */
	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
		    (uint8_t *)stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	/*
	 * NOTE(review): reply_payload_rcv_len is set to sizeof(*stats) even
	 * when the mailbox failed and nothing was copied; userspace is
	 * expected to gate on vendor_rsp[0] -- confirm.
	 */
	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}

/*
 * Vendor command: run D_Port diagnostics (83xx/27xx only).  The request
 * payload supplies a struct qla_dport_diag (options + result buffer);
 * on mailbox success the same struct, now holding diagnostic results,
 * is copied back to the reply payload.
 */
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}

/*
 * Dispatch an FC_BSG_HST_VENDOR request to its QL_VND_* handler based on
 * vendor_cmd[0].  Each handler owns job completion; unknown commands get
 * -ENOSYS so the bsg core fails the job.
 */
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	default:
		return -ENOSYS;
	}
}

/*
 * FC transport entry point for all BSG requests on this HBA.  Resolves
 * the owning scsi_qla_host (via the rport for target-directed ELS,
 * otherwise via the shost), rejects requests while an ISP reset is
 * active/needed, then routes by msgcode to the ELS/CT/vendor handlers.
 */
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* rport-directed ELS: derive the host from the rport. */
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

/*
 * FC transport timeout handler for a BSG job.  Scans every request
 * queue's outstanding-command array for the srb owning this job; if
 * found, pulls it from the array, drops the hardware lock to issue the
 * (sleeping) mailbox abort, then frees the srb.  Returns 0 whether or
 * not the srb was found; bsg_reply->result carries the real outcome.
 */
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* Slot 0 is reserved; outstanding handles start at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				/* Only srb types that carry a bsg_job. */
				if (((sp->type == SRB_CT_CMD) ||
					(sp->type == SRB_ELS_CMD_HST) ||
					(sp->type == SRB_FXIOCB_BCMD))
					&& (sp->u.bsg_job == bsg_job)) {
					/*
					 * Claim the slot, then drop the lock:
					 * abort_command issues a mailbox cmd
					 * and must not run under hardware_lock.
					 */
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						scsi_req(bsg_job->req)->result =
						bsg_reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						scsi_req(bsg_job->req)->result =
						bsg_reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	scsi_req(bsg_job->req)->result = bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/*
	 * NOTE(review): sp was removed from outstanding_cmds above, so the
	 * normal completion path cannot free it; freed here instead --
	 * verify no race with an in-flight completion.
	 */
	sp->free(sp);
	return 0;
}