1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 9 #include <linux/kthread.h> 10 #include <linux/vmalloc.h> 11 #include <linux/delay.h> 12 #include <linux/bsg-lib.h> 13 14 /* BSG support for ELS/CT pass through */ 15 void qla2x00_bsg_job_done(srb_t *sp, int res) 16 { 17 struct bsg_job *bsg_job = sp->u.bsg_job; 18 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 19 20 bsg_reply->result = res; 21 bsg_job_done(bsg_job, bsg_reply->result, 22 bsg_reply->reply_payload_rcv_len); 23 sp->free(sp); 24 } 25 26 void qla2x00_bsg_sp_free(srb_t *sp) 27 { 28 struct qla_hw_data *ha = sp->vha->hw; 29 struct bsg_job *bsg_job = sp->u.bsg_job; 30 struct fc_bsg_request *bsg_request = bsg_job->request; 31 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 32 33 if (sp->type == SRB_FXIOCB_BCMD) { 34 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 35 &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 36 37 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) 38 dma_unmap_sg(&ha->pdev->dev, 39 bsg_job->request_payload.sg_list, 40 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 41 42 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) 43 dma_unmap_sg(&ha->pdev->dev, 44 bsg_job->reply_payload.sg_list, 45 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 46 } else { 47 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 48 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 49 50 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 51 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 52 } 53 54 if (sp->type == SRB_CT_CMD || 55 sp->type == SRB_FXIOCB_BCMD || 56 sp->type == SRB_ELS_CMD_HST) 57 kfree(sp->fcport); 58 qla2x00_rel_sp(sp); 59 } 60 61 int 62 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, 63 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag) 64 { 65 int i, ret, num_valid; 66 uint8_t *bcode; 67 struct qla_fcp_prio_entry *pri_entry; 68 uint32_t *bcode_val_ptr, bcode_val; 69 70 ret = 1; 71 num_valid = 0; 72 bcode = (uint8_t *)pri_cfg; 73 bcode_val_ptr = (uint32_t *)pri_cfg; 74 bcode_val = (uint32_t)(*bcode_val_ptr); 75 76 if (bcode_val == 0xFFFFFFFF) { 77 /* No FCP Priority config data in flash */ 78 ql_dbg(ql_dbg_user, vha, 0x7051, 79 "No FCP Priority config data.\n"); 80 return 0; 81 } 82 83 if (memcmp(bcode, "HQOS", 4)) { 84 /* Invalid FCP priority data header*/ 85 ql_dbg(ql_dbg_user, vha, 0x7052, 86 "Invalid FCP Priority data header. bcode=0x%x.\n", 87 bcode_val); 88 return 0; 89 } 90 if (flag != 1) 91 return ret; 92 93 pri_entry = &pri_cfg->entry[0]; 94 for (i = 0; i < pri_cfg->num_entries; i++) { 95 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID) 96 num_valid++; 97 pri_entry++; 98 } 99 100 if (num_valid == 0) { 101 /* No valid FCP priority data entries */ 102 ql_dbg(ql_dbg_user, vha, 0x7053, 103 "No valid FCP Priority data entries.\n"); 104 ret = 0; 105 } else { 106 /* FCP priority data is valid */ 107 ql_dbg(ql_dbg_user, vha, 0x7054, 108 "Valid FCP priority data. num entries = %d.\n", 109 num_valid); 110 } 111 112 return ret; 113 } 114 115 static int 116 qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job) 117 { 118 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 119 struct fc_bsg_request *bsg_request = bsg_job->request; 120 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 121 scsi_qla_host_t *vha = shost_priv(host); 122 struct qla_hw_data *ha = vha->hw; 123 int ret = 0; 124 uint32_t len; 125 uint32_t oper; 126 127 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) { 128 ret = -EINVAL; 129 goto exit_fcp_prio_cfg; 130 } 131 132 /* Get the sub command */ 133 oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 134 135 /* Only set config is allowed if config memory is not allocated */ 136 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) { 137 ret = -EINVAL; 138 goto exit_fcp_prio_cfg; 139 } 140 switch (oper) { 141 case QLFC_FCP_PRIO_DISABLE: 142 if (ha->flags.fcp_prio_enabled) { 143 ha->flags.fcp_prio_enabled = 0; 144 ha->fcp_prio_cfg->attributes &= 145 ~FCP_PRIO_ATTR_ENABLE; 146 qla24xx_update_all_fcp_prio(vha); 147 bsg_reply->result = DID_OK; 148 } else { 149 ret = -EINVAL; 150 bsg_reply->result = (DID_ERROR << 16); 151 goto exit_fcp_prio_cfg; 152 } 153 break; 154 155 case QLFC_FCP_PRIO_ENABLE: 156 if (!ha->flags.fcp_prio_enabled) { 157 if (ha->fcp_prio_cfg) { 158 ha->flags.fcp_prio_enabled = 1; 159 ha->fcp_prio_cfg->attributes |= 160 FCP_PRIO_ATTR_ENABLE; 161 qla24xx_update_all_fcp_prio(vha); 162 bsg_reply->result = DID_OK; 163 } else { 164 ret = -EINVAL; 165 bsg_reply->result = (DID_ERROR << 16); 166 goto exit_fcp_prio_cfg; 167 } 168 } 169 break; 170 171 case QLFC_FCP_PRIO_GET_CONFIG: 172 len = bsg_job->reply_payload.payload_len; 173 if (!len || len > FCP_PRIO_CFG_SIZE) { 174 ret = -EINVAL; 175 bsg_reply->result = (DID_ERROR << 16); 176 goto exit_fcp_prio_cfg; 177 } 178 179 bsg_reply->result = DID_OK; 180 bsg_reply->reply_payload_rcv_len = 181 sg_copy_from_buffer( 182 bsg_job->reply_payload.sg_list, 183 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg, 184 len); 185 186 break; 187 188 case QLFC_FCP_PRIO_SET_CONFIG: 189 len = bsg_job->request_payload.payload_len; 190 if (!len || len > FCP_PRIO_CFG_SIZE) { 191 bsg_reply->result = (DID_ERROR << 16); 192 ret = -EINVAL; 193 goto exit_fcp_prio_cfg; 194 } 195 196 if (!ha->fcp_prio_cfg) { 197 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE); 198 if (!ha->fcp_prio_cfg) { 199 ql_log(ql_log_warn, vha, 0x7050, 200 "Unable to allocate memory for fcp prio " 201 "config data (%x).\n", FCP_PRIO_CFG_SIZE); 202 bsg_reply->result = (DID_ERROR << 16); 203 ret = -ENOMEM; 204 goto exit_fcp_prio_cfg; 205 } 206 } 207 208 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE); 209 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 210 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg, 211 FCP_PRIO_CFG_SIZE); 212 213 /* validate fcp priority data */ 214 215 if (!qla24xx_fcp_prio_cfg_valid(vha, 216 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) { 217 bsg_reply->result = (DID_ERROR << 16); 218 ret = -EINVAL; 219 /* If buffer was invalidatic int 220 * fcp_prio_cfg is of no use 221 */ 222 vfree(ha->fcp_prio_cfg); 223 ha->fcp_prio_cfg = NULL; 224 goto exit_fcp_prio_cfg; 225 } 226 227 ha->flags.fcp_prio_enabled = 0; 228 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE) 229 ha->flags.fcp_prio_enabled = 1; 230 qla24xx_update_all_fcp_prio(vha); 231 bsg_reply->result = DID_OK; 232 break; 233 default: 234 ret = -EINVAL; 235 break; 236 } 237 exit_fcp_prio_cfg: 238 if (!ret) 239 bsg_job_done(bsg_job, bsg_reply->result, 240 bsg_reply->reply_payload_rcv_len); 241 return ret; 242 } 243 244 static int 245 qla2x00_process_els(struct bsg_job *bsg_job) 246 { 247 struct fc_bsg_request *bsg_request = bsg_job->request; 248 struct fc_rport *rport; 249 fc_port_t *fcport = NULL; 250 struct Scsi_Host *host; 251 scsi_qla_host_t *vha; 252 struct qla_hw_data *ha; 253 srb_t *sp; 254 const char *type; 255 int req_sg_cnt, rsp_sg_cnt; 256 int rval = (DRIVER_ERROR << 16); 257 uint16_t nextlid = 0; 258 259 if (bsg_request->msgcode == FC_BSG_RPT_ELS) { 260 rport = fc_bsg_to_rport(bsg_job); 261 fcport = *(fc_port_t **) rport->dd_data; 262 host = rport_to_shost(rport); 263 vha = shost_priv(host); 264 ha = vha->hw; 265 type = "FC_BSG_RPT_ELS"; 266 } else { 267 host = fc_bsg_to_shost(bsg_job); 268 vha = shost_priv(host); 269 ha = vha->hw; 270 type = "FC_BSG_HST_ELS_NOLOGIN"; 271 } 272 273 if (!vha->flags.online) { 274 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); 275 rval = -EIO; 276 goto done; 277 } 278 279 /* pass through is supported only for ISP 4Gb or higher */ 280 if (!IS_FWI2_CAPABLE(ha)) { 281 ql_dbg(ql_dbg_user, vha, 0x7001, 282 "ELS passthru not supported for ISP23xx based adapters.\n"); 283 rval = -EPERM; 284 goto done; 285 } 286 287 /* Multiple SG's are not supported for ELS requests */ 288 if (bsg_job->request_payload.sg_cnt > 1 || 289 bsg_job->reply_payload.sg_cnt > 1) { 290 ql_dbg(ql_dbg_user, vha, 0x7002, 291 "Multiple SG's are not supported for ELS requests, " 292 "request_sg_cnt=%x reply_sg_cnt=%x.\n", 293 bsg_job->request_payload.sg_cnt, 294 bsg_job->reply_payload.sg_cnt); 295 rval = -EPERM; 296 goto done; 297 } 298 299 /* ELS request for rport */ 300 if (bsg_request->msgcode == FC_BSG_RPT_ELS) { 301 /* make sure the rport is logged in, 302 * if not perform fabric login 303 */ 304 if (qla2x00_fabric_login(vha, fcport, &nextlid)) { 305 ql_dbg(ql_dbg_user, vha, 0x7003, 306 "Failed to login port %06X for ELS passthru.\n", 307 fcport->d_id.b24); 308 rval = -EIO; 309 goto done; 310 } 311 } else { 312 /* Allocate a dummy fcport structure, since functions 313 * preparing the IOCB and mailbox command retrieves port 314 * specific information from fcport structure. For Host based 315 * ELS commands there will be no fcport structure allocated 316 */ 317 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 318 if (!fcport) { 319 rval = -ENOMEM; 320 goto done; 321 } 322 323 /* Initialize all required fields of fcport */ 324 fcport->vha = vha; 325 fcport->d_id.b.al_pa = 326 bsg_request->rqst_data.h_els.port_id[0]; 327 fcport->d_id.b.area = 328 bsg_request->rqst_data.h_els.port_id[1]; 329 fcport->d_id.b.domain = 330 bsg_request->rqst_data.h_els.port_id[2]; 331 fcport->loop_id = 332 (fcport->d_id.b.al_pa == 0xFD) ? 333 NPH_FABRIC_CONTROLLER : NPH_F_PORT; 334 } 335 336 req_sg_cnt = 337 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 338 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 339 if (!req_sg_cnt) { 340 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 341 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 342 rval = -ENOMEM; 343 goto done_free_fcport; 344 } 345 346 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 347 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 348 if (!rsp_sg_cnt) { 349 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 350 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 351 rval = -ENOMEM; 352 goto done_free_fcport; 353 } 354 355 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 356 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 357 ql_log(ql_log_warn, vha, 0x7008, 358 "dma mapping resulted in different sg counts, " 359 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x " 360 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt, 361 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 362 rval = -EAGAIN; 363 goto done_unmap_sg; 364 } 365 366 /* Alloc SRB structure */ 367 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 368 if (!sp) { 369 rval = -ENOMEM; 370 goto done_unmap_sg; 371 } 372 373 sp->type = 374 (bsg_request->msgcode == FC_BSG_RPT_ELS ? 375 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); 376 sp->name = 377 (bsg_request->msgcode == FC_BSG_RPT_ELS ? 378 "bsg_els_rpt" : "bsg_els_hst"); 379 sp->u.bsg_job = bsg_job; 380 sp->free = qla2x00_bsg_sp_free; 381 sp->done = qla2x00_bsg_job_done; 382 383 ql_dbg(ql_dbg_user, vha, 0x700a, 384 "bsg rqst type: %s els type: %x - loop-id=%x " 385 "portid=%-2x%02x%02x.\n", type, 386 bsg_request->rqst_data.h_els.command_code, fcport->loop_id, 387 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); 388 389 rval = qla2x00_start_sp(sp); 390 if (rval != QLA_SUCCESS) { 391 ql_log(ql_log_warn, vha, 0x700e, 392 "qla2x00_start_sp failed = %d\n", rval); 393 qla2x00_rel_sp(sp); 394 rval = -EIO; 395 goto done_unmap_sg; 396 } 397 return rval; 398 399 done_unmap_sg: 400 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 401 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 402 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 403 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 404 goto done_free_fcport; 405 406 done_free_fcport: 407 if (bsg_request->msgcode == FC_BSG_RPT_ELS) 408 kfree(fcport); 409 done: 410 return rval; 411 } 412 413 static inline uint16_t 414 qla24xx_calc_ct_iocbs(uint16_t dsds) 415 { 416 uint16_t iocbs; 417 418 iocbs = 1; 419 if (dsds > 2) { 420 iocbs += (dsds - 2) / 5; 421 if ((dsds - 2) % 5) 422 iocbs++; 423 } 424 return iocbs; 425 } 426 427 static int 428 qla2x00_process_ct(struct bsg_job *bsg_job) 429 { 430 srb_t *sp; 431 struct fc_bsg_request *bsg_request = bsg_job->request; 432 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 433 scsi_qla_host_t *vha = shost_priv(host); 434 struct qla_hw_data *ha = vha->hw; 435 int rval = (DRIVER_ERROR << 16); 436 int req_sg_cnt, rsp_sg_cnt; 437 uint16_t loop_id; 438 struct fc_port *fcport; 439 char *type = "FC_BSG_HST_CT"; 440 441 req_sg_cnt = 442 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 443 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 444 if (!req_sg_cnt) { 445 ql_log(ql_log_warn, vha, 0x700f, 446 "dma_map_sg return %d for request\n", req_sg_cnt); 447 rval = -ENOMEM; 448 goto done; 449 } 450 451 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 452 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 453 if (!rsp_sg_cnt) { 454 ql_log(ql_log_warn, vha, 0x7010, 455 "dma_map_sg return %d for reply\n", rsp_sg_cnt); 456 rval = -ENOMEM; 457 goto done; 458 } 459 460 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 461 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 462 ql_log(ql_log_warn, vha, 0x7011, 463 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " 464 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, 465 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 466 rval = -EAGAIN; 467 goto done_unmap_sg; 468 } 469 470 if (!vha->flags.online) { 471 ql_log(ql_log_warn, vha, 0x7012, 472 "Host is not online.\n"); 473 rval = -EIO; 474 goto done_unmap_sg; 475 } 476 477 loop_id = 478 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000) 479 >> 24; 480 switch (loop_id) { 481 case 0xFC: 482 loop_id = cpu_to_le16(NPH_SNS); 483 break; 484 case 0xFA: 485 loop_id = vha->mgmt_svr_loop_id; 486 break; 487 default: 488 ql_dbg(ql_dbg_user, vha, 0x7013, 489 "Unknown loop id: %x.\n", loop_id); 490 rval = -EINVAL; 491 goto done_unmap_sg; 492 } 493 494 /* Allocate a dummy fcport structure, since functions preparing the 495 * IOCB and mailbox command retrieves port specific information 496 * from fcport structure. For Host based ELS commands there will be 497 * no fcport structure allocated 498 */ 499 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 500 if (!fcport) { 501 ql_log(ql_log_warn, vha, 0x7014, 502 "Failed to allocate fcport.\n"); 503 rval = -ENOMEM; 504 goto done_unmap_sg; 505 } 506 507 /* Initialize all required fields of fcport */ 508 fcport->vha = vha; 509 fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0]; 510 fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1]; 511 fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2]; 512 fcport->loop_id = loop_id; 513 514 /* Alloc SRB structure */ 515 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 516 if (!sp) { 517 ql_log(ql_log_warn, vha, 0x7015, 518 "qla2x00_get_sp failed.\n"); 519 rval = -ENOMEM; 520 goto done_free_fcport; 521 } 522 523 sp->type = SRB_CT_CMD; 524 sp->name = "bsg_ct"; 525 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); 526 sp->u.bsg_job = bsg_job; 527 sp->free = qla2x00_bsg_sp_free; 528 sp->done = qla2x00_bsg_job_done; 529 530 ql_dbg(ql_dbg_user, vha, 0x7016, 531 "bsg rqst type: %s else type: %x - " 532 "loop-id=%x portid=%02x%02x%02x.\n", type, 533 (bsg_request->rqst_data.h_ct.preamble_word2 >> 16), 534 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, 535 fcport->d_id.b.al_pa); 536 537 rval = qla2x00_start_sp(sp); 538 if (rval != QLA_SUCCESS) { 539 ql_log(ql_log_warn, vha, 0x7017, 540 "qla2x00_start_sp failed=%d.\n", rval); 541 qla2x00_rel_sp(sp); 542 rval = -EIO; 543 goto done_free_fcport; 544 } 545 return rval; 546 547 done_free_fcport: 548 kfree(fcport); 549 done_unmap_sg: 550 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 551 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 552 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 553 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 554 done: 555 return rval; 556 } 557 558 /* Disable loopback mode */ 559 static inline int 560 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, 561 int wait, int wait2) 562 { 563 int ret = 0; 564 int rval = 0; 565 uint16_t new_config[4]; 566 struct qla_hw_data *ha = vha->hw; 567 568 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) 569 goto done_reset_internal; 570 571 memset(new_config, 0 , sizeof(new_config)); 572 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == 573 ENABLE_INTERNAL_LOOPBACK || 574 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 == 575 ENABLE_EXTERNAL_LOOPBACK) { 576 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK; 577 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n", 578 (new_config[0] & INTERNAL_LOOPBACK_MASK)); 579 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ; 580 581 ha->notify_dcbx_comp = wait; 582 ha->notify_lb_portup_comp = wait2; 583 584 ret = qla81xx_set_port_config(vha, new_config); 585 if (ret != QLA_SUCCESS) { 586 ql_log(ql_log_warn, vha, 0x7025, 587 "Set port config failed.\n"); 588 ha->notify_dcbx_comp = 0; 589 ha->notify_lb_portup_comp = 0; 590 rval = -EINVAL; 591 goto done_reset_internal; 592 } 593 594 /* Wait for DCBX complete event */ 595 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp, 596 (DCBX_COMP_TIMEOUT * HZ))) { 597 ql_dbg(ql_dbg_user, vha, 0x7026, 598 "DCBX completion not received.\n"); 599 ha->notify_dcbx_comp = 0; 600 ha->notify_lb_portup_comp = 0; 601 rval = -EINVAL; 602 goto done_reset_internal; 603 } else 604 ql_dbg(ql_dbg_user, vha, 0x7027, 605 "DCBX completion received.\n"); 606 607 if (wait2 && 608 !wait_for_completion_timeout(&ha->lb_portup_comp, 609 (LB_PORTUP_COMP_TIMEOUT * HZ))) { 610 ql_dbg(ql_dbg_user, vha, 0x70c5, 611 "Port up completion not received.\n"); 612 ha->notify_lb_portup_comp = 0; 613 rval = -EINVAL; 614 goto done_reset_internal; 615 } else 616 ql_dbg(ql_dbg_user, vha, 0x70c6, 617 "Port up completion received.\n"); 618 619 ha->notify_dcbx_comp = 0; 620 ha->notify_lb_portup_comp = 0; 621 } 622 done_reset_internal: 623 return rval; 624 } 625 626 /* 627 * Set the port configuration to enable the internal or external loopback 628 * depending on the loopback mode. 629 */ 630 static inline int 631 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, 632 uint16_t *new_config, uint16_t mode) 633 { 634 int ret = 0; 635 int rval = 0; 636 unsigned long rem_tmo = 0, current_tmo = 0; 637 struct qla_hw_data *ha = vha->hw; 638 639 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) 640 goto done_set_internal; 641 642 if (mode == INTERNAL_LOOPBACK) 643 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 644 else if (mode == EXTERNAL_LOOPBACK) 645 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1); 646 ql_dbg(ql_dbg_user, vha, 0x70be, 647 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK)); 648 649 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3); 650 651 ha->notify_dcbx_comp = 1; 652 ret = qla81xx_set_port_config(vha, new_config); 653 if (ret != QLA_SUCCESS) { 654 ql_log(ql_log_warn, vha, 0x7021, 655 "set port config failed.\n"); 656 ha->notify_dcbx_comp = 0; 657 rval = -EINVAL; 658 goto done_set_internal; 659 } 660 661 /* Wait for DCBX complete event */ 662 current_tmo = DCBX_COMP_TIMEOUT * HZ; 663 while (1) { 664 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp, 665 current_tmo); 666 if (!ha->idc_extend_tmo || rem_tmo) { 667 ha->idc_extend_tmo = 0; 668 break; 669 } 670 current_tmo = ha->idc_extend_tmo * HZ; 671 ha->idc_extend_tmo = 0; 672 } 673 674 if (!rem_tmo) { 675 ql_dbg(ql_dbg_user, vha, 0x7022, 676 "DCBX completion not received.\n"); 677 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0); 678 /* 679 * If the reset of the loopback mode doesn't work take a FCoE 680 * dump and reset the chip. 681 */ 682 if (ret) { 683 ha->isp_ops->fw_dump(vha, 0); 684 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 685 } 686 rval = -EINVAL; 687 } else { 688 if (ha->flags.idc_compl_status) { 689 ql_dbg(ql_dbg_user, vha, 0x70c3, 690 "Bad status in IDC Completion AEN\n"); 691 rval = -EINVAL; 692 ha->flags.idc_compl_status = 0; 693 } else 694 ql_dbg(ql_dbg_user, vha, 0x7023, 695 "DCBX completion received.\n"); 696 } 697 698 ha->notify_dcbx_comp = 0; 699 ha->idc_extend_tmo = 0; 700 701 done_set_internal: 702 return rval; 703 } 704 705 static int 706 qla2x00_process_loopback(struct bsg_job *bsg_job) 707 { 708 struct fc_bsg_request *bsg_request = bsg_job->request; 709 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 710 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 711 scsi_qla_host_t *vha = shost_priv(host); 712 struct qla_hw_data *ha = vha->hw; 713 int rval; 714 uint8_t command_sent; 715 char *type; 716 struct msg_echo_lb elreq; 717 uint16_t response[MAILBOX_REGISTER_COUNT]; 718 uint16_t config[4], new_config[4]; 719 uint8_t *fw_sts_ptr; 720 uint8_t *req_data = NULL; 721 dma_addr_t req_data_dma; 722 uint32_t req_data_len; 723 uint8_t *rsp_data = NULL; 724 dma_addr_t rsp_data_dma; 725 uint32_t rsp_data_len; 726 727 if (!vha->flags.online) { 728 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n"); 729 return -EIO; 730 } 731 732 memset(&elreq, 0, sizeof(elreq)); 733 734 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, 735 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 736 DMA_TO_DEVICE); 737 738 if (!elreq.req_sg_cnt) { 739 ql_log(ql_log_warn, vha, 0x701a, 740 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt); 741 return -ENOMEM; 742 } 743 744 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 745 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 746 DMA_FROM_DEVICE); 747 748 if (!elreq.rsp_sg_cnt) { 749 ql_log(ql_log_warn, vha, 0x701b, 750 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt); 751 rval = -ENOMEM; 752 goto done_unmap_req_sg; 753 } 754 755 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) || 756 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 757 ql_log(ql_log_warn, vha, 0x701c, 758 "dma mapping resulted in different sg counts, " 759 "request_sg_cnt: %x dma_request_sg_cnt: %x " 760 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", 761 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt, 762 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt); 763 rval = -EAGAIN; 764 goto done_unmap_sg; 765 } 766 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 767 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len, 768 &req_data_dma, GFP_KERNEL); 769 if (!req_data) { 770 ql_log(ql_log_warn, vha, 0x701d, 771 "dma alloc failed for req_data.\n"); 772 rval = -ENOMEM; 773 goto done_unmap_sg; 774 } 775 776 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len, 777 &rsp_data_dma, GFP_KERNEL); 778 if (!rsp_data) { 779 ql_log(ql_log_warn, vha, 0x7004, 780 "dma alloc failed for rsp_data.\n"); 781 rval = -ENOMEM; 782 goto done_free_dma_req; 783 } 784 785 /* Copy the request buffer in req_data now */ 786 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 787 bsg_job->request_payload.sg_cnt, req_data, req_data_len); 788 789 elreq.send_dma = req_data_dma; 790 elreq.rcv_dma = rsp_data_dma; 791 elreq.transfer_size = req_data_len; 792 793 elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 794 elreq.iteration_count = 795 bsg_request->rqst_data.h_vendor.vendor_cmd[2]; 796 797 if (atomic_read(&vha->loop_state) == LOOP_READY && 798 (ha->current_topology == ISP_CFG_F || 799 (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && 800 req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 801 elreq.options == EXTERNAL_LOOPBACK) { 802 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 803 ql_dbg(ql_dbg_user, vha, 0x701e, 804 "BSG request type: %s.\n", type); 805 command_sent = INT_DEF_LB_ECHO_CMD; 806 rval = qla2x00_echo_test(vha, &elreq, response); 807 } else { 808 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) { 809 memset(config, 0, sizeof(config)); 810 memset(new_config, 0, sizeof(new_config)); 811 812 if (qla81xx_get_port_config(vha, config)) { 813 ql_log(ql_log_warn, vha, 0x701f, 814 "Get port config failed.\n"); 815 rval = -EPERM; 816 goto done_free_dma_rsp; 817 } 818 819 if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) { 820 ql_dbg(ql_dbg_user, vha, 0x70c4, 821 "Loopback operation already in " 822 "progress.\n"); 823 rval = -EAGAIN; 824 goto done_free_dma_rsp; 825 } 826 827 ql_dbg(ql_dbg_user, vha, 0x70c0, 828 "elreq.options=%04x\n", elreq.options); 829 830 if (elreq.options == EXTERNAL_LOOPBACK) 831 if (IS_QLA8031(ha) || IS_QLA8044(ha)) 832 rval = qla81xx_set_loopback_mode(vha, 833 config, new_config, elreq.options); 834 else 835 rval = qla81xx_reset_loopback_mode(vha, 836 config, 1, 0); 837 else 838 rval = qla81xx_set_loopback_mode(vha, config, 839 new_config, elreq.options); 840 841 if (rval) { 842 rval = -EPERM; 843 goto done_free_dma_rsp; 844 } 845 846 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 847 ql_dbg(ql_dbg_user, vha, 0x7028, 848 "BSG request type: %s.\n", type); 849 850 command_sent = INT_DEF_LB_LOOPBACK_CMD; 851 rval = qla2x00_loopback_test(vha, &elreq, response); 852 853 if (response[0] == MBS_COMMAND_ERROR && 854 response[1] == MBS_LB_RESET) { 855 ql_log(ql_log_warn, vha, 0x7029, 856 "MBX command error, Aborting ISP.\n"); 857 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 858 qla2xxx_wake_dpc(vha); 859 qla2x00_wait_for_chip_reset(vha); 860 /* Also reset the MPI */ 861 if (IS_QLA81XX(ha)) { 862 if (qla81xx_restart_mpi_firmware(vha) != 863 QLA_SUCCESS) { 864 ql_log(ql_log_warn, vha, 0x702a, 865 "MPI reset failed.\n"); 866 } 867 } 868 869 rval = -EIO; 870 goto done_free_dma_rsp; 871 } 872 873 if (new_config[0]) { 874 int ret; 875 876 /* Revert back to original port config 877 * Also clear internal loopback 878 */ 879 ret = qla81xx_reset_loopback_mode(vha, 880 new_config, 0, 1); 881 if (ret) { 882 /* 883 * If the reset of the loopback mode 884 * doesn't work take FCoE dump and then 885 * reset the chip. 886 */ 887 ha->isp_ops->fw_dump(vha, 0); 888 set_bit(ISP_ABORT_NEEDED, 889 &vha->dpc_flags); 890 } 891 892 } 893 894 } else { 895 type = "FC_BSG_HST_VENDOR_LOOPBACK"; 896 ql_dbg(ql_dbg_user, vha, 0x702b, 897 "BSG request type: %s.\n", type); 898 command_sent = INT_DEF_LB_LOOPBACK_CMD; 899 rval = qla2x00_loopback_test(vha, &elreq, response); 900 } 901 } 902 903 if (rval) { 904 ql_log(ql_log_warn, vha, 0x702c, 905 "Vendor request %s failed.\n", type); 906 907 rval = 0; 908 bsg_reply->result = (DID_ERROR << 16); 909 bsg_reply->reply_payload_rcv_len = 0; 910 } else { 911 ql_dbg(ql_dbg_user, vha, 0x702d, 912 "Vendor request %s completed.\n", type); 913 bsg_reply->result = (DID_OK << 16); 914 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 915 bsg_job->reply_payload.sg_cnt, rsp_data, 916 rsp_data_len); 917 } 918 919 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 920 sizeof(response) + sizeof(uint8_t); 921 fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply); 922 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response, 923 sizeof(response)); 924 fw_sts_ptr += sizeof(response); 925 *fw_sts_ptr = command_sent; 926 927 done_free_dma_rsp: 928 dma_free_coherent(&ha->pdev->dev, rsp_data_len, 929 rsp_data, rsp_data_dma); 930 done_free_dma_req: 931 dma_free_coherent(&ha->pdev->dev, req_data_len, 932 req_data, req_data_dma); 933 done_unmap_sg: 934 dma_unmap_sg(&ha->pdev->dev, 935 bsg_job->reply_payload.sg_list, 936 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 937 done_unmap_req_sg: 938 dma_unmap_sg(&ha->pdev->dev, 939 bsg_job->request_payload.sg_list, 940 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 941 if (!rval) 942 bsg_job_done(bsg_job, bsg_reply->result, 943 bsg_reply->reply_payload_rcv_len); 944 return rval; 945 } 946 947 static int 948 qla84xx_reset(struct bsg_job *bsg_job) 949 { 950 struct fc_bsg_request *bsg_request = bsg_job->request; 951 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 952 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 953 scsi_qla_host_t *vha = shost_priv(host); 954 struct qla_hw_data *ha = vha->hw; 955 int rval = 0; 956 uint32_t flag; 957 958 if (!IS_QLA84XX(ha)) { 959 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n"); 960 return -EINVAL; 961 } 962 963 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 964 965 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW); 966 967 if (rval) { 968 ql_log(ql_log_warn, vha, 0x7030, 969 "Vendor request 84xx reset failed.\n"); 970 rval = (DID_ERROR << 16); 971 972 } else { 973 ql_dbg(ql_dbg_user, vha, 0x7031, 974 "Vendor request 84xx reset completed.\n"); 975 bsg_reply->result = DID_OK; 976 bsg_job_done(bsg_job, bsg_reply->result, 977 bsg_reply->reply_payload_rcv_len); 978 } 979 980 return rval; 981 } 982 983 static int 984 qla84xx_updatefw(struct bsg_job *bsg_job) 985 { 986 struct fc_bsg_request *bsg_request = bsg_job->request; 987 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 988 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 989 scsi_qla_host_t *vha = shost_priv(host); 990 struct qla_hw_data *ha = vha->hw; 991 struct verify_chip_entry_84xx *mn = NULL; 992 dma_addr_t mn_dma, fw_dma; 993 void *fw_buf = NULL; 994 int rval = 0; 995 uint32_t sg_cnt; 996 uint32_t data_len; 997 uint16_t options; 998 uint32_t flag; 999 uint32_t fw_ver; 1000 1001 if (!IS_QLA84XX(ha)) { 1002 ql_dbg(ql_dbg_user, vha, 0x7032, 1003 "Not 84xx, exiting.\n"); 1004 return -EINVAL; 1005 } 1006 1007 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1008 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1009 if (!sg_cnt) { 1010 ql_log(ql_log_warn, vha, 0x7033, 1011 "dma_map_sg returned %d for request.\n", sg_cnt); 1012 return -ENOMEM; 1013 } 1014 1015 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1016 ql_log(ql_log_warn, vha, 0x7034, 1017 "DMA mapping resulted in different sg counts, " 1018 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", 1019 bsg_job->request_payload.sg_cnt, sg_cnt); 1020 rval = -EAGAIN; 1021 goto done_unmap_sg; 1022 } 1023 1024 data_len = bsg_job->request_payload.payload_len; 1025 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len, 1026 &fw_dma, GFP_KERNEL); 1027 if (!fw_buf) { 1028 ql_log(ql_log_warn, vha, 0x7035, 1029 "DMA alloc failed for fw_buf.\n"); 1030 rval = -ENOMEM; 1031 goto done_unmap_sg; 1032 } 1033 1034 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1035 bsg_job->request_payload.sg_cnt, fw_buf, data_len); 1036 1037 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1038 if (!mn) { 1039 ql_log(ql_log_warn, vha, 0x7036, 1040 "DMA alloc failed for fw buffer.\n"); 1041 rval = -ENOMEM; 1042 goto done_free_fw_buf; 1043 } 1044 1045 flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1046 fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2); 1047 1048 mn->entry_type = VERIFY_CHIP_IOCB_TYPE; 1049 mn->entry_count = 1; 1050 1051 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA; 1052 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD) 1053 options |= VCO_DIAG_FW; 1054 1055 mn->options = cpu_to_le16(options); 1056 mn->fw_ver = cpu_to_le32(fw_ver); 1057 mn->fw_size = cpu_to_le32(data_len); 1058 mn->fw_seq_size = cpu_to_le32(data_len); 1059 put_unaligned_le64(fw_dma, &mn->dsd.address); 1060 mn->dsd.length = cpu_to_le32(data_len); 1061 mn->data_seg_cnt = cpu_to_le16(1); 1062 1063 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); 1064 1065 if (rval) { 1066 ql_log(ql_log_warn, vha, 0x7037, 1067 "Vendor request 84xx updatefw failed.\n"); 1068 1069 rval = (DID_ERROR << 16); 1070 } else { 1071 ql_dbg(ql_dbg_user, vha, 0x7038, 1072 "Vendor request 84xx updatefw completed.\n"); 1073 1074 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1075 bsg_reply->result = DID_OK; 1076 } 1077 1078 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 1079 1080 done_free_fw_buf: 1081 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma); 1082 1083 done_unmap_sg: 1084 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1085 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1086 1087 if (!rval) 1088 bsg_job_done(bsg_job, bsg_reply->result, 1089 bsg_reply->reply_payload_rcv_len); 1090 return rval; 1091 } 1092 1093 static int 1094 qla84xx_mgmt_cmd(struct bsg_job *bsg_job) 1095 { 1096 struct fc_bsg_request *bsg_request = bsg_job->request; 1097 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1098 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1099 scsi_qla_host_t *vha = shost_priv(host); 1100 struct qla_hw_data *ha = vha->hw; 1101 struct access_chip_84xx *mn = NULL; 1102 dma_addr_t mn_dma, mgmt_dma; 1103 void *mgmt_b = NULL; 1104 int rval = 0; 1105 struct qla_bsg_a84_mgmt *ql84_mgmt; 1106 uint32_t sg_cnt; 1107 uint32_t data_len = 0; 1108 uint32_t dma_direction = DMA_NONE; 1109 1110 if (!IS_QLA84XX(ha)) { 1111 ql_log(ql_log_warn, vha, 0x703a, 1112 "Not 84xx, exiting.\n"); 1113 return -EINVAL; 1114 } 1115 1116 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 1117 if (!mn) { 1118 ql_log(ql_log_warn, vha, 0x703c, 1119 "DMA alloc failed for fw buffer.\n"); 1120 return -ENOMEM; 1121 } 1122 1123 mn->entry_type = ACCESS_CHIP_IOCB_TYPE; 1124 mn->entry_count = 1; 1125 ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request); 1126 switch (ql84_mgmt->mgmt.cmd) { 1127 case QLA84_MGMT_READ_MEM: 1128 case QLA84_MGMT_GET_INFO: 1129 sg_cnt = dma_map_sg(&ha->pdev->dev, 1130 bsg_job->reply_payload.sg_list, 1131 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1132 if (!sg_cnt) { 1133 ql_log(ql_log_warn, vha, 0x703d, 1134 "dma_map_sg returned %d for reply.\n", sg_cnt); 1135 rval = -ENOMEM; 1136 goto exit_mgmt; 1137 } 1138 1139 dma_direction = DMA_FROM_DEVICE; 1140 1141 if (sg_cnt != bsg_job->reply_payload.sg_cnt) { 1142 ql_log(ql_log_warn, vha, 0x703e, 1143 "DMA mapping resulted in different sg counts, " 1144 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n", 1145 bsg_job->reply_payload.sg_cnt, sg_cnt); 1146 rval = -EAGAIN; 1147 goto done_unmap_sg; 1148 } 1149 1150 data_len = bsg_job->reply_payload.payload_len; 1151 1152 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1153 &mgmt_dma, GFP_KERNEL); 1154 if (!mgmt_b) { 1155 ql_log(ql_log_warn, vha, 0x703f, 1156 "DMA alloc failed for mgmt_b.\n"); 1157 rval = -ENOMEM; 1158 goto done_unmap_sg; 1159 } 1160 1161 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) { 1162 mn->options = cpu_to_le16(ACO_DUMP_MEMORY); 1163 mn->parameter1 = 1164 cpu_to_le32( 1165 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); 1166 1167 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) { 1168 mn->options = cpu_to_le16(ACO_REQUEST_INFO); 1169 mn->parameter1 = 1170 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type); 1171 1172 mn->parameter2 = 1173 cpu_to_le32( 1174 ql84_mgmt->mgmt.mgmtp.u.info.context); 1175 } 1176 break; 1177 1178 case QLA84_MGMT_WRITE_MEM: 1179 sg_cnt = dma_map_sg(&ha->pdev->dev, 1180 bsg_job->request_payload.sg_list, 1181 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1182 1183 if (!sg_cnt) { 1184 ql_log(ql_log_warn, vha, 0x7040, 1185 "dma_map_sg returned %d.\n", sg_cnt); 1186 rval = -ENOMEM; 1187 goto exit_mgmt; 1188 } 1189 1190 dma_direction = DMA_TO_DEVICE; 1191 1192 if (sg_cnt != bsg_job->request_payload.sg_cnt) { 1193 ql_log(ql_log_warn, vha, 0x7041, 1194 "DMA mapping resulted in different sg counts, " 1195 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n", 1196 bsg_job->request_payload.sg_cnt, sg_cnt); 1197 rval = -EAGAIN; 1198 goto done_unmap_sg; 1199 } 1200 1201 data_len = bsg_job->request_payload.payload_len; 1202 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len, 1203 &mgmt_dma, GFP_KERNEL); 1204 if (!mgmt_b) { 1205 ql_log(ql_log_warn, vha, 0x7042, 1206 "DMA alloc failed for mgmt_b.\n"); 1207 rval = -ENOMEM; 1208 goto done_unmap_sg; 1209 } 1210 1211 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1212 bsg_job->request_payload.sg_cnt, mgmt_b, data_len); 1213 1214 mn->options = cpu_to_le16(ACO_LOAD_MEMORY); 1215 mn->parameter1 = 1216 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr); 1217 break; 1218 1219 case QLA84_MGMT_CHNG_CONFIG: 1220 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM); 1221 mn->parameter1 = 1222 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id); 1223 1224 mn->parameter2 = 1225 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0); 1226 1227 mn->parameter3 = 1228 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1); 1229 break; 1230 1231 default: 1232 rval = -EIO; 1233 goto exit_mgmt; 1234 } 1235 1236 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) { 1237 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len); 1238 mn->dseg_count = cpu_to_le16(1); 1239 put_unaligned_le64(mgmt_dma, &mn->dsd.address); 1240 mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len); 1241 } 1242 1243 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0); 1244 1245 if (rval) { 1246 ql_log(ql_log_warn, vha, 0x7043, 1247 "Vendor request 84xx mgmt failed.\n"); 1248 1249 rval = (DID_ERROR << 16); 1250 1251 } else { 1252 ql_dbg(ql_dbg_user, vha, 0x7044, 1253 "Vendor request 84xx mgmt completed.\n"); 1254 1255 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1256 bsg_reply->result = DID_OK; 1257 1258 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) || 1259 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) { 1260 bsg_reply->reply_payload_rcv_len = 1261 bsg_job->reply_payload.payload_len; 1262 1263 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1264 bsg_job->reply_payload.sg_cnt, mgmt_b, 1265 data_len); 1266 } 1267 } 1268 1269 done_unmap_sg: 1270 if (mgmt_b) 1271 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma); 1272 1273 if (dma_direction == DMA_TO_DEVICE) 1274 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1275 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1276 else if (dma_direction == DMA_FROM_DEVICE) 1277 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, 1278 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1279 1280 exit_mgmt: 1281 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 1282 1283 if (!rval) 1284 bsg_job_done(bsg_job, bsg_reply->result, 1285 bsg_reply->reply_payload_rcv_len); 1286 return rval; 1287 } 1288 1289 static int 1290 qla24xx_iidma(struct bsg_job *bsg_job) 1291 { 1292 struct fc_bsg_request *bsg_request = bsg_job->request; 1293 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1294 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1295 scsi_qla_host_t *vha = shost_priv(host); 1296 int rval = 0; 1297 struct qla_port_param *port_param = NULL; 1298 fc_port_t *fcport = NULL; 1299 int found = 0; 1300 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1301 uint8_t *rsp_ptr = NULL; 1302 1303 if (!IS_IIDMA_CAPABLE(vha->hw)) { 1304 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); 1305 return -EINVAL; 1306 } 1307 1308 port_param = (void *)bsg_request + sizeof(struct fc_bsg_request); 1309 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) { 1310 ql_log(ql_log_warn, vha, 0x7048, 1311 "Invalid destination type.\n"); 1312 return -EINVAL; 1313 } 1314 1315 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1316 if (fcport->port_type != FCT_TARGET) 1317 continue; 1318 1319 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn, 1320 fcport->port_name, sizeof(fcport->port_name))) 1321 continue; 1322 1323 found = 1; 1324 break; 1325 } 1326 1327 if (!found) { 1328 ql_log(ql_log_warn, vha, 0x7049, 1329 "Failed to find port.\n"); 1330 return -EINVAL; 1331 } 1332 1333 if (atomic_read(&fcport->state) != FCS_ONLINE) { 1334 ql_log(ql_log_warn, vha, 0x704a, 1335 "Port is not online.\n"); 1336 return -EINVAL; 1337 } 1338 1339 if (fcport->flags & FCF_LOGIN_NEEDED) { 1340 ql_log(ql_log_warn, vha, 0x704b, 1341 "Remote port not logged in flags = 0x%x.\n", fcport->flags); 1342 return -EINVAL; 1343 } 1344 1345 if (port_param->mode) 1346 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, 1347 port_param->speed, mb); 1348 else 1349 rval = qla2x00_get_idma_speed(vha, fcport->loop_id, 1350 &port_param->speed, mb); 1351 1352 if (rval) { 1353 ql_log(ql_log_warn, vha, 0x704c, 1354 "iiDMA cmd failed for %8phN -- " 1355 "%04x %x %04x %04x.\n", fcport->port_name, 1356 rval, fcport->fp_speed, mb[0], mb[1]); 1357 rval = (DID_ERROR << 16); 1358 } else { 1359 if (!port_param->mode) { 1360 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + 1361 sizeof(struct qla_port_param); 1362 1363 rsp_ptr = ((uint8_t *)bsg_reply) + 1364 sizeof(struct fc_bsg_reply); 1365 1366 memcpy(rsp_ptr, port_param, 1367 sizeof(struct qla_port_param)); 1368 } 1369 1370 bsg_reply->result = DID_OK; 1371 bsg_job_done(bsg_job, bsg_reply->result, 1372 bsg_reply->reply_payload_rcv_len); 1373 } 1374 1375 return rval; 1376 } 1377 1378 static int 1379 qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha, 1380 uint8_t is_update) 1381 { 1382 struct fc_bsg_request *bsg_request = bsg_job->request; 1383 uint32_t start = 0; 1384 int valid = 0; 1385 struct qla_hw_data *ha = vha->hw; 1386 1387 if (unlikely(pci_channel_offline(ha->pdev))) 1388 return -EINVAL; 1389 1390 start = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1391 if (start > ha->optrom_size) { 1392 ql_log(ql_log_warn, vha, 0x7055, 1393 "start %d > optrom_size %d.\n", start, ha->optrom_size); 1394 return -EINVAL; 1395 } 1396 1397 if (ha->optrom_state != QLA_SWAITING) { 1398 ql_log(ql_log_info, vha, 0x7056, 1399 "optrom_state %d.\n", ha->optrom_state); 1400 return -EBUSY; 1401 } 1402 1403 ha->optrom_region_start = start; 1404 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update); 1405 if (is_update) { 1406 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) 1407 valid = 1; 1408 else if (start == (ha->flt_region_boot * 4) || 1409 start == (ha->flt_region_fw * 4)) 1410 valid = 1; 1411 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 1412 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 1413 IS_QLA28XX(ha)) 1414 valid = 1; 1415 if (!valid) { 1416 ql_log(ql_log_warn, vha, 0x7058, 1417 "Invalid start region 0x%x/0x%x.\n", start, 1418 bsg_job->request_payload.payload_len); 1419 return -EINVAL; 1420 } 1421 1422 ha->optrom_region_size = start + 1423 bsg_job->request_payload.payload_len > ha->optrom_size ? 1424 ha->optrom_size - start : 1425 bsg_job->request_payload.payload_len; 1426 ha->optrom_state = QLA_SWRITING; 1427 } else { 1428 ha->optrom_region_size = start + 1429 bsg_job->reply_payload.payload_len > ha->optrom_size ? 1430 ha->optrom_size - start : 1431 bsg_job->reply_payload.payload_len; 1432 ha->optrom_state = QLA_SREADING; 1433 } 1434 1435 ha->optrom_buffer = vzalloc(ha->optrom_region_size); 1436 if (!ha->optrom_buffer) { 1437 ql_log(ql_log_warn, vha, 0x7059, 1438 "Read: Unable to allocate memory for optrom retrieval " 1439 "(%x)\n", ha->optrom_region_size); 1440 1441 ha->optrom_state = QLA_SWAITING; 1442 return -ENOMEM; 1443 } 1444 1445 return 0; 1446 } 1447 1448 static int 1449 qla2x00_read_optrom(struct bsg_job *bsg_job) 1450 { 1451 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1452 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1453 scsi_qla_host_t *vha = shost_priv(host); 1454 struct qla_hw_data *ha = vha->hw; 1455 int rval = 0; 1456 1457 if (ha->flags.nic_core_reset_hdlr_active) 1458 return -EBUSY; 1459 1460 mutex_lock(&ha->optrom_mutex); 1461 rval = qla2x00_optrom_setup(bsg_job, vha, 0); 1462 if (rval) { 1463 mutex_unlock(&ha->optrom_mutex); 1464 return rval; 1465 } 1466 1467 ha->isp_ops->read_optrom(vha, ha->optrom_buffer, 1468 ha->optrom_region_start, ha->optrom_region_size); 1469 1470 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1471 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, 1472 ha->optrom_region_size); 1473 1474 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size; 1475 bsg_reply->result = DID_OK; 1476 vfree(ha->optrom_buffer); 1477 ha->optrom_buffer = NULL; 1478 ha->optrom_state = QLA_SWAITING; 1479 mutex_unlock(&ha->optrom_mutex); 1480 bsg_job_done(bsg_job, bsg_reply->result, 1481 bsg_reply->reply_payload_rcv_len); 1482 return rval; 1483 } 1484 1485 static int 1486 qla2x00_update_optrom(struct bsg_job *bsg_job) 1487 { 1488 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1489 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1490 scsi_qla_host_t *vha = shost_priv(host); 1491 struct qla_hw_data *ha = vha->hw; 1492 int rval = 0; 1493 1494 mutex_lock(&ha->optrom_mutex); 1495 rval = qla2x00_optrom_setup(bsg_job, vha, 1); 1496 if (rval) { 1497 mutex_unlock(&ha->optrom_mutex); 1498 return rval; 1499 } 1500 1501 /* Set the isp82xx_no_md_cap not to capture minidump */ 1502 ha->flags.isp82xx_no_md_cap = 1; 1503 1504 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1505 bsg_job->request_payload.sg_cnt, ha->optrom_buffer, 1506 ha->optrom_region_size); 1507 1508 ha->isp_ops->write_optrom(vha, ha->optrom_buffer, 1509 ha->optrom_region_start, ha->optrom_region_size); 1510 1511 bsg_reply->result = DID_OK; 1512 vfree(ha->optrom_buffer); 1513 ha->optrom_buffer = NULL; 1514 ha->optrom_state = QLA_SWAITING; 1515 mutex_unlock(&ha->optrom_mutex); 1516 bsg_job_done(bsg_job, bsg_reply->result, 1517 bsg_reply->reply_payload_rcv_len); 1518 return rval; 1519 } 1520 1521 static int 1522 qla2x00_update_fru_versions(struct bsg_job *bsg_job) 1523 { 1524 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1525 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1526 scsi_qla_host_t *vha = shost_priv(host); 1527 struct qla_hw_data *ha = vha->hw; 1528 int rval = 0; 1529 uint8_t bsg[DMA_POOL_SIZE]; 1530 struct qla_image_version_list *list = (void *)bsg; 1531 struct qla_image_version *image; 1532 uint32_t count; 1533 dma_addr_t sfp_dma; 1534 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1535 1536 if (!sfp) { 1537 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1538 EXT_STATUS_NO_MEMORY; 1539 goto done; 1540 } 1541 1542 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1543 bsg_job->request_payload.sg_cnt, list, sizeof(bsg)); 1544 1545 image = list->version; 1546 count = list->count; 1547 while (count--) { 1548 memcpy(sfp, &image->field_info, sizeof(image->field_info)); 1549 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1550 image->field_address.device, image->field_address.offset, 1551 sizeof(image->field_info), image->field_address.option); 1552 if (rval) { 1553 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1554 EXT_STATUS_MAILBOX; 1555 goto dealloc; 1556 } 1557 image++; 1558 } 1559 1560 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1561 1562 dealloc: 1563 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1564 1565 done: 1566 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1567 bsg_reply->result = DID_OK << 16; 1568 bsg_job_done(bsg_job, bsg_reply->result, 1569 bsg_reply->reply_payload_rcv_len); 1570 1571 return 0; 1572 } 1573 1574 static int 1575 qla2x00_read_fru_status(struct bsg_job *bsg_job) 1576 { 1577 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1578 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1579 scsi_qla_host_t *vha = shost_priv(host); 1580 struct qla_hw_data *ha = vha->hw; 1581 int rval = 0; 1582 uint8_t bsg[DMA_POOL_SIZE]; 1583 struct qla_status_reg *sr = (void *)bsg; 1584 dma_addr_t sfp_dma; 1585 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1586 1587 if (!sfp) { 1588 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1589 EXT_STATUS_NO_MEMORY; 1590 goto done; 1591 } 1592 1593 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1594 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); 1595 1596 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 1597 sr->field_address.device, sr->field_address.offset, 1598 sizeof(sr->status_reg), sr->field_address.option); 1599 sr->status_reg = *sfp; 1600 1601 if (rval) { 1602 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1603 EXT_STATUS_MAILBOX; 1604 goto dealloc; 1605 } 1606 1607 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1608 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); 1609 1610 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1611 1612 dealloc: 1613 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1614 1615 done: 1616 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1617 bsg_reply->reply_payload_rcv_len = sizeof(*sr); 1618 bsg_reply->result = DID_OK << 16; 1619 bsg_job_done(bsg_job, bsg_reply->result, 1620 bsg_reply->reply_payload_rcv_len); 1621 1622 return 0; 1623 } 1624 1625 static int 1626 qla2x00_write_fru_status(struct bsg_job *bsg_job) 1627 { 1628 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1629 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1630 scsi_qla_host_t *vha = shost_priv(host); 1631 struct qla_hw_data *ha = vha->hw; 1632 int rval = 0; 1633 uint8_t bsg[DMA_POOL_SIZE]; 1634 struct qla_status_reg *sr = (void *)bsg; 1635 dma_addr_t sfp_dma; 1636 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1637 1638 if (!sfp) { 1639 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1640 EXT_STATUS_NO_MEMORY; 1641 goto done; 1642 } 1643 1644 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1645 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); 1646 1647 *sfp = sr->status_reg; 1648 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1649 sr->field_address.device, sr->field_address.offset, 1650 sizeof(sr->status_reg), sr->field_address.option); 1651 1652 if (rval) { 1653 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1654 EXT_STATUS_MAILBOX; 1655 goto dealloc; 1656 } 1657 1658 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1659 1660 dealloc: 1661 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1662 1663 done: 1664 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1665 bsg_reply->result = DID_OK << 16; 1666 bsg_job_done(bsg_job, bsg_reply->result, 1667 bsg_reply->reply_payload_rcv_len); 1668 1669 return 0; 1670 } 1671 1672 static int 1673 qla2x00_write_i2c(struct bsg_job *bsg_job) 1674 { 1675 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1676 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1677 scsi_qla_host_t *vha = shost_priv(host); 1678 struct qla_hw_data *ha = vha->hw; 1679 int rval = 0; 1680 uint8_t bsg[DMA_POOL_SIZE]; 1681 struct qla_i2c_access *i2c = (void *)bsg; 1682 dma_addr_t sfp_dma; 1683 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1684 1685 if (!sfp) { 1686 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1687 EXT_STATUS_NO_MEMORY; 1688 goto done; 1689 } 1690 1691 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1692 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); 1693 1694 memcpy(sfp, i2c->buffer, i2c->length); 1695 rval = qla2x00_write_sfp(vha, sfp_dma, sfp, 1696 i2c->device, i2c->offset, i2c->length, i2c->option); 1697 1698 if (rval) { 1699 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1700 EXT_STATUS_MAILBOX; 1701 goto dealloc; 1702 } 1703 1704 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1705 1706 dealloc: 1707 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1708 1709 done: 1710 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1711 bsg_reply->result = DID_OK << 16; 1712 bsg_job_done(bsg_job, bsg_reply->result, 1713 bsg_reply->reply_payload_rcv_len); 1714 1715 return 0; 1716 } 1717 1718 static int 1719 qla2x00_read_i2c(struct bsg_job *bsg_job) 1720 { 1721 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1722 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1723 scsi_qla_host_t *vha = shost_priv(host); 1724 struct qla_hw_data *ha = vha->hw; 1725 int rval = 0; 1726 uint8_t bsg[DMA_POOL_SIZE]; 1727 struct qla_i2c_access *i2c = (void *)bsg; 1728 dma_addr_t sfp_dma; 1729 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); 1730 1731 if (!sfp) { 1732 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1733 EXT_STATUS_NO_MEMORY; 1734 goto done; 1735 } 1736 1737 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1738 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c)); 1739 1740 rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 1741 i2c->device, i2c->offset, i2c->length, i2c->option); 1742 1743 if (rval) { 1744 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 1745 EXT_STATUS_MAILBOX; 1746 goto dealloc; 1747 } 1748 1749 memcpy(i2c->buffer, sfp, i2c->length); 1750 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 1751 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c)); 1752 1753 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0; 1754 1755 dealloc: 1756 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); 1757 1758 done: 1759 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1760 bsg_reply->reply_payload_rcv_len = sizeof(*i2c); 1761 bsg_reply->result = DID_OK << 16; 1762 bsg_job_done(bsg_job, bsg_reply->result, 1763 bsg_reply->reply_payload_rcv_len); 1764 1765 return 0; 1766 } 1767 1768 static int 1769 qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) 1770 { 1771 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 1772 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1773 scsi_qla_host_t *vha = shost_priv(host); 1774 struct qla_hw_data *ha = vha->hw; 1775 uint32_t rval = EXT_STATUS_OK; 1776 uint16_t req_sg_cnt = 0; 1777 uint16_t rsp_sg_cnt = 0; 1778 uint16_t nextlid = 0; 1779 uint32_t tot_dsds; 1780 srb_t *sp = NULL; 1781 uint32_t req_data_len; 1782 uint32_t rsp_data_len; 1783 1784 /* Check the type of the adapter */ 1785 if (!IS_BIDI_CAPABLE(ha)) { 1786 ql_log(ql_log_warn, vha, 0x70a0, 1787 "This adapter is not supported\n"); 1788 rval = EXT_STATUS_NOT_SUPPORTED; 1789 goto done; 1790 } 1791 1792 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 1793 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || 1794 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) { 1795 rval = EXT_STATUS_BUSY; 1796 goto done; 1797 } 1798 1799 /* Check if host is online */ 1800 if (!vha->flags.online) { 1801 ql_log(ql_log_warn, vha, 0x70a1, 1802 "Host is not online\n"); 1803 rval = EXT_STATUS_DEVICE_OFFLINE; 1804 goto done; 1805 } 1806 1807 /* Check if cable is plugged in or not */ 1808 if (vha->device_flags & DFLG_NO_CABLE) { 1809 ql_log(ql_log_warn, vha, 0x70a2, 1810 "Cable is unplugged...\n"); 1811 rval = EXT_STATUS_INVALID_CFG; 1812 goto done; 1813 } 1814 1815 /* Check if the switch is connected or not */ 1816 if (ha->current_topology != ISP_CFG_F) { 1817 ql_log(ql_log_warn, vha, 0x70a3, 1818 "Host is not connected to the switch\n"); 1819 rval = EXT_STATUS_INVALID_CFG; 1820 goto done; 1821 } 1822 1823 /* Check if operating mode is P2P */ 1824 if (ha->operating_mode != P2P) { 1825 ql_log(ql_log_warn, vha, 0x70a4, 1826 "Host operating mode is not P2p\n"); 1827 rval = EXT_STATUS_INVALID_CFG; 1828 goto done; 1829 } 1830 1831 mutex_lock(&ha->selflogin_lock); 1832 if (vha->self_login_loop_id == 0) { 1833 /* Initialize all required fields of fcport */ 1834 vha->bidir_fcport.vha = vha; 1835 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa; 1836 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area; 1837 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain; 1838 vha->bidir_fcport.loop_id = vha->loop_id; 1839 1840 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) { 1841 ql_log(ql_log_warn, vha, 0x70a7, 1842 "Failed to login port %06X for bidirectional IOCB\n", 1843 vha->bidir_fcport.d_id.b24); 1844 mutex_unlock(&ha->selflogin_lock); 1845 rval = EXT_STATUS_MAILBOX; 1846 goto done; 1847 } 1848 vha->self_login_loop_id = nextlid - 1; 1849 1850 } 1851 /* Assign the self login loop id to fcport */ 1852 mutex_unlock(&ha->selflogin_lock); 1853 1854 vha->bidir_fcport.loop_id = vha->self_login_loop_id; 1855 1856 req_sg_cnt = dma_map_sg(&ha->pdev->dev, 1857 bsg_job->request_payload.sg_list, 1858 bsg_job->request_payload.sg_cnt, 1859 DMA_TO_DEVICE); 1860 1861 if (!req_sg_cnt) { 1862 rval = EXT_STATUS_NO_MEMORY; 1863 goto done; 1864 } 1865 1866 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 1867 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, 1868 DMA_FROM_DEVICE); 1869 1870 if (!rsp_sg_cnt) { 1871 rval = EXT_STATUS_NO_MEMORY; 1872 goto done_unmap_req_sg; 1873 } 1874 1875 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) || 1876 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { 1877 ql_dbg(ql_dbg_user, vha, 0x70a9, 1878 "Dma mapping resulted in different sg counts " 1879 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: " 1880 "%x dma_reply_sg_cnt: %x]\n", 1881 bsg_job->request_payload.sg_cnt, req_sg_cnt, 1882 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 1883 rval = EXT_STATUS_NO_MEMORY; 1884 goto done_unmap_sg; 1885 } 1886 1887 req_data_len = bsg_job->request_payload.payload_len; 1888 rsp_data_len = bsg_job->reply_payload.payload_len; 1889 1890 if (req_data_len != rsp_data_len) { 1891 rval = EXT_STATUS_BUSY; 1892 ql_log(ql_log_warn, vha, 0x70aa, 1893 "req_data_len != rsp_data_len\n"); 1894 goto done_unmap_sg; 1895 } 1896 1897 /* Alloc SRB structure */ 1898 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); 1899 if (!sp) { 1900 ql_dbg(ql_dbg_user, vha, 0x70ac, 1901 "Alloc SRB structure failed\n"); 1902 rval = EXT_STATUS_NO_MEMORY; 1903 goto done_unmap_sg; 1904 } 1905 1906 /*Populate srb->ctx with bidir ctx*/ 1907 sp->u.bsg_job = bsg_job; 1908 sp->free = qla2x00_bsg_sp_free; 1909 sp->type = SRB_BIDI_CMD; 1910 sp->done = qla2x00_bsg_job_done; 1911 1912 /* Add the read and write sg count */ 1913 tot_dsds = rsp_sg_cnt + req_sg_cnt; 1914 1915 rval = qla2x00_start_bidir(sp, vha, tot_dsds); 1916 if (rval != EXT_STATUS_OK) 1917 goto done_free_srb; 1918 /* the bsg request will be completed in the interrupt handler */ 1919 return rval; 1920 1921 done_free_srb: 1922 mempool_free(sp, ha->srb_mempool); 1923 done_unmap_sg: 1924 dma_unmap_sg(&ha->pdev->dev, 1925 bsg_job->reply_payload.sg_list, 1926 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1927 done_unmap_req_sg: 1928 dma_unmap_sg(&ha->pdev->dev, 1929 bsg_job->request_payload.sg_list, 1930 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1931 done: 1932 1933 /* Return an error vendor specific response 1934 * and complete the bsg request 1935 */ 1936 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1937 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1938 bsg_reply->reply_payload_rcv_len = 0; 1939 bsg_reply->result = (DID_OK) << 16; 1940 bsg_job_done(bsg_job, bsg_reply->result, 1941 bsg_reply->reply_payload_rcv_len); 1942 /* Always return success, vendor rsp carries correct status */ 1943 return 0; 1944 } 1945 1946 static int 1947 qlafx00_mgmt_cmd(struct bsg_job *bsg_job) 1948 { 1949 struct fc_bsg_request *bsg_request = bsg_job->request; 1950 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 1951 scsi_qla_host_t *vha = shost_priv(host); 1952 struct qla_hw_data *ha = vha->hw; 1953 int rval = (DRIVER_ERROR << 16); 1954 struct qla_mt_iocb_rqst_fx00 *piocb_rqst; 1955 srb_t *sp; 1956 int req_sg_cnt = 0, rsp_sg_cnt = 0; 1957 struct fc_port *fcport; 1958 char *type = "FC_BSG_HST_FX_MGMT"; 1959 1960 /* Copy the IOCB specific information */ 1961 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *) 1962 &bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 1963 1964 /* Dump the vendor information */ 1965 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, 1966 piocb_rqst, sizeof(*piocb_rqst)); 1967 1968 if (!vha->flags.online) { 1969 ql_log(ql_log_warn, vha, 0x70d0, 1970 "Host is not online.\n"); 1971 rval = -EIO; 1972 goto done; 1973 } 1974 1975 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) { 1976 req_sg_cnt = dma_map_sg(&ha->pdev->dev, 1977 bsg_job->request_payload.sg_list, 1978 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 1979 if (!req_sg_cnt) { 1980 ql_log(ql_log_warn, vha, 0x70c7, 1981 "dma_map_sg return %d for request\n", req_sg_cnt); 1982 rval = -ENOMEM; 1983 goto done; 1984 } 1985 } 1986 1987 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) { 1988 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, 1989 bsg_job->reply_payload.sg_list, 1990 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 1991 if (!rsp_sg_cnt) { 1992 ql_log(ql_log_warn, vha, 0x70c8, 1993 "dma_map_sg return %d for reply\n", rsp_sg_cnt); 1994 rval = -ENOMEM; 1995 goto done_unmap_req_sg; 1996 } 1997 } 1998 1999 ql_dbg(ql_dbg_user, vha, 0x70c9, 2000 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " 2001 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, 2002 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); 2003 2004 /* Allocate a dummy fcport structure, since functions preparing the 2005 * IOCB and mailbox command retrieves port specific information 2006 * from fcport structure. For Host based ELS commands there will be 2007 * no fcport structure allocated 2008 */ 2009 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 2010 if (!fcport) { 2011 ql_log(ql_log_warn, vha, 0x70ca, 2012 "Failed to allocate fcport.\n"); 2013 rval = -ENOMEM; 2014 goto done_unmap_rsp_sg; 2015 } 2016 2017 /* Alloc SRB structure */ 2018 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); 2019 if (!sp) { 2020 ql_log(ql_log_warn, vha, 0x70cb, 2021 "qla2x00_get_sp failed.\n"); 2022 rval = -ENOMEM; 2023 goto done_free_fcport; 2024 } 2025 2026 /* Initialize all required fields of fcport */ 2027 fcport->vha = vha; 2028 fcport->loop_id = piocb_rqst->dataword; 2029 2030 sp->type = SRB_FXIOCB_BCMD; 2031 sp->name = "bsg_fx_mgmt"; 2032 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); 2033 sp->u.bsg_job = bsg_job; 2034 sp->free = qla2x00_bsg_sp_free; 2035 sp->done = qla2x00_bsg_job_done; 2036 2037 ql_dbg(ql_dbg_user, vha, 0x70cc, 2038 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n", 2039 type, piocb_rqst->func_type, fcport->loop_id); 2040 2041 rval = qla2x00_start_sp(sp); 2042 if (rval != QLA_SUCCESS) { 2043 ql_log(ql_log_warn, vha, 0x70cd, 2044 "qla2x00_start_sp failed=%d.\n", rval); 2045 mempool_free(sp, ha->srb_mempool); 2046 rval = -EIO; 2047 goto done_free_fcport; 2048 } 2049 return rval; 2050 2051 done_free_fcport: 2052 kfree(fcport); 2053 2054 done_unmap_rsp_sg: 2055 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) 2056 dma_unmap_sg(&ha->pdev->dev, 2057 bsg_job->reply_payload.sg_list, 2058 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); 2059 done_unmap_req_sg: 2060 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) 2061 dma_unmap_sg(&ha->pdev->dev, 2062 bsg_job->request_payload.sg_list, 2063 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); 2064 2065 done: 2066 return rval; 2067 } 2068 2069 static int 2070 qla26xx_serdes_op(struct bsg_job *bsg_job) 2071 { 2072 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2073 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2074 scsi_qla_host_t *vha = shost_priv(host); 2075 int rval = 0; 2076 struct qla_serdes_reg sr; 2077 2078 memset(&sr, 0, sizeof(sr)); 2079 2080 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2081 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); 2082 2083 switch (sr.cmd) { 2084 case INT_SC_SERDES_WRITE_REG: 2085 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); 2086 bsg_reply->reply_payload_rcv_len = 0; 2087 break; 2088 case INT_SC_SERDES_READ_REG: 2089 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); 2090 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2091 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); 2092 bsg_reply->reply_payload_rcv_len = sizeof(sr); 2093 break; 2094 default: 2095 ql_dbg(ql_dbg_user, vha, 0x708c, 2096 "Unknown serdes cmd %x.\n", sr.cmd); 2097 rval = -EINVAL; 2098 break; 2099 } 2100 2101 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2102 rval ? EXT_STATUS_MAILBOX : 0; 2103 2104 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2105 bsg_reply->result = DID_OK << 16; 2106 bsg_job_done(bsg_job, bsg_reply->result, 2107 bsg_reply->reply_payload_rcv_len); 2108 return 0; 2109 } 2110 2111 static int 2112 qla8044_serdes_op(struct bsg_job *bsg_job) 2113 { 2114 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2115 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2116 scsi_qla_host_t *vha = shost_priv(host); 2117 int rval = 0; 2118 struct qla_serdes_reg_ex sr; 2119 2120 memset(&sr, 0, sizeof(sr)); 2121 2122 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2123 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); 2124 2125 switch (sr.cmd) { 2126 case INT_SC_SERDES_WRITE_REG: 2127 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); 2128 bsg_reply->reply_payload_rcv_len = 0; 2129 break; 2130 case INT_SC_SERDES_READ_REG: 2131 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); 2132 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2133 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); 2134 bsg_reply->reply_payload_rcv_len = sizeof(sr); 2135 break; 2136 default: 2137 ql_dbg(ql_dbg_user, vha, 0x7020, 2138 "Unknown serdes cmd %x.\n", sr.cmd); 2139 rval = -EINVAL; 2140 break; 2141 } 2142 2143 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2144 rval ? EXT_STATUS_MAILBOX : 0; 2145 2146 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2147 bsg_reply->result = DID_OK << 16; 2148 bsg_job_done(bsg_job, bsg_reply->result, 2149 bsg_reply->reply_payload_rcv_len); 2150 return 0; 2151 } 2152 2153 static int 2154 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job) 2155 { 2156 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2157 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2158 scsi_qla_host_t *vha = shost_priv(host); 2159 struct qla_hw_data *ha = vha->hw; 2160 struct qla_flash_update_caps cap; 2161 2162 if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha)) 2163 return -EPERM; 2164 2165 memset(&cap, 0, sizeof(cap)); 2166 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 | 2167 (uint64_t)ha->fw_attributes_ext[0] << 32 | 2168 (uint64_t)ha->fw_attributes_h << 16 | 2169 (uint64_t)ha->fw_attributes; 2170 2171 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2172 bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap)); 2173 bsg_reply->reply_payload_rcv_len = sizeof(cap); 2174 2175 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2176 EXT_STATUS_OK; 2177 2178 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2179 bsg_reply->result = DID_OK << 16; 2180 bsg_job_done(bsg_job, bsg_reply->result, 2181 bsg_reply->reply_payload_rcv_len); 2182 return 0; 2183 } 2184 2185 static int 2186 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job) 2187 { 2188 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2189 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2190 scsi_qla_host_t *vha = shost_priv(host); 2191 struct qla_hw_data *ha = vha->hw; 2192 uint64_t online_fw_attr = 0; 2193 struct qla_flash_update_caps cap; 2194 2195 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 2196 return -EPERM; 2197 2198 memset(&cap, 0, sizeof(cap)); 2199 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2200 bsg_job->request_payload.sg_cnt, &cap, sizeof(cap)); 2201 2202 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 | 2203 (uint64_t)ha->fw_attributes_ext[0] << 32 | 2204 (uint64_t)ha->fw_attributes_h << 16 | 2205 (uint64_t)ha->fw_attributes; 2206 2207 if (online_fw_attr != cap.capabilities) { 2208 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2209 EXT_STATUS_INVALID_PARAM; 2210 return -EINVAL; 2211 } 2212 2213 if (cap.outage_duration < MAX_LOOP_TIMEOUT) { 2214 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2215 EXT_STATUS_INVALID_PARAM; 2216 return -EINVAL; 2217 } 2218 2219 bsg_reply->reply_payload_rcv_len = 0; 2220 2221 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2222 EXT_STATUS_OK; 2223 2224 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2225 bsg_reply->result = DID_OK << 16; 2226 bsg_job_done(bsg_job, bsg_reply->result, 2227 bsg_reply->reply_payload_rcv_len); 2228 return 0; 2229 } 2230 2231 static int 2232 qla27xx_get_bbcr_data(struct bsg_job *bsg_job) 2233 { 2234 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2235 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2236 scsi_qla_host_t *vha = shost_priv(host); 2237 struct qla_hw_data *ha = vha->hw; 2238 struct qla_bbcr_data bbcr; 2239 uint16_t loop_id, topo, sw_cap; 2240 uint8_t domain, area, al_pa, state; 2241 int rval; 2242 2243 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 2244 return -EPERM; 2245 2246 memset(&bbcr, 0, sizeof(bbcr)); 2247 2248 if (vha->flags.bbcr_enable) 2249 bbcr.status = QLA_BBCR_STATUS_ENABLED; 2250 else 2251 bbcr.status = QLA_BBCR_STATUS_DISABLED; 2252 2253 if (bbcr.status == QLA_BBCR_STATUS_ENABLED) { 2254 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa, 2255 &area, &domain, &topo, &sw_cap); 2256 if (rval != QLA_SUCCESS) { 2257 bbcr.status = QLA_BBCR_STATUS_UNKNOWN; 2258 bbcr.state = QLA_BBCR_STATE_OFFLINE; 2259 bbcr.mbx1 = loop_id; 2260 goto done; 2261 } 2262 2263 state = (vha->bbcr >> 12) & 0x1; 2264 2265 if (state) { 2266 bbcr.state = QLA_BBCR_STATE_OFFLINE; 2267 bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT; 2268 } else { 2269 bbcr.state = QLA_BBCR_STATE_ONLINE; 2270 bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf; 2271 } 2272 2273 bbcr.configured_bbscn = vha->bbcr & 0xf; 2274 } 2275 2276 done: 2277 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2278 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr)); 2279 bsg_reply->reply_payload_rcv_len = sizeof(bbcr); 2280 2281 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2282 2283 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2284 bsg_reply->result = DID_OK << 16; 2285 bsg_job_done(bsg_job, bsg_reply->result, 2286 bsg_reply->reply_payload_rcv_len); 2287 return 0; 2288 } 2289 2290 static int 2291 qla2x00_get_priv_stats(struct bsg_job *bsg_job) 2292 { 2293 struct fc_bsg_request *bsg_request = bsg_job->request; 2294 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2295 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2296 scsi_qla_host_t *vha = shost_priv(host); 2297 struct qla_hw_data *ha = vha->hw; 2298 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 2299 struct link_statistics *stats = NULL; 2300 dma_addr_t stats_dma; 2301 int rval; 2302 uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd; 2303 uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0; 2304 2305 if (test_bit(UNLOADING, &vha->dpc_flags)) 2306 return -ENODEV; 2307 2308 if (unlikely(pci_channel_offline(ha->pdev))) 2309 return -ENODEV; 2310 2311 if (qla2x00_reset_active(vha)) 2312 return -EBUSY; 2313 2314 if (!IS_FWI2_CAPABLE(ha)) 2315 return -EPERM; 2316 2317 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, 2318 GFP_KERNEL); 2319 if (!stats) { 2320 ql_log(ql_log_warn, vha, 0x70e2, 2321 "Failed to allocate memory for stats.\n"); 2322 return -ENOMEM; 2323 } 2324 2325 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options); 2326 2327 if (rval == QLA_SUCCESS) { 2328 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5, 2329 stats, sizeof(*stats)); 2330 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2331 bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); 2332 } 2333 2334 bsg_reply->reply_payload_rcv_len = sizeof(*stats); 2335 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2336 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; 2337 2338 bsg_job->reply_len = sizeof(*bsg_reply); 2339 bsg_reply->result = DID_OK << 16; 2340 bsg_job_done(bsg_job, bsg_reply->result, 2341 bsg_reply->reply_payload_rcv_len); 2342 2343 dma_free_coherent(&ha->pdev->dev, sizeof(*stats), 2344 stats, stats_dma); 2345 2346 return 0; 2347 } 2348 2349 static int 2350 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) 2351 { 2352 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2353 struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); 2354 scsi_qla_host_t *vha = shost_priv(host); 2355 int rval; 2356 struct qla_dport_diag *dd; 2357 2358 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && 2359 !IS_QLA28XX(vha->hw)) 2360 return -EPERM; 2361 2362 dd = kmalloc(sizeof(*dd), GFP_KERNEL); 2363 if (!dd) { 2364 ql_log(ql_log_warn, vha, 0x70db, 2365 "Failed to allocate memory for dport.\n"); 2366 return -ENOMEM; 2367 } 2368 2369 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 2370 bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); 2371 2372 rval = qla26xx_dport_diagnostics( 2373 vha, dd->buf, sizeof(dd->buf), dd->options); 2374 if (rval == QLA_SUCCESS) { 2375 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2376 bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); 2377 } 2378 2379 bsg_reply->reply_payload_rcv_len = sizeof(*dd); 2380 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 2381 rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK; 2382 2383 bsg_job->reply_len = sizeof(*bsg_reply); 2384 bsg_reply->result = DID_OK << 16; 2385 bsg_job_done(bsg_job, bsg_reply->result, 2386 bsg_reply->reply_payload_rcv_len); 2387 2388 kfree(dd); 2389 2390 return 0; 2391 } 2392 2393 static int 2394 qla2x00_get_flash_image_status(struct bsg_job *bsg_job) 2395 { 2396 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 2397 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2398 struct qla_hw_data *ha = vha->hw; 2399 struct qla_active_regions regions = { }; 2400 struct active_regions active_regions = { }; 2401 2402 qla28xx_get_aux_images(vha, &active_regions); 2403 regions.global_image = active_regions.global; 2404 2405 if (IS_QLA28XX(ha)) { 2406 qla27xx_get_active_image(vha, &active_regions); 2407 regions.board_config = active_regions.aux.board_config; 2408 regions.vpd_nvram = active_regions.aux.vpd_nvram; 2409 regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1; 2410 regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3; 2411 } 2412 2413 ql_dbg(ql_dbg_user, vha, 0x70e1, 2414 "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n", 2415 __func__, vha->host_no, regions.global_image, 2416 regions.board_config, regions.vpd_nvram, 2417 regions.npiv_config_0_1, regions.npiv_config_2_3); 2418 2419 sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2420 bsg_job->reply_payload.sg_cnt, ®ions, sizeof(regions)); 2421 2422 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; 2423 bsg_reply->reply_payload_rcv_len = sizeof(regions); 2424 bsg_reply->result = DID_OK << 16; 2425 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2426 bsg_job_done(bsg_job, bsg_reply->result, 2427 bsg_reply->reply_payload_rcv_len); 2428 2429 return 0; 2430 } 2431 2432 static int 2433 qla2x00_process_vendor_specific(struct bsg_job *bsg_job) 2434 { 2435 struct fc_bsg_request *bsg_request = bsg_job->request; 2436 2437 switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) { 2438 case QL_VND_LOOPBACK: 2439 return qla2x00_process_loopback(bsg_job); 2440 2441 case QL_VND_A84_RESET: 2442 return qla84xx_reset(bsg_job); 2443 2444 case QL_VND_A84_UPDATE_FW: 2445 return qla84xx_updatefw(bsg_job); 2446 2447 case QL_VND_A84_MGMT_CMD: 2448 return qla84xx_mgmt_cmd(bsg_job); 2449 2450 case QL_VND_IIDMA: 2451 return qla24xx_iidma(bsg_job); 2452 2453 case QL_VND_FCP_PRIO_CFG_CMD: 2454 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); 2455 2456 case QL_VND_READ_FLASH: 2457 return qla2x00_read_optrom(bsg_job); 2458 2459 case QL_VND_UPDATE_FLASH: 2460 return qla2x00_update_optrom(bsg_job); 2461 2462 case QL_VND_SET_FRU_VERSION: 2463 return qla2x00_update_fru_versions(bsg_job); 2464 2465 case QL_VND_READ_FRU_STATUS: 2466 return qla2x00_read_fru_status(bsg_job); 2467 2468 case QL_VND_WRITE_FRU_STATUS: 2469 return qla2x00_write_fru_status(bsg_job); 2470 2471 case QL_VND_WRITE_I2C: 2472 return qla2x00_write_i2c(bsg_job); 2473 2474 case QL_VND_READ_I2C: 2475 return qla2x00_read_i2c(bsg_job); 2476 2477 case QL_VND_DIAG_IO_CMD: 2478 return qla24xx_process_bidir_cmd(bsg_job); 2479 2480 case QL_VND_FX00_MGMT_CMD: 2481 return qlafx00_mgmt_cmd(bsg_job); 2482 2483 case QL_VND_SERDES_OP: 2484 return qla26xx_serdes_op(bsg_job); 2485 2486 case QL_VND_SERDES_OP_EX: 2487 return qla8044_serdes_op(bsg_job); 2488 2489 case QL_VND_GET_FLASH_UPDATE_CAPS: 2490 return qla27xx_get_flash_upd_cap(bsg_job); 2491 2492 case QL_VND_SET_FLASH_UPDATE_CAPS: 2493 return qla27xx_set_flash_upd_cap(bsg_job); 2494 2495 case QL_VND_GET_BBCR_DATA: 2496 return qla27xx_get_bbcr_data(bsg_job); 2497 2498 case QL_VND_GET_PRIV_STATS: 2499 case QL_VND_GET_PRIV_STATS_EX: 2500 return qla2x00_get_priv_stats(bsg_job); 2501 2502 case QL_VND_DPORT_DIAGNOSTICS: 2503 return qla2x00_do_dport_diagnostics(bsg_job); 2504 2505 case QL_VND_SS_GET_FLASH_IMAGE_STATUS: 2506 return qla2x00_get_flash_image_status(bsg_job); 2507 2508 default: 2509 return -ENOSYS; 2510 } 2511 } 2512 2513 int 2514 qla24xx_bsg_request(struct bsg_job *bsg_job) 2515 { 2516 struct fc_bsg_request *bsg_request = bsg_job->request; 2517 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2518 int ret = -EINVAL; 2519 struct fc_rport *rport; 2520 struct Scsi_Host *host; 2521 scsi_qla_host_t *vha; 2522 2523 /* In case no data transferred. */ 2524 bsg_reply->reply_payload_rcv_len = 0; 2525 2526 if (bsg_request->msgcode == FC_BSG_RPT_ELS) { 2527 rport = fc_bsg_to_rport(bsg_job); 2528 host = rport_to_shost(rport); 2529 vha = shost_priv(host); 2530 } else { 2531 host = fc_bsg_to_shost(bsg_job); 2532 vha = shost_priv(host); 2533 } 2534 2535 if (qla2x00_chip_is_down(vha)) { 2536 ql_dbg(ql_dbg_user, vha, 0x709f, 2537 "BSG: ISP abort active/needed -- cmd=%d.\n", 2538 bsg_request->msgcode); 2539 return -EBUSY; 2540 } 2541 2542 ql_dbg(ql_dbg_user, vha, 0x7000, 2543 "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode); 2544 2545 switch (bsg_request->msgcode) { 2546 case FC_BSG_RPT_ELS: 2547 case FC_BSG_HST_ELS_NOLOGIN: 2548 ret = qla2x00_process_els(bsg_job); 2549 break; 2550 case FC_BSG_HST_CT: 2551 ret = qla2x00_process_ct(bsg_job); 2552 break; 2553 case FC_BSG_HST_VENDOR: 2554 ret = qla2x00_process_vendor_specific(bsg_job); 2555 break; 2556 case FC_BSG_HST_ADD_RPORT: 2557 case FC_BSG_HST_DEL_RPORT: 2558 case FC_BSG_RPT_CT: 2559 default: 2560 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); 2561 break; 2562 } 2563 return ret; 2564 } 2565 2566 int 2567 qla24xx_bsg_timeout(struct bsg_job *bsg_job) 2568 { 2569 struct fc_bsg_reply *bsg_reply = bsg_job->reply; 2570 scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); 2571 struct qla_hw_data *ha = vha->hw; 2572 srb_t *sp; 2573 int cnt, que; 2574 unsigned long flags; 2575 struct req_que *req; 2576 2577 /* find the bsg job from the active list of commands */ 2578 spin_lock_irqsave(&ha->hardware_lock, flags); 2579 for (que = 0; que < ha->max_req_queues; que++) { 2580 req = ha->req_q_map[que]; 2581 if (!req) 2582 continue; 2583 2584 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 2585 sp = req->outstanding_cmds[cnt]; 2586 if (sp) { 2587 if (((sp->type == SRB_CT_CMD) || 2588 (sp->type == SRB_ELS_CMD_HST) || 2589 (sp->type == SRB_FXIOCB_BCMD)) 2590 && (sp->u.bsg_job == bsg_job)) { 2591 req->outstanding_cmds[cnt] = NULL; 2592 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2593 if (ha->isp_ops->abort_command(sp)) { 2594 ql_log(ql_log_warn, vha, 0x7089, 2595 "mbx abort_command " 2596 "failed.\n"); 2597 bsg_reply->result = -EIO; 2598 } else { 2599 ql_dbg(ql_dbg_user, vha, 0x708a, 2600 "mbx abort_command " 2601 "success.\n"); 2602 bsg_reply->result = 0; 2603 } 2604 spin_lock_irqsave(&ha->hardware_lock, flags); 2605 goto done; 2606 } 2607 } 2608 } 2609 } 2610 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2611 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); 2612 bsg_reply->result = -ENXIO; 2613 return 0; 2614 2615 done: 2616 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2617 sp->free(sp); 2618 return 0; 2619 } 2620