/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/bsg-lib.h>

/* BSG support for ELS/CT pass through */
void qla2x00_bsg_job_done(srb_t *sp, int res)
{
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;

	bsg_reply->result = res;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	sp->free(sp);
}

void qla2x00_bsg_sp_free(srb_t *sp)
{
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		qla2x00_free_fcport(sp->fcport);

	qla2x00_rel_sp(sp);
}

int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
	struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (memcmp(bcode, "HQOS", 4)) {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}
	return ret;
}

static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *)ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid, fcp_prio_cfg
			 * is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
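	/*
	 * Only successful paths complete the bsg job here; on failure the
	 * nonzero return value is presumably handed back to the FC bsg
	 * layer, which fails the request on the driver's behalf.
	 */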
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return ret;
}

static int
qla2x00_process_els(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DID_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since the functions
		 * preparing the IOCB and mailbox command retrieve port
		 * specific information from the fcport structure. For host
		 * based ELS commands no fcport structure is allocated.
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_request->rqst_data.h_els.port_id[2];
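		/*
		 * Well-known fabric addresses: AL_PA 0xFD corresponds to
		 * 0xFFFFFD (Fabric Controller); anything else on this host
		 * based path is addressed as the F_Port (0xFFFFFE), so the
		 * fixed 24xx N-Port handle is chosen accordingly.
		 */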
		fcport->loop_id = (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		/* only the request side is mapped at this point */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_request->msgcode == FC_BSG_RPT_ELS ?
	     "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done_free_fcport:
	/* Only the dummy fcport allocated for host based ELS is freed;
	 * an rport's fcport belongs to the transport layer.
	 */
	if (bsg_request->msgcode != FC_BSG_RPT_ELS)
		qla2x00_free_fcport(fcport);
done:
	return rval;
}

/*
 * A 24xx command IOCB carries two data segment descriptors (DSDs); each
 * continuation IOCB carries five more. For example, 8 DSDs need
 * 1 + (6 / 5) + 1 = 3 IOCBs.
 */
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

static int
qla2x00_process_ct(struct bsg_job *bsg_job)
{
	srb_t *sp;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg returned %d for request.\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
		/* undo the request side mapping before bailing out */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}
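	/*
	 * The GS_Type byte of the CT preamble selects the destination
	 * server: 0xFC is the Directory/Name Server (well-known address
	 * 0xFFFFFC) and 0xFA the Management Server (0xFFFFFA).
	 */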
	loop_id =
	    (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = NPH_SNS;
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based CT commands no fcport
	 * structure is allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;
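	/*
	 * Clearing the loopback bits in config word 0 and writing the port
	 * configuration back takes the CNA out of internal or external
	 * loopback; the DCBX and port-up completions below confirm that
	 * the link state change actually happened.
	 */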
	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}
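	/*
	 * The firmware may ask for more time through an IDC time-extension
	 * AEN (ha->idc_extend_tmo); the loop above re-arms the wait with
	 * the extended timeout until the completion arrives or the wait
	 * finally times out.
	 */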
	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work, take an
		 * FCoE dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}

static int
qla2x00_process_loopback(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	memset(&elreq, 0, sizeof(elreq));

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_request->rqst_data.h_vendor.vendor_cmd[2];
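	/*
	 * When the link is up with the external loopback option set, and
	 * either the topology is a switched fabric or the payload looks
	 * like a full-sized ELS ECHO frame, run the mailbox ECHO
	 * diagnostic; everything else goes through the loopback test path.
	 */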
	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	     (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE &&
	      req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK) {
				if (IS_QLA8031(ha) || IS_QLA8044(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			} else {
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);
			}

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work, take an FCoE dump and
					 * then reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	/* Return the mailbox response and the command code after the
	 * fc_bsg_reply header.
	 */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla84xx_reset(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}
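	/*
	 * The replacement firmware image arrives in the bsg request
	 * payload; it is staged in a single coherent DMA buffer and handed
	 * to the 84xx with a VERIFY_CHIP IOCB below.
	 */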
	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for verify-chip IOCB.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	/* The firmware version lives in dword 2 of the image */
	fw_ver = get_unaligned_le32((uint32_t *)fw_buf + 2);

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	put_unaligned_le64(fw_dma, &mn->dsd.address);
	mn->dsd.length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for access-chip IOCB.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
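	/* The vendor-specific management block sits immediately after the
	 * fc_bsg_request header in the bsg request buffer.
	 */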
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* Config changes carry no payload; everything else needs a DSD */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		put_unaligned_le64(mgmt_dma, &mn->dsd.address);
		mn->dsd.length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}
	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla24xx_iidma(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL;
	int found = 0;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	/* Find the target port by WWPN */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    fcport->port_name, sizeof(fcport->port_name)))
			continue;

		found = 1;
		break;
	}

	if (!found) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in, flags = 0x%x.\n",
		    fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iiDMA cmd failed for %8phN -- "
		    "%04x %x %04x %04x.\n", fcport->port_name,
		    rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);
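			/* For a get (mode == 0), return the updated
			 * qla_port_param right after the fc_bsg_reply
			 * header.
			 */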
			rsp_ptr = ((uint8_t *)bsg_reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_reply->result = DID_OK;
		bsg_job_done(bsg_job, bsg_reply->result,
		    bsg_reply->reply_payload_rcv_len);
	}

	return rval;
}

static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}
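	/*
	 * The region size is clamped so start + size never runs past the
	 * end of the option ROM; optrom_state serializes access to the
	 * staging buffer.
	 */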
	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}

static int
qla2x00_read_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_optrom(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	mutex_lock(&ha->optrom_mutex);
	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval) {
		mutex_unlock(&ha->optrom_mutex);
		return rval;
	}

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	mutex_unlock(&ha->optrom_mutex);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return rval;
}

static int
qla2x00_update_fru_versions(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
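	/* Write each image version field to its FRU through the SFP
	 * mailbox interface; bail out on the first mailbox failure.
	 */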
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_fru_status(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_write_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla2x00_read_i2c(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}

static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;
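	/*
	 * Bidirectional pass-through needs a bidi-capable ISP that is
	 * online, cabled, attached to a switched fabric in point-to-point
	 * mode, and not in the middle of an ISP abort; check all of that
	 * up front.
	 */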
	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;

	}
	mutex_unlock(&ha->selflogin_lock);

	/* Assign the self login loop id to fcport */
	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}
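	/*
	 * The bidirectional IOCB moves the same byte count in each
	 * direction, hence the equal-length requirement above; the total
	 * DSD count is simply the sum of the read and write scatter/gather
	 * segments.
	 */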
	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate srb->ctx with bidir ctx */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}

static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg returned %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
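	/*
	 * For ISPFx00 the caller flags which directions carry a payload;
	 * only the sides marked SRB_FXDISC_*_DMA_VALID were mapped above,
	 * and only those are unmapped again on the error paths below.
	 */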
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
	    piocb_rqst, sizeof(*piocb_rqst));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg returned %d for request\n",
			    req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg returned %d for reply\n",
			    rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host-based ELS commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	qla2x00_free_fcport(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
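/*
 * qla26xx_serdes_op() - Read or write a single SerDes register.  The
 * request payload carries a struct qla_serdes_reg (cmd/addr/val); on a
 * read, the structure is copied back through the reply payload with val
 * filled in.
 *
 * Hypothetical userspace sketch (not part of this driver): fill the
 * structure, e.g.
 *	struct qla_serdes_reg sr = {
 *		.cmd = INT_SC_SERDES_READ_REG,
 *		.addr = <register address>,
 *	};
 * and submit it as an FC_BSG_HST_VENDOR request with
 * vendor_cmd[0] == QL_VND_SERDES_OP.
 */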
static int
qla26xx_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x708c,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

/* ISP8044 variant: same flow as qla26xx_serdes_op(), wider register struct. */
static int
qla8044_serdes_op(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_serdes_reg_ex sr;

	memset(&sr, 0, sizeof(sr));

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));

	switch (sr.cmd) {
	case INT_SC_SERDES_WRITE_REG:
		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
		bsg_reply->reply_payload_rcv_len = 0;
		break;
	case INT_SC_SERDES_READ_REG:
		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
		bsg_reply->reply_payload_rcv_len = sizeof(sr);
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7020,
		    "Unknown serdes cmd %x.\n", sr.cmd);
		rval = -EINVAL;
		break;
	}

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : 0;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

/* Report the firmware's 64-bit flash update capabilities word. */
static int
qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
	bsg_reply->reply_payload_rcv_len = sizeof(cap);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
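/*
 * qla27xx_set_flash_upd_cap() - Validate a flash-update capabilities
 * request.  The caller must echo back the 64-bit capabilities word the
 * firmware currently reports and request an outage duration of at least
 * MAX_LOOP_TIMEOUT; anything else is rejected with
 * EXT_STATUS_INVALID_PARAM.
 */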
static int
qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint64_t online_fw_attr = 0;
	struct qla_flash_update_caps cap;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&cap, 0, sizeof(cap));
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));

	online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
	    (uint64_t)ha->fw_attributes_ext[0] << 32 |
	    (uint64_t)ha->fw_attributes_h << 16 |
	    (uint64_t)ha->fw_attributes;

	if (online_fw_attr != cap.capabilities) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	if (cap.outage_duration < MAX_LOOP_TIMEOUT) {
		bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_INVALID_PARAM;
		return -EINVAL;
	}

	bsg_reply->reply_payload_rcv_len = 0;

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}

/* Report buffer-to-buffer credit recovery (BBCR) status and state. */
static int
qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct qla_bbcr_data bbcr;
	uint16_t loop_id, topo, sw_cap;
	uint8_t domain, area, al_pa, state;
	int rval;

	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return -EPERM;

	memset(&bbcr, 0, sizeof(bbcr));

	if (vha->flags.bbcr_enable)
		bbcr.status = QLA_BBCR_STATUS_ENABLED;
	else
		bbcr.status = QLA_BBCR_STATUS_DISABLED;

	if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
		rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
		    &area, &domain, &topo, &sw_cap);
		if (rval != QLA_SUCCESS) {
			bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.mbx1 = loop_id;
			goto done;
		}

		state = (vha->bbcr >> 12) & 0x1;

		if (state) {
			bbcr.state = QLA_BBCR_STATE_OFFLINE;
			bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
		} else {
			bbcr.state = QLA_BBCR_STATE_ONLINE;
			bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
		}

		bbcr.configured_bbscn = vha->bbcr & 0xf;
	}

done:
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
	bsg_reply->reply_payload_rcv_len = sizeof(bbcr);

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);
	return 0;
}
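/*
 * qla2x00_get_priv_stats() - Fetch the ISP link statistics through the
 * base physical port.  QL_VND_GET_PRIV_STATS_EX additionally passes
 * option flags in vendor_cmd[1]; the plain variant uses options == 0.
 */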
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
	    GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5,
		    stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
	    stats, stats_dma);

	return 0;
}
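/*
 * qla2x00_do_dport_diagnostics() - Run D-Port diagnostics on
 * ISP83xx/27xx/28xx.  The request payload supplies a struct
 * qla_dport_diag carrying the option flags; on success the same
 * structure, with its buffer filled in, is copied back through the
 * reply payload.
 */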
static int
qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	int rval;
	struct qla_dport_diag *dd;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
	    !IS_QLA28XX(vha->hw))
		return -EPERM;

	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd) {
		ql_log(ql_log_warn, vha, 0x70db,
		    "Failed to allocate memory for dport.\n");
		return -ENOMEM;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));

	rval = qla26xx_dport_diagnostics(
	    vha, dd->buf, sizeof(dd->buf), dd->options);
	if (rval == QLA_SUCCESS) {
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*dd);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	kfree(dd);

	return 0;
}

static int
qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct qla_hw_data *ha = vha->hw;
	struct qla_active_regions regions = { };
	struct active_regions active_regions = { };

	qla27xx_get_active_image(vha, &active_regions);
	regions.global_image = active_regions.global;

	if (IS_QLA28XX(ha)) {
		/* ISP28xx tracks the auxiliary image regions separately. */
		qla28xx_get_aux_images(vha, &active_regions);
		regions.board_config = active_regions.aux.board_config;
		regions.vpd_nvram = active_regions.aux.vpd_nvram;
		regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
		regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
	}

	ql_dbg(ql_dbg_user, vha, 0x70e1,
	    "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
	    __func__, vha->host_no, regions.global_image,
	    regions.board_config, regions.vpd_nvram,
	    regions.npiv_config_0_1, regions.npiv_config_2_3);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));

	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
	bsg_reply->reply_payload_rcv_len = sizeof(regions);
	bsg_reply->result = DID_OK << 16;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job_done(bsg_job, bsg_reply->result,
	    bsg_reply->reply_payload_rcv_len);

	return 0;
}
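/*
 * qla2x00_process_vendor_specific() - Dispatch an FC_BSG_HST_VENDOR
 * request on vendor_cmd[0].  Each handler completes the bsg job itself;
 * unknown subcommands are rejected with -ENOSYS.
 */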
static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;

	switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	case QL_VND_SERDES_OP:
		return qla26xx_serdes_op(bsg_job);

	case QL_VND_SERDES_OP_EX:
		return qla8044_serdes_op(bsg_job);

	case QL_VND_GET_FLASH_UPDATE_CAPS:
		return qla27xx_get_flash_upd_cap(bsg_job);

	case QL_VND_SET_FLASH_UPDATE_CAPS:
		return qla27xx_set_flash_upd_cap(bsg_job);

	case QL_VND_GET_BBCR_DATA:
		return qla27xx_get_bbcr_data(bsg_job);

	case QL_VND_GET_PRIV_STATS:
	case QL_VND_GET_PRIV_STATS_EX:
		return qla2x00_get_priv_stats(bsg_job);

	case QL_VND_DPORT_DIAGNOSTICS:
		return qla2x00_do_dport_diagnostics(bsg_job);

	case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
		return qla2x00_get_flash_image_status(bsg_job);

	default:
		return -ENOSYS;
	}
}

/* Entry point for BSG requests handed down by the FC transport. */
int
qla24xx_bsg_request(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	int ret = -EINVAL;
	struct fc_rport *rport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_reply->reply_payload_rcv_len = 0;

	if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
		rport = fc_bsg_to_rport(bsg_job);
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = fc_bsg_to_shost(bsg_job);
		vha = shost_priv(host);
	}

	if (qla2x00_chip_is_down(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);

	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

/*
 * The FC transport calls this when a BSG request times out: find the
 * matching SRB on the outstanding-command lists and abort it.
 */
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp &&
			    (sp->type == SRB_CT_CMD ||
			     sp->type == SRB_ELS_CMD_HST ||
			     sp->type == SRB_FXIOCB_BCMD) &&
			    sp->u.bsg_job == bsg_job) {
				req->outstanding_cmds[cnt] = NULL;
				/* Drop the hardware lock across the
				 * mailbox-based abort.
				 */
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				if (ha->isp_ops->abort_command(sp)) {
					ql_log(ql_log_warn, vha, 0x7089,
					    "mbx abort_command failed.\n");
					bsg_reply->result = -EIO;
				} else {
					ql_dbg(ql_dbg_user, vha, 0x708a,
					    "mbx abort_command success.\n");
					bsg_reply->result = 0;
				}
				spin_lock_irqsave(&ha->hardware_lock, flags);
				goto done;
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(sp);
	return 0;
}