/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	bsg_job->reply->result = res;
	bsg_job->job_done(bsg_job);
	sp->free(vha, sp);
}

void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
	srb_t *sp = (srb_t *)ptr;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	struct qla_hw_data *ha = vha->hw;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	/* Free the dummy fcport allocated for host-based pass-through jobs */
	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(vha, sp);
}
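
/*
 * qla24xx_fcp_prio_cfg_valid() - sanity-check an FCP priority blob.
 *
 * The buffer is rejected when its first word is all ones (erased flash)
 * or when it does not start with the "HQOS" signature.  When @flag is
 * set, the entries are also scanned and at least one of them must have
 * FCP_PRIO_ENTRY_TAG_VALID set.  Returns 1 if the data is usable, 0
 * otherwise.
 */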
int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
	int i, ret, num_valid;
	uint8_t *bcode;
	struct qla_fcp_prio_entry *pri_entry;
	uint32_t *bcode_val_ptr, bcode_val;

	ret = 1;
	num_valid = 0;
	bcode = (uint8_t *)pri_cfg;
	bcode_val_ptr = (uint32_t *)pri_cfg;
	bcode_val = (uint32_t)(*bcode_val_ptr);

	if (bcode_val == 0xFFFFFFFF) {
		/* No FCP Priority config data in flash */
		ql_dbg(ql_dbg_user, vha, 0x7051,
		    "No FCP Priority config data.\n");
		return 0;
	}

	if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
	    bcode[3] != 'S') {
		/* Invalid FCP priority data header */
		ql_dbg(ql_dbg_user, vha, 0x7052,
		    "Invalid FCP Priority data header. bcode=0x%x.\n",
		    bcode_val);
		return 0;
	}
	if (flag != 1)
		return ret;

	pri_entry = &pri_cfg->entry[0];
	for (i = 0; i < pri_cfg->num_entries; i++) {
		if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
			num_valid++;
		pri_entry++;
	}

	if (num_valid == 0) {
		/* No valid FCP priority data entries */
		ql_dbg(ql_dbg_user, vha, 0x7053,
		    "No valid FCP Priority data entries.\n");
		ret = 0;
	} else {
		/* FCP priority data is valid */
		ql_dbg(ql_dbg_user, vha, 0x7054,
		    "Valid FCP priority data. num entries = %d.\n",
		    num_valid);
	}

	return ret;
}

static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
			    ~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
		    sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer is invalid then
			 * fcp_prio_cfg is of no use
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	if (!ret)
		bsg_job->job_done(bsg_job);
	return ret;
}
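
/*
 * Handle an ELS pass-through request (FC_BSG_RPT_ELS for a remote port,
 * FC_BSG_HST_ELS_NOLOGIN for a host-originated ELS).  The payload is
 * limited to a single scatter/gather element in each direction and the
 * request is only supported on FWI2-capable (ISP24xx and later) parts.
 */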
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval = (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
		rval = -EIO;
		goto done;
	}

	/* pass through is supported only for ISP 4Gb or higher */
	if (!IS_FWI2_CAPABLE(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7001,
		    "ELS passthru not supported for ISP23xx based adapters.\n");
		rval = -EPERM;
		goto done;
	}

	/* Multiple SG's are not supported for ELS requests */
	if (bsg_job->request_payload.sg_cnt > 1 ||
	    bsg_job->reply_payload.sg_cnt > 1) {
		ql_dbg(ql_dbg_user, vha, 0x7002,
		    "Multiple SG's are not supported for ELS requests, "
		    "request_sg_cnt=%x reply_sg_cnt=%x.\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt);
		rval = -EPERM;
		goto done;
	}

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			ql_dbg(ql_dbg_user, vha, 0x7003,
			    "Failed to login port %06X for ELS passthru.\n",
			    fcport->d_id.b24);
			rval = -EIO;
			goto done;
		}
	} else {
		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required fields of fcport */
		fcport->vha = vha;
		fcport->d_id.b.al_pa =
		    bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		/* Undo the request mapping before bailing out */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7008,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sp->type =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	sp->name =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    "bsg_els_rpt" : "bsg_els_hst");
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x700a,
	    "bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%-2x%02x%02x.\n", type,
	    bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		qla2x00_rel_sp(vha, sp);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done_free_fcport:
	/* Only free the dummy fcport allocated for host-based ELS */
	if (bsg_job->request->msgcode != FC_BSG_RPT_ELS)
		kfree(fcport);
done:
	return rval;
}
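
/*
 * Number of IOCBs needed for a CT pass-through carrying @dsds data
 * segments: the first IOCB holds two segments and each continuation
 * IOCB holds five more, so the result is 1 + ceil((dsds - 2) / 5).
 * For example, 12 segments need 1 + ceil(10 / 5) = 3 IOCBs.
 */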
inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return iocbs;
}

static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
	srb_t *sp;
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char *type = "FC_BSG_HST_CT";

	req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x700f,
		    "dma_map_sg return %d for request\n", req_sg_cnt);
		rval = -ENOMEM;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if (!rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7010,
		    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
		/* Undo the request mapping before bailing out */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		rval = -ENOMEM;
		goto done;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x7011,
		    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
		    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
		    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7012,
		    "Host is not online.\n");
		rval = -EIO;
		goto done_unmap_sg;
	}

	/* The top byte of preamble_word1 selects the CT server */
	loop_id =
	    (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
	case 0xFC:
		loop_id = cpu_to_le16(NPH_SNS);
		break;
	case 0xFA:
		loop_id = vha->mgmt_svr_loop_id;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x7013,
		    "Unknown loop id: %x.\n", loop_id);
		rval = -EINVAL;
		goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7014,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x7015,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	sp->type = SRB_CT_CMD;
	sp->name = "bsg_ct";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x7016,
	    "bsg rqst type: %s ct type: %x - "
	    "loop-id=%x portid=%02x%02x%02x.\n", type,
	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7017,
		    "qla2x00_start_sp failed=%d.\n", rval);
		qla2x00_rel_sp(vha, sp);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}
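
/*
 * Loopback support.  On CNA parts (ISP81xx/ISP8031) the loopback mode
 * lives in the first word of the four-word port-config block returned
 * by qla81xx_get_port_config(); the low-order bits covered by
 * INTERNAL_LOOPBACK_MASK select no loopback, internal loopback or
 * external loopback.  The two helpers below flip those bits and then
 * wait for the firmware to confirm the change via DCBX (and, on reset,
 * port-up) completion events.
 */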
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		goto done_reset_internal;

	memset(new_config, 0, sizeof(new_config));
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
		    (DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	    "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event */
	if (!wait_for_completion_timeout(&ha->dcbx_comp,
	    (DCBX_COMP_TIMEOUT * HZ))) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;

done_set_internal:
	return rval;
}
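
/*
 * Run a vendor-specific diagnostic (QL_VND_LOOPBACK).  The request
 * payload is copied into a DMA buffer and either echoed through the
 * fabric (ECHO ELS) or looped back at the port, depending on the
 * requested option, the current topology and the adapter type; the
 * received data and the mailbox status are returned to the caller.
 */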
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint16_t config[4], new_config[4];
	uint8_t *fw_sts_ptr;
	uint8_t *req_data = NULL;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data = NULL;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
		return -EIO;
	}

	elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!elreq.req_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701a,
		    "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
		return -ENOMEM;
	}

	elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!elreq.rsp_sg_cnt) {
		ql_log(ql_log_warn, vha, 0x701b,
		    "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
		rval = -ENOMEM;
		goto done_unmap_req_sg;
	}

	if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_log(ql_log_warn, vha, 0x701c,
		    "dma mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x "
		    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);
	if (!req_data) {
		ql_log(ql_log_warn, vha, 0x701d,
		    "dma alloc failed for req_data.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);
	if (!rsp_data) {
		ql_log(ql_log_warn, vha, 0x7004,
		    "dma alloc failed for rsp_data.\n");
		rval = -ENOMEM;
		goto done_free_dma_req;
	}

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data, req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	elreq.iteration_count =
	    bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
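
	/*
	 * Use the ECHO ELS diagnostic when the link is up in a switched
	 * (F-port) topology, or on ISP81xx/ISP8031 when the payload is a
	 * full-size ELS frame carrying the ECHO opcode; otherwise fall
	 * back to the mailbox-driven loopback test.
	 */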
	if (atomic_read(&vha->loop_state) == LOOP_READY &&
	    (ha->current_topology == ISP_CFG_F ||
	    ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
	    le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
	    && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
	    elreq.options == EXTERNAL_LOOPBACK) {
		type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
		ql_dbg(ql_dbg_user, vha, 0x701e,
		    "BSG request type: %s.\n", type);
		command_sent = INT_DEF_LB_ECHO_CMD;
		rval = qla2x00_echo_test(vha, &elreq, response);
	} else {
		if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
			memset(config, 0, sizeof(config));
			memset(new_config, 0, sizeof(new_config));

			if (qla81xx_get_port_config(vha, config)) {
				ql_log(ql_log_warn, vha, 0x701f,
				    "Get port config failed.\n");
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
				ql_dbg(ql_dbg_user, vha, 0x70c4,
				    "Loopback operation already in "
				    "progress.\n");
				rval = -EAGAIN;
				goto done_free_dma_rsp;
			}

			ql_dbg(ql_dbg_user, vha, 0x70c0,
			    "elreq.options=%04x\n", elreq.options);

			if (elreq.options == EXTERNAL_LOOPBACK) {
				if (IS_QLA8031(ha))
					rval = qla81xx_set_loopback_mode(vha,
					    config, new_config, elreq.options);
				else
					rval = qla81xx_reset_loopback_mode(vha,
					    config, 1, 0);
			} else {
				rval = qla81xx_set_loopback_mode(vha, config,
				    new_config, elreq.options);
			}

			if (rval) {
				rval = -EPERM;
				goto done_free_dma_rsp;
			}

			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x7028,
			    "BSG request type: %s.\n", type);

			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);

			if (response[0] == MBS_COMMAND_ERROR &&
			    response[1] == MBS_LB_RESET) {
				ql_log(ql_log_warn, vha, 0x7029,
				    "MBX command error, Aborting ISP.\n");
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
				qla2x00_wait_for_chip_reset(vha);
				/* Also reset the MPI */
				if (IS_QLA81XX(ha)) {
					if (qla81xx_restart_mpi_firmware(vha) !=
					    QLA_SUCCESS) {
						ql_log(ql_log_warn, vha, 0x702a,
						    "MPI reset failed.\n");
					}
				}

				rval = -EIO;
				goto done_free_dma_rsp;
			}

			if (new_config[0]) {
				int ret;

				/* Revert back to original port config
				 * Also clear internal loopback
				 */
				ret = qla81xx_reset_loopback_mode(vha,
				    new_config, 0, 1);
				if (ret) {
					/*
					 * If the reset of the loopback mode
					 * doesn't work take FCoE dump and then
					 * reset the chip.
					 */
					ha->isp_ops->fw_dump(vha, 0);
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}

			}

		} else {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";
			ql_dbg(ql_dbg_user, vha, 0x702b,
			    "BSG request type: %s.\n", type);
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x702c,
		    "Vendor request %s failed.\n", type);

		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
		bsg_job->reply->reply_payload_rcv_len = 0;
	} else {
		ql_dbg(ql_dbg_user, vha, 0x702d,
		    "Vendor request %s completed.\n", type);
		bsg_job->reply->result = (DID_OK << 16);
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, rsp_data,
		    rsp_data_len);
	}

	/* Append the mailbox response and the command sent after the reply */
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
	    sizeof(response) + sizeof(uint8_t);
	fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
	    sizeof(struct fc_bsg_reply);
	memcpy(fw_sts_ptr, response, sizeof(response));
	fw_sts_ptr += sizeof(response);
	*fw_sts_ptr = command_sent;

done_free_dma_rsp:
	dma_free_coherent(&ha->pdev->dev, rsp_data_len,
	    rsp_data, rsp_data_dma);
done_free_dma_req:
	dma_free_coherent(&ha->pdev->dev, req_data_len,
	    req_data, req_data_dma);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}
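
/*
 * QLA84xx vendor commands: chip reset, firmware update and the generic
 * management interface.  All of them are built on VERIFY_CHIP /
 * ACCESS_CHIP IOCBs issued through the mailbox interface.
 */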
static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint32_t flag;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7030,
		    "Vendor request 84xx reset failed.\n");
		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7031,
		    "Vendor request 84xx reset completed.\n");
		bsg_job->reply->result = DID_OK;
		bsg_job->job_done(bsg_job);
	}

	return rval;
}

static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
	    &fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	/* The firmware version lives in the third 32-bit word of the image */
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	mn->options = cpu_to_le16(options);
	mn->fw_ver = cpu_to_le32(fw_ver);
	mn->fw_size = cpu_to_le32(data_len);
	mn->fw_seq_size = cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}
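
/*
 * qla84xx_mgmt_cmd() - QLA84xx management pass-through.
 *
 * Wraps the ACCESS_CHIP IOCB: QLA84_MGMT_READ_MEM and QLA84_MGMT_GET_INFO
 * transfer data from the adapter (ACO_DUMP_MEMORY / ACO_REQUEST_INFO),
 * QLA84_MGMT_WRITE_MEM pushes data to it (ACO_LOAD_MEMORY), and
 * QLA84_MGMT_CHNG_CONFIG updates a config parameter without any data
 * buffer (ACO_CHANGE_CONFIG_PARAM).
 */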
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	memset(mn, 0, sizeof(struct access_chip_84xx));
	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
			    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
			    cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
		    cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_job->reply->result = DID_OK;

		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
		    (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, mgmt_b,
			    data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (!rval)
		bsg_job->job_done(bsg_job);
	return rval;
}
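
/*
 * qla24xx_iidma() - get or set the iiDMA speed of a logged-in target
 * port identified by its WWPN.  When port_param->mode is set the
 * requested speed is programmed; otherwise the current speed is read
 * back and returned in the reply buffer after the fc_bsg_reply.
 */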
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	int rval = 0;
	struct qla_port_param *port_param = NULL;
	fc_port_t *fcport = NULL, *tmp_fcport;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	uint8_t *rsp_ptr = NULL;

	if (!IS_IIDMA_CAPABLE(vha->hw)) {
		ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
		return -EINVAL;
	}

	port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
	if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
		ql_log(ql_log_warn, vha, 0x7048,
		    "Invalid destination type.\n");
		return -EINVAL;
	}

	/* Find the target port by WWPN; fcport stays NULL when not found */
	list_for_each_entry(tmp_fcport, &vha->vp_fcports, list) {
		if (tmp_fcport->port_type != FCT_TARGET)
			continue;

		if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
		    tmp_fcport->port_name, sizeof(tmp_fcport->port_name)))
			continue;

		fcport = tmp_fcport;
		break;
	}

	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x7049,
		    "Failed to find port.\n");
		return -EINVAL;
	}

	if (atomic_read(&fcport->state) != FCS_ONLINE) {
		ql_log(ql_log_warn, vha, 0x704a,
		    "Port is not online.\n");
		return -EINVAL;
	}

	if (fcport->flags & FCF_LOGIN_NEEDED) {
		ql_log(ql_log_warn, vha, 0x704b,
		    "Remote port not logged in flags = 0x%x.\n", fcport->flags);
		return -EINVAL;
	}

	if (port_param->mode)
		rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
		    port_param->speed, mb);
	else
		rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
		    &port_param->speed, mb);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x704c,
		    "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
		    "%04x %x %04x %04x.\n", fcport->port_name[0],
		    fcport->port_name[1], fcport->port_name[2],
		    fcport->port_name[3], fcport->port_name[4],
		    fcport->port_name[5], fcport->port_name[6],
		    fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
		rval = (DID_ERROR << 16);
	} else {
		if (!port_param->mode) {
			bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
			    sizeof(struct qla_port_param);

			rsp_ptr = ((uint8_t *)bsg_job->reply) +
			    sizeof(struct fc_bsg_reply);

			memcpy(rsp_ptr, port_param,
			    sizeof(struct qla_port_param));
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->job_done(bsg_job);
	}

	return rval;
}
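
/*
 * qla2x00_optrom_setup() - common setup for option-ROM read/update.
 *
 * Validates the requested flash region, claims the single option-ROM
 * state machine (ha->optrom_state) and allocates a staging buffer that
 * is clamped so the access never runs past ha->optrom_size.
 */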
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
	return 0;
}

static int
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	if (ha->flags.nic_core_reset_hdlr_active)
		return -EBUSY;

	rval = qla2x00_optrom_setup(bsg_job, vha, 0);
	if (rval)
		return rval;

	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	bsg_job->job_done(bsg_job);
	return rval;
}
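
/*
 * The update path mirrors the read path: stage the payload in
 * ha->optrom_buffer, then let the ISP-specific write_optrom method
 * burn it into flash.  Minidump capture is suppressed for the
 * duration on ISP82xx.
 */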
static int
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;

	rval = qla2x00_optrom_setup(bsg_job, vha, 1);
	if (rval)
		return rval;

	/* Set the isp82xx_no_md_cap not to capture minidump */
	ha->flags.isp82xx_no_md_cap = 1;

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
	    ha->optrom_region_size);

	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
	    ha->optrom_region_start, ha->optrom_region_size);

	bsg_job->reply->result = DID_OK;
	vfree(ha->optrom_buffer);
	ha->optrom_buffer = NULL;
	ha->optrom_state = QLA_SWAITING;
	bsg_job->job_done(bsg_job);
	return rval;
}

static int
qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_image_version_list *list = (void *)bsg;
	struct qla_image_version *image;
	uint32_t count;
	dma_addr_t sfp_dma;
	void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

	image = list->version;
	count = list->count;
	while (count--) {
		memcpy(sfp, &image->field_info, sizeof(image->field_info));
		rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
		    image->field_address.device, image->field_address.offset,
		    sizeof(image->field_info), image->field_address.option);
		if (rval) {
			bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
			    EXT_STATUS_MAILBOX;
			goto dealloc;
		}
		image++;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

	return 0;
}
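
/*
 * The FRU status and I2C helpers below share a pattern: a small
 * scratch buffer from ha->s_dma_pool is used as the DMA target for
 * qla2x00_read_sfp()/qla2x00_write_sfp(), and a vendor status word
 * (EXT_STATUS_*) is returned in the first word of the vendor reply
 * while the midlayer result is always DID_OK.
 */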
static int
qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);
	sr->status_reg = *sfp;

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

	return 0;
}

static int
qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_status_reg *sr = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

	*sfp = sr->status_reg;
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    sr->field_address.device, sr->field_address.offset,
	    sizeof(sr->status_reg), sr->field_address.option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

	return 0;
}

static int
qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	memcpy(sfp, i2c->buffer, i2c->length);
	rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

	return 0;
}

static int
qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = 0;
	uint8_t bsg[DMA_POOL_SIZE];
	struct qla_i2c_access *i2c = (void *)bsg;
	dma_addr_t sfp_dma;
	uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

	if (!sfp) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_NO_MEMORY;
		goto done;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

	rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
	    i2c->device, i2c->offset, i2c->length, i2c->option);

	if (rval) {
		bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
		    EXT_STATUS_MAILBOX;
		goto dealloc;
	}

	memcpy(i2c->buffer, sfp, i2c->length);
	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
	dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
	bsg_job->reply->result = DID_OK << 16;
	bsg_job->job_done(bsg_job);

	return 0;
}
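
/*
 * qla24xx_process_bidir_cmd() - bidirectional diagnostic pass-through.
 *
 * Requires a bidi-capable ISP that is online, cabled, attached to a
 * switch in F-port topology and operating in P2P mode.  The port logs
 * in to itself once (the self-login loop id is cached under
 * ha->selflogin_lock) and the request is then issued as a single
 * bidirectional IOCB; completion is reported from the interrupt
 * handler.  Errors are returned in the vendor response word while the
 * bsg result itself is DID_OK.
 */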
static int
qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len = 0;
	uint32_t rsp_data_len = 0;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
		    "This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
		    "Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
		    "Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
		    "Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2P\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		vha->self_login_loop_id = nextlid - 1;
	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt,
	    DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
	    DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Fetch the payload lengths before comparing them */
	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/* Populate srb->ctx with bidir ctx */
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_job->reply->reply_payload_rcv_len = 0;
	bsg_job->reply->result = (DID_OK) << 16;
	bsg_job->job_done(bsg_job);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
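
/*
 * qlafx00_mgmt_cmd() - ISPFx00 management pass-through.
 *
 * The caller embeds a qla_mt_iocb_rqst_fx00 in the vendor command
 * area; request/response buffers are only mapped when the
 * SRB_FXDISC_REQ_DMA_VALID / SRB_FXDISC_RESP_DMA_VALID flags are set,
 * and the matching unmap is done in qla2x00_bsg_sp_free().
 */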
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg returned %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since the functions that
	 * prepare the IOCB and mailbox command retrieve port-specific
	 * information from the fcport structure.  For host-based ELS
	 * commands there is no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
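/*
 * Dispatch an FC_BSG_HST_VENDOR request to its QL_VND_* handler.  The
 * vendor opcode travels in vendor_cmd[0] of the bsg request; opcodes this
 * driver does not recognize fail with -ENOSYS so user space can tell an
 * unsupported command apart from a transport error.
 */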
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	default:
		return -ENOSYS;
	}
}

int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
	int ret = -EINVAL;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_job->reply->reply_payload_rcv_len = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_job->request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);

	switch (bsg_job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				if (((sp->type == SRB_CT_CMD) ||
				    (sp->type == SRB_ELS_CMD_HST) ||
				    (sp->type == SRB_FXIOCB_BCMD))
				    && (sp->u.bsg_job == bsg_job)) {
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command failed.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command success.\n");
						bsg_job->req->errors =
						bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(vha, sp);
	return 0;
}
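/*
 * Usage note (illustrative sketch, not part of this driver): these entry
 * points are reached through the FC transport bsg node, e.g.
 * /dev/bsg/fc_hostN.  Assuming the bsg uapi headers (<linux/bsg.h>,
 * <scsi/scsi_bsg_fc.h>) and an already-opened bsg_fd, a minimal user-space
 * request would look roughly like:
 *
 *	struct fc_bsg_request req = { .msgcode = FC_BSG_HST_VENDOR };
 *	struct fc_bsg_reply reply;
 *	struct sg_io_v4 hdr = {
 *		.guard            = 'Q',
 *		.protocol         = BSG_PROTOCOL_SCSI,
 *		.subprotocol      = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
 *		.request          = (uintptr_t)&req,
 *		.request_len      = sizeof(req),
 *		.response         = (uintptr_t)&reply,
 *		.max_response_len = sizeof(reply),
 *	};
 *
 *	ioctl(bsg_fd, SG_IO, &hdr);
 *
 * qla24xx_bsg_request() then dispatches on req.msgcode as above; vendor
 * commands additionally carry their QL_VND_* opcode in
 * req.rqst_data.h_vendor.vendor_cmd[0].
 */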