/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

/* BSG support for ELS/CT pass through */
void
qla2x00_bsg_job_done(void *data, void *ptr, int res)
{
    srb_t *sp = (srb_t *)ptr;
    struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
    struct fc_bsg_job *bsg_job = sp->u.bsg_job;

    bsg_job->reply->result = res;
    bsg_job->job_done(bsg_job);
    sp->free(vha, sp);
}

void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
    srb_t *sp = (srb_t *)ptr;
    struct scsi_qla_host *vha = sp->fcport->vha;
    struct fc_bsg_job *bsg_job = sp->u.bsg_job;
    struct qla_hw_data *ha = vha->hw;
    struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

    if (sp->type == SRB_FXIOCB_BCMD) {
        piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
            &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

        if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
            dma_unmap_sg(&ha->pdev->dev,
                bsg_job->request_payload.sg_list,
                bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
            dma_unmap_sg(&ha->pdev->dev,
                bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    } else {
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    }

    if (sp->type == SRB_CT_CMD ||
        sp->type == SRB_FXIOCB_BCMD ||
        sp->type == SRB_ELS_CMD_HST)
        kfree(sp->fcport);
    qla2x00_rel_sp(vha, sp);
}

int
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
    struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
    int i, ret, num_valid;
    uint8_t *bcode;
    struct qla_fcp_prio_entry *pri_entry;
    uint32_t *bcode_val_ptr, bcode_val;

    ret = 1;
    num_valid = 0;
    bcode = (uint8_t *)pri_cfg;
    bcode_val_ptr = (uint32_t *)pri_cfg;
    bcode_val = (uint32_t)(*bcode_val_ptr);

    if (bcode_val == 0xFFFFFFFF) {
        /* No FCP Priority config data in flash */
        ql_dbg(ql_dbg_user, vha, 0x7051,
            "No FCP Priority config data.\n");
        return 0;
    }

    if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
        bcode[3] != 'S') {
        /* Invalid FCP priority data header */
        ql_dbg(ql_dbg_user, vha, 0x7052,
            "Invalid FCP Priority data header. bcode=0x%x.\n",
            bcode_val);
        return 0;
    }
    if (flag != 1)
        return ret;

    pri_entry = &pri_cfg->entry[0];
    for (i = 0; i < pri_cfg->num_entries; i++) {
        if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
            num_valid++;
        pri_entry++;
    }

    if (num_valid == 0) {
        /* No valid FCP priority data entries */
        ql_dbg(ql_dbg_user, vha, 0x7053,
            "No valid FCP Priority data entries.\n");
        ret = 0;
    } else {
        /* FCP priority data is valid */
        ql_dbg(ql_dbg_user, vha, 0x7054,
            "Valid FCP priority data. num entries = %d.\n",
            num_valid);
    }

    return ret;
}

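/*
 * Handle the QL_VND_FCP_PRIO_CFG_CMD vendor sub-commands (enable/disable
 * FCP priority, and get/set the priority configuration blob). Note that
 * the only sub-command accepted while no configuration is cached is
 * QLFC_FCP_PRIO_SET_CONFIG, since every other operation needs
 * ha->fcp_prio_cfg to already exist.
 */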
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int ret = 0;
    uint32_t len;
    uint32_t oper;

    if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }

    /* Get the sub command */
    oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    /* Only set config is allowed if config memory is not allocated */
    if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
        ret = -EINVAL;
        goto exit_fcp_prio_cfg;
    }
    switch (oper) {
    case QLFC_FCP_PRIO_DISABLE:
        if (ha->flags.fcp_prio_enabled) {
            ha->flags.fcp_prio_enabled = 0;
            ha->fcp_prio_cfg->attributes &=
                ~FCP_PRIO_ATTR_ENABLE;
            qla24xx_update_all_fcp_prio(vha);
            bsg_job->reply->result = DID_OK;
        } else {
            ret = -EINVAL;
            bsg_job->reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }
        break;

    case QLFC_FCP_PRIO_ENABLE:
        if (!ha->flags.fcp_prio_enabled) {
            if (ha->fcp_prio_cfg) {
                ha->flags.fcp_prio_enabled = 1;
                ha->fcp_prio_cfg->attributes |=
                    FCP_PRIO_ATTR_ENABLE;
                qla24xx_update_all_fcp_prio(vha);
                bsg_job->reply->result = DID_OK;
            } else {
                ret = -EINVAL;
                bsg_job->reply->result = (DID_ERROR << 16);
                goto exit_fcp_prio_cfg;
            }
        }
        break;

    case QLFC_FCP_PRIO_GET_CONFIG:
        len = bsg_job->reply_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            ret = -EINVAL;
            bsg_job->reply->result = (DID_ERROR << 16);
            goto exit_fcp_prio_cfg;
        }

        bsg_job->reply->result = DID_OK;
        bsg_job->reply->reply_payload_rcv_len =
            sg_copy_from_buffer(
                bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
                len);

        break;

    case QLFC_FCP_PRIO_SET_CONFIG:
        len = bsg_job->request_payload.payload_len;
        if (!len || len > FCP_PRIO_CFG_SIZE) {
            bsg_job->reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            goto exit_fcp_prio_cfg;
        }

        if (!ha->fcp_prio_cfg) {
            ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
            if (!ha->fcp_prio_cfg) {
                ql_log(ql_log_warn, vha, 0x7050,
                    "Unable to allocate memory for fcp prio "
                    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
                bsg_job->reply->result = (DID_ERROR << 16);
                ret = -ENOMEM;
                goto exit_fcp_prio_cfg;
            }
        }

        memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
            FCP_PRIO_CFG_SIZE);

        /* validate fcp priority data */
        if (!qla24xx_fcp_prio_cfg_valid(vha,
            (struct qla_fcp_prio_cfg *)ha->fcp_prio_cfg, 1)) {
            bsg_job->reply->result = (DID_ERROR << 16);
            ret = -EINVAL;
            /* If the buffer is invalid, the cached
             * fcp_prio_cfg is of no use.
             */
            vfree(ha->fcp_prio_cfg);
            ha->fcp_prio_cfg = NULL;
            goto exit_fcp_prio_cfg;
        }

        ha->flags.fcp_prio_enabled = 0;
        if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
            ha->flags.fcp_prio_enabled = 1;
        qla24xx_update_all_fcp_prio(vha);
        bsg_job->reply->result = DID_OK;
        break;
    default:
        ret = -EINVAL;
        break;
    }
exit_fcp_prio_cfg:
    if (!ret)
        bsg_job->job_done(bsg_job);
    return ret;
}

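/*
 * ELS pass-through entry point. For port-directed requests
 * (FC_BSG_RPT_ELS) the rport's own fcport is used, re-logging in to the
 * fabric if needed; for host-originated requests (FC_BSG_HST_ELS_NOLOGIN)
 * a throw-away fcport is allocated here, freed again on the error paths
 * below, and otherwise released by qla2x00_bsg_sp_free().
 */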
static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
    struct fc_rport *rport;
    fc_port_t *fcport = NULL;
    struct Scsi_Host *host;
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    srb_t *sp;
    const char *type;
    int req_sg_cnt, rsp_sg_cnt;
    int rval = (DRIVER_ERROR << 16);
    uint16_t nextlid = 0;

    if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
        rport = bsg_job->rport;
        fcport = *(fc_port_t **) rport->dd_data;
        host = rport_to_shost(rport);
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_RPT_ELS";
    } else {
        host = bsg_job->shost;
        vha = shost_priv(host);
        ha = vha->hw;
        type = "FC_BSG_HST_ELS_NOLOGIN";
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
        rval = -EIO;
        goto done;
    }

    /* pass through is supported only for ISP 4Gb or higher */
    if (!IS_FWI2_CAPABLE(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7001,
            "ELS passthru not supported for ISP23xx based adapters.\n");
        rval = -EPERM;
        goto done;
    }

    /* Multiple SG's are not supported for ELS requests */
    if (bsg_job->request_payload.sg_cnt > 1 ||
        bsg_job->reply_payload.sg_cnt > 1) {
        ql_dbg(ql_dbg_user, vha, 0x7002,
            "Multiple SG's are not supported for ELS requests, "
            "request_sg_cnt=%x reply_sg_cnt=%x.\n",
            bsg_job->request_payload.sg_cnt,
            bsg_job->reply_payload.sg_cnt);
        rval = -EPERM;
        goto done;
    }

    /* ELS request for rport */
    if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
        /* make sure the rport is logged in,
         * if not perform fabric login
         */
        if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
            ql_dbg(ql_dbg_user, vha, 0x7003,
                "Failed to login port %06X for ELS passthru.\n",
                fcport->d_id.b24);
            rval = -EIO;
            goto done;
        }
    } else {
        /* Allocate a dummy fcport structure, since functions
         * preparing the IOCB and mailbox command retrieve port
         * specific information from the fcport structure. For host
         * based ELS commands there will be no fcport structure
         * allocated.
         */
        fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
        if (!fcport) {
            rval = -ENOMEM;
            goto done;
        }

        /* Initialize all required fields of fcport */
        fcport->vha = vha;
        fcport->d_id.b.al_pa =
            bsg_job->request->rqst_data.h_els.port_id[0];
        fcport->d_id.b.area =
            bsg_job->request->rqst_data.h_els.port_id[1];
        fcport->d_id.b.domain =
            bsg_job->request->rqst_data.h_els.port_id[2];
        fcport->loop_id =
            (fcport->d_id.b.al_pa == 0xFD) ?
            NPH_FABRIC_CONTROLLER : NPH_F_PORT;
    }

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        /* Unmap the request list mapped above before bailing out */
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7008,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
            "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sp->type =
        (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
        SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
    sp->name =
        (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
        "bsg_els_rpt" : "bsg_els_hst");
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->done = qla2x00_bsg_job_done;

    ql_dbg(ql_dbg_user, vha, 0x700a,
        "bsg rqst type: %s els type: %x - loop-id=%x "
        "portid=%-2x%02x%02x.\n", type,
        bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
        fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x700e,
            "qla2x00_start_sp failed = %d\n", rval);
        qla2x00_rel_sp(vha, sp);
        rval = -EIO;
        goto done_unmap_sg;
    }
    return rval;

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    goto done_free_fcport;

done_free_fcport:
    /* Only the dummy fcport allocated for host-based ELS is ours to
     * free; for FC_BSG_RPT_ELS the fcport belongs to the rport.
     */
    if (bsg_job->request->msgcode != FC_BSG_RPT_ELS)
        kfree(fcport);
done:
    return rval;
}

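/*
 * Number of IOCBs needed to carry "dsds" data segment descriptors: the
 * first IOCB holds two DSDs and each continuation IOCB holds five more.
 * E.g. dsds = 8 -> 1 + (6 / 5) + 1 = 3 IOCBs.
 */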
inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
    uint16_t iocbs;

    iocbs = 1;
    if (dsds > 2) {
        iocbs += (dsds - 2) / 5;
        if ((dsds - 2) % 5)
            iocbs++;
    }
    return iocbs;
}

static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
    srb_t *sp;
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = (DRIVER_ERROR << 16);
    int req_sg_cnt, rsp_sg_cnt;
    uint16_t loop_id;
    struct fc_port *fcport;
    char *type = "FC_BSG_HST_CT";

    req_sg_cnt =
        dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x700f,
            "dma_map_sg return %d for request\n", req_sg_cnt);
        rval = -ENOMEM;
        goto done;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
    if (!rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7010,
            "dma_map_sg return %d for reply\n", rsp_sg_cnt);
        /* Unmap the request list mapped above before bailing out */
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        rval = -ENOMEM;
        goto done;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x7011,
            "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
            "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
            req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7012,
            "Host is not online.\n");
        rval = -EIO;
        goto done_unmap_sg;
    }

    loop_id =
        (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
        >> 24;
    switch (loop_id) {
    case 0xFC:
        loop_id = cpu_to_le16(NPH_SNS);
        break;
    case 0xFA:
        loop_id = vha->mgmt_svr_loop_id;
        break;
    default:
        ql_dbg(ql_dbg_user, vha, 0x7013,
            "Unknown loop id: %x.\n", loop_id);
        rval = -EINVAL;
        goto done_unmap_sg;
    }

    /* Allocate a dummy fcport structure, since functions preparing the
     * IOCB and mailbox command retrieve port specific information
     * from the fcport structure. For host based ELS commands there will
     * be no fcport structure allocated.
     */
    fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
    if (!fcport) {
        ql_log(ql_log_warn, vha, 0x7014,
            "Failed to allocate fcport.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    /* Initialize all required fields of fcport */
    fcport->vha = vha;
    fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
    fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
    fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
    fcport->loop_id = loop_id;

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp) {
        ql_log(ql_log_warn, vha, 0x7015,
            "qla2x00_get_sp failed.\n");
        rval = -ENOMEM;
        goto done_free_fcport;
    }

    sp->type = SRB_CT_CMD;
    sp->name = "bsg_ct";
    sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->done = qla2x00_bsg_job_done;

    ql_dbg(ql_dbg_user, vha, 0x7016,
        "bsg rqst type: %s ct type: %x - "
        "loop-id=%x portid=%02x%02x%02x.\n", type,
        (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
        fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
        fcport->d_id.b.al_pa);

    rval = qla2x00_start_sp(sp);
    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7017,
            "qla2x00_start_sp failed=%d.\n", rval);
        qla2x00_rel_sp(vha, sp);
        rval = -EIO;
        goto done_free_fcport;
    }
    return rval;

done_free_fcport:
    kfree(fcport);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
    return rval;
}

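/*
 * On CNA parts (81xx/8031/8044) loopback is driven through the four-word
 * port configuration returned by qla81xx_get_port_config(): the loopback
 * mode lives in the INTERNAL_LOOPBACK_MASK field of config[0]
 * (ENABLE_INTERNAL_LOOPBACK / ENABLE_EXTERNAL_LOOPBACK shifted left by
 * one), while words 1-3 are carried over unchanged when switching modes.
 */
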
/* Disable loopback mode */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    int wait, int wait2)
{
    int ret = 0;
    int rval = 0;
    uint16_t new_config[4];
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
        goto done_reset_internal;

    memset(new_config, 0, sizeof(new_config));
    if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_INTERNAL_LOOPBACK ||
        (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
        ENABLE_EXTERNAL_LOOPBACK) {
        new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
        ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
            (new_config[0] & INTERNAL_LOOPBACK_MASK));
        memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

        ha->notify_dcbx_comp = wait;
        ha->notify_lb_portup_comp = wait2;

        ret = qla81xx_set_port_config(vha, new_config);
        if (ret != QLA_SUCCESS) {
            ql_log(ql_log_warn, vha, 0x7025,
                "Set port config failed.\n");
            ha->notify_dcbx_comp = 0;
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        }

        /* Wait for DCBX complete event */
        if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
            (DCBX_COMP_TIMEOUT * HZ))) {
            ql_dbg(ql_dbg_user, vha, 0x7026,
                "DCBX completion not received.\n");
            ha->notify_dcbx_comp = 0;
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7027,
                "DCBX completion received.\n");

        if (wait2 &&
            !wait_for_completion_timeout(&ha->lb_portup_comp,
            (LB_PORTUP_COMP_TIMEOUT * HZ))) {
            ql_dbg(ql_dbg_user, vha, 0x70c5,
                "Port up completion not received.\n");
            ha->notify_lb_portup_comp = 0;
            rval = -EINVAL;
            goto done_reset_internal;
        } else
            ql_dbg(ql_dbg_user, vha, 0x70c6,
                "Port up completion received.\n");

        ha->notify_dcbx_comp = 0;
        ha->notify_lb_portup_comp = 0;
    }
done_reset_internal:
    return rval;
}

/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
    uint16_t *new_config, uint16_t mode)
{
    int ret = 0;
    int rval = 0;
    unsigned long rem_tmo = 0, current_tmo = 0;
    struct qla_hw_data *ha = vha->hw;

    if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
        goto done_set_internal;

    if (mode == INTERNAL_LOOPBACK)
        new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
    else if (mode == EXTERNAL_LOOPBACK)
        new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
    ql_dbg(ql_dbg_user, vha, 0x70be,
        "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

    memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

    ha->notify_dcbx_comp = 1;
    ret = qla81xx_set_port_config(vha, new_config);
    if (ret != QLA_SUCCESS) {
        ql_log(ql_log_warn, vha, 0x7021,
            "set port config failed.\n");
        ha->notify_dcbx_comp = 0;
        rval = -EINVAL;
        goto done_set_internal;
    }

    /* Wait for DCBX complete event */
    current_tmo = DCBX_COMP_TIMEOUT * HZ;
    while (1) {
        rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
            current_tmo);
        if (!ha->idc_extend_tmo || rem_tmo) {
            ha->idc_extend_tmo = 0;
            break;
        }
        current_tmo = ha->idc_extend_tmo * HZ;
        ha->idc_extend_tmo = 0;
    }

    if (!rem_tmo) {
        ql_dbg(ql_dbg_user, vha, 0x7022,
            "DCBX completion not received.\n");
        ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
        /*
         * If the reset of the loopback mode doesn't work take a FCoE
         * dump and reset the chip.
         */
        if (ret) {
            ha->isp_ops->fw_dump(vha, 0);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        }
        rval = -EINVAL;
    } else {
        if (ha->flags.idc_compl_status) {
            ql_dbg(ql_dbg_user, vha, 0x70c3,
                "Bad status in IDC Completion AEN\n");
            rval = -EINVAL;
            ha->flags.idc_compl_status = 0;
        } else
            ql_dbg(ql_dbg_user, vha, 0x7023,
                "DCBX completion received.\n");
    }

    ha->notify_dcbx_comp = 0;
    ha->idc_extend_tmo = 0;

done_set_internal:
    return rval;
}

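/*
 * QL_VND_LOOPBACK handler. Maps both payloads, stages them in coherent
 * DMA buffers, then either runs an ECHO diagnostic (external loopback on
 * a switch-attached port, or on a CNA carrying a full-size ELS frame) or
 * a true loopback test, reconfiguring a CNA port into and back out of
 * loopback mode around the test. The mailbox status words and the
 * command code that was sent are appended after the fc_bsg_reply for the
 * application to inspect.
 */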
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval;
    uint8_t command_sent;
    char *type;
    struct msg_echo_lb elreq;
    uint16_t response[MAILBOX_REGISTER_COUNT];
    uint16_t config[4], new_config[4];
    uint8_t *fw_sts_ptr;
    uint8_t *req_data = NULL;
    dma_addr_t req_data_dma;
    uint32_t req_data_len;
    uint8_t *rsp_data = NULL;
    dma_addr_t rsp_data_dma;
    uint32_t rsp_data_len;

    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
        return -EIO;
    }

    elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
        DMA_TO_DEVICE);

    if (!elreq.req_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701a,
            "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
        return -ENOMEM;
    }

    elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
        DMA_FROM_DEVICE);

    if (!elreq.rsp_sg_cnt) {
        ql_log(ql_log_warn, vha, 0x701b,
            "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
        rval = -ENOMEM;
        goto done_unmap_req_sg;
    }

    if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_log(ql_log_warn, vha, 0x701c,
            "dma mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x "
            "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
            bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }
    req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
    req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
        &req_data_dma, GFP_KERNEL);
    if (!req_data) {
        ql_log(ql_log_warn, vha, 0x701d,
            "dma alloc failed for req_data.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
        &rsp_data_dma, GFP_KERNEL);
    if (!rsp_data) {
        ql_log(ql_log_warn, vha, 0x7004,
            "dma alloc failed for rsp_data.\n");
        rval = -ENOMEM;
        goto done_free_dma_req;
    }

    /* Copy the request buffer in req_data now */
    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, req_data, req_data_len);

    elreq.send_dma = req_data_dma;
    elreq.rcv_dma = rsp_data_dma;
    elreq.transfer_size = req_data_len;

    elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    elreq.iteration_count =
        bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];

    if (atomic_read(&vha->loop_state) == LOOP_READY &&
        (ha->current_topology == ISP_CFG_F ||
        ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
        le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
        req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
        elreq.options == EXTERNAL_LOOPBACK) {
        type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
        ql_dbg(ql_dbg_user, vha, 0x701e,
            "BSG request type: %s.\n", type);
        command_sent = INT_DEF_LB_ECHO_CMD;
        rval = qla2x00_echo_test(vha, &elreq, response);
    } else {
        if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
            memset(config, 0, sizeof(config));
            memset(new_config, 0, sizeof(new_config));

            if (qla81xx_get_port_config(vha, config)) {
                ql_log(ql_log_warn, vha, 0x701f,
                    "Get port config failed.\n");
                rval = -EPERM;
                goto done_free_dma_rsp;
            }

            if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
                ql_dbg(ql_dbg_user, vha, 0x70c4,
                    "Loopback operation already in "
                    "progress.\n");
                rval = -EAGAIN;
                goto done_free_dma_rsp;
            }

            ql_dbg(ql_dbg_user, vha, 0x70c0,
                "elreq.options=%04x\n", elreq.options);

            if (elreq.options == EXTERNAL_LOOPBACK) {
                if (IS_QLA8031(ha) || IS_QLA8044(ha))
                    rval = qla81xx_set_loopback_mode(vha,
                        config, new_config, elreq.options);
                else
                    rval = qla81xx_reset_loopback_mode(vha,
                        config, 1, 0);
            } else {
                rval = qla81xx_set_loopback_mode(vha, config,
                    new_config, elreq.options);
            }

            if (rval) {
                rval = -EPERM;
                goto done_free_dma_rsp;
            }

            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x7028,
                "BSG request type: %s.\n", type);

            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);

            if (response[0] == MBS_COMMAND_ERROR &&
                response[1] == MBS_LB_RESET) {
                ql_log(ql_log_warn, vha, 0x7029,
                    "MBX command error, Aborting ISP.\n");
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                qla2xxx_wake_dpc(vha);
                qla2x00_wait_for_chip_reset(vha);
                /* Also reset the MPI */
                if (IS_QLA81XX(ha)) {
                    if (qla81xx_restart_mpi_firmware(vha) !=
                        QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x702a,
                            "MPI reset failed.\n");
                    }
                }

                rval = -EIO;
                goto done_free_dma_rsp;
            }

            if (new_config[0]) {
                int ret;

                /* Revert back to original port config
                 * Also clear internal loopback
                 */
                ret = qla81xx_reset_loopback_mode(vha,
                    new_config, 0, 1);
                if (ret) {
                    /*
                     * If the reset of the loopback mode
                     * doesn't work take FCoE dump and then
                     * reset the chip.
                     */
                    ha->isp_ops->fw_dump(vha, 0);
                    set_bit(ISP_ABORT_NEEDED,
                        &vha->dpc_flags);
                }
            }

        } else {
            type = "FC_BSG_HST_VENDOR_LOOPBACK";
            ql_dbg(ql_dbg_user, vha, 0x702b,
                "BSG request type: %s.\n", type);
            command_sent = INT_DEF_LB_LOOPBACK_CMD;
            rval = qla2x00_loopback_test(vha, &elreq, response);
        }
    }

    if (rval) {
        ql_log(ql_log_warn, vha, 0x702c,
            "Vendor request %s failed.\n", type);

        rval = 0;
        bsg_job->reply->result = (DID_ERROR << 16);
        bsg_job->reply->reply_payload_rcv_len = 0;
    } else {
        ql_dbg(ql_dbg_user, vha, 0x702d,
            "Vendor request %s completed.\n", type);
        bsg_job->reply->result = (DID_OK << 16);
        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, rsp_data,
            rsp_data_len);
    }

    bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
        sizeof(response) + sizeof(uint8_t);
    fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
        sizeof(struct fc_bsg_reply);
    memcpy(fw_sts_ptr, response, sizeof(response));
    fw_sts_ptr += sizeof(response);
    *fw_sts_ptr = command_sent;

done_free_dma_rsp:
    dma_free_coherent(&ha->pdev->dev, rsp_data_len,
        rsp_data, rsp_data_dma);
done_free_dma_req:
    dma_free_coherent(&ha->pdev->dev, req_data_len,
        req_data, req_data_dma);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!rval)
        bsg_job->job_done(bsg_job);
    return rval;
}

static int
qla84xx_reset(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint32_t flag;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7030,
            "Vendor request 84xx reset failed.\n");
        rval = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7031,
            "Vendor request 84xx reset completed.\n");
        bsg_job->reply->result = DID_OK;
        bsg_job->job_done(bsg_job);
    }

    return rval;
}

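/*
 * Stage a firmware image from the BSG request payload into a coherent
 * buffer and hand it to the ISP84xx via a VERIFY_CHIP IOCB with a 120
 * second timeout. A84_ISSUE_UPDATE_DIAGFW_CMD selects the diagnostic
 * firmware image through the VCO_DIAG_FW option.
 */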
static int
qla84xx_updatefw(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct verify_chip_entry_84xx *mn = NULL;
    dma_addr_t mn_dma, fw_dma;
    void *fw_buf = NULL;
    int rval = 0;
    uint32_t sg_cnt;
    uint32_t data_len;
    uint16_t options;
    uint32_t flag;
    uint32_t fw_ver;

    if (!IS_QLA84XX(ha)) {
        ql_dbg(ql_dbg_user, vha, 0x7032,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    if (!sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7033,
            "dma_map_sg returned %d for request.\n", sg_cnt);
        return -ENOMEM;
    }

    if (sg_cnt != bsg_job->request_payload.sg_cnt) {
        ql_log(ql_log_warn, vha, 0x7034,
            "DMA mapping resulted in different sg counts, "
            "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
            bsg_job->request_payload.sg_cnt, sg_cnt);
        rval = -EAGAIN;
        goto done_unmap_sg;
    }

    data_len = bsg_job->request_payload.payload_len;
    fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
        &fw_dma, GFP_KERNEL);
    if (!fw_buf) {
        ql_log(ql_log_warn, vha, 0x7035,
            "DMA alloc failed for fw_buf.\n");
        rval = -ENOMEM;
        goto done_unmap_sg;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, fw_buf, data_len);

    mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x7036,
            "DMA alloc failed for fw buffer.\n");
        rval = -ENOMEM;
        goto done_free_fw_buf;
    }

    flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

    memset(mn, 0, sizeof(struct access_chip_84xx));
    mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
    mn->entry_count = 1;

    options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
    if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
        options |= VCO_DIAG_FW;

    mn->options = cpu_to_le16(options);
    mn->fw_ver = cpu_to_le32(fw_ver);
    mn->fw_size = cpu_to_le32(data_len);
    mn->fw_seq_size = cpu_to_le32(data_len);
    mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
    mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
    mn->dseg_length = cpu_to_le32(data_len);
    mn->data_seg_cnt = cpu_to_le16(1);

    rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7037,
            "Vendor request 84xx updatefw failed.\n");

        rval = (DID_ERROR << 16);
    } else {
        ql_dbg(ql_dbg_user, vha, 0x7038,
            "Vendor request 84xx updatefw completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job->reply->result = DID_OK;
    }

    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
    dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

    if (!rval)
        bsg_job->job_done(bsg_job);
    return rval;
}

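/*
 * ISP84xx management pass-through: QLA84_MGMT_READ_MEM and
 * QLA84_MGMT_GET_INFO pull data back through the reply payload,
 * QLA84_MGMT_WRITE_MEM pushes the request payload, and
 * QLA84_MGMT_CHNG_CONFIG needs no data buffer at all. dma_direction
 * records which payload was mapped so the common cleanup below can
 * unmap the right one.
 */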
static int
qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    struct access_chip_84xx *mn = NULL;
    dma_addr_t mn_dma, mgmt_dma;
    void *mgmt_b = NULL;
    int rval = 0;
    struct qla_bsg_a84_mgmt *ql84_mgmt;
    uint32_t sg_cnt;
    uint32_t data_len = 0;
    uint32_t dma_direction = DMA_NONE;

    if (!IS_QLA84XX(ha)) {
        ql_log(ql_log_warn, vha, 0x703a,
            "Not 84xx, exiting.\n");
        return -EINVAL;
    }

    mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
    if (!mn) {
        ql_log(ql_log_warn, vha, 0x703c,
            "DMA alloc failed for fw buffer.\n");
        return -ENOMEM;
    }

    memset(mn, 0, sizeof(struct access_chip_84xx));
    mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
    mn->entry_count = 1;
    ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
    switch (ql84_mgmt->mgmt.cmd) {
    case QLA84_MGMT_READ_MEM:
    case QLA84_MGMT_GET_INFO:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703d,
                "dma_map_sg returned %d for reply.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_FROM_DEVICE;

        if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x703e,
                "DMA mapping resulted in different sg counts, "
                "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
                bsg_job->reply_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->reply_payload.payload_len;

        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x703f,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
            mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
            mn->parameter1 =
                cpu_to_le32(
                    ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

        } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
            mn->options = cpu_to_le16(ACO_REQUEST_INFO);
            mn->parameter1 =
                cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

            mn->parameter2 =
                cpu_to_le32(
                    ql84_mgmt->mgmt.mgmtp.u.info.context);
        }
        break;

    case QLA84_MGMT_WRITE_MEM:
        sg_cnt = dma_map_sg(&ha->pdev->dev,
            bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

        if (!sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7040,
                "dma_map_sg returned %d.\n", sg_cnt);
            rval = -ENOMEM;
            goto exit_mgmt;
        }

        dma_direction = DMA_TO_DEVICE;

        if (sg_cnt != bsg_job->request_payload.sg_cnt) {
            ql_log(ql_log_warn, vha, 0x7041,
                "DMA mapping resulted in different sg counts, "
                "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
                bsg_job->request_payload.sg_cnt, sg_cnt);
            rval = -EAGAIN;
            goto done_unmap_sg;
        }

        data_len = bsg_job->request_payload.payload_len;
        mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
            &mgmt_dma, GFP_KERNEL);
        if (!mgmt_b) {
            ql_log(ql_log_warn, vha, 0x7042,
                "DMA alloc failed for mgmt_b.\n");
            rval = -ENOMEM;
            goto done_unmap_sg;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

        mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
        break;

    case QLA84_MGMT_CHNG_CONFIG:
        mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
        mn->parameter1 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

        mn->parameter2 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

        mn->parameter3 =
            cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
        break;

    default:
        rval = -EIO;
        goto exit_mgmt;
    }

    if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
        mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
        mn->dseg_count = cpu_to_le16(1);
        mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
        mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
        mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
    }

    rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x7043,
            "Vendor request 84xx mgmt failed.\n");

        rval = (DID_ERROR << 16);

    } else {
        ql_dbg(ql_dbg_user, vha, 0x7044,
            "Vendor request 84xx mgmt completed.\n");

        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        bsg_job->reply->result = DID_OK;

        if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
            (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
            bsg_job->reply->reply_payload_rcv_len =
                bsg_job->reply_payload.payload_len;

            sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                bsg_job->reply_payload.sg_cnt, mgmt_b,
                data_len);
        }
    }

done_unmap_sg:
    if (mgmt_b)
        dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

    if (dma_direction == DMA_TO_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
            bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
    else if (dma_direction == DMA_FROM_DEVICE)
        dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
    dma_pool_free(ha->s_dma_pool, mn, mn_dma);

    if (!rval)
        bsg_job->job_done(bsg_job);
    return rval;
}

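/*
 * QL_VND_IIDMA: get or set the iiDMA link speed of one logged-in FCP
 * target, looked up by WWPN. On a get, the updated qla_port_param is
 * appended after the fc_bsg_reply for the caller to read back.
 */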
static int
qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    int rval = 0;
    struct qla_port_param *port_param = NULL;
    fc_port_t *fcport = NULL;
    int found = 0;
    uint16_t mb[MAILBOX_REGISTER_COUNT];
    uint8_t *rsp_ptr = NULL;

    if (!IS_IIDMA_CAPABLE(vha->hw)) {
        ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
        return -EINVAL;
    }

    port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
    if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
        ql_log(ql_log_warn, vha, 0x7048,
            "Invalid destination type.\n");
        return -EINVAL;
    }

    list_for_each_entry(fcport, &vha->vp_fcports, list) {
        if (fcport->port_type != FCT_TARGET)
            continue;

        if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
            fcport->port_name, sizeof(fcport->port_name)))
            continue;

        found = 1;
        break;
    }

    if (!found) {
        ql_log(ql_log_warn, vha, 0x7049,
            "Failed to find port.\n");
        return -EINVAL;
    }

    if (atomic_read(&fcport->state) != FCS_ONLINE) {
        ql_log(ql_log_warn, vha, 0x704a,
            "Port is not online.\n");
        return -EINVAL;
    }

    if (fcport->flags & FCF_LOGIN_NEEDED) {
        ql_log(ql_log_warn, vha, 0x704b,
            "Remote port not logged in flags = 0x%x.\n", fcport->flags);
        return -EINVAL;
    }

    if (port_param->mode)
        rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
            port_param->speed, mb);
    else
        rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
            &port_param->speed, mb);

    if (rval) {
        ql_log(ql_log_warn, vha, 0x704c,
            "iiDMA cmd failed for %8phN -- "
            "%04x %x %04x %04x.\n", fcport->port_name,
            rval, fcport->fp_speed, mb[0], mb[1]);
        rval = (DID_ERROR << 16);
    } else {
        if (!port_param->mode) {
            bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
                sizeof(struct qla_port_param);

            rsp_ptr = ((uint8_t *)bsg_job->reply) +
                sizeof(struct fc_bsg_reply);

            memcpy(rsp_ptr, port_param,
                sizeof(struct qla_port_param));
        }

        bsg_job->reply->result = DID_OK;
        bsg_job->job_done(bsg_job);
    }

    return rval;
}

static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
    uint8_t is_update)
{
    uint32_t start = 0;
    int valid = 0;
    struct qla_hw_data *ha = vha->hw;

    if (unlikely(pci_channel_offline(ha->pdev)))
        return -EINVAL;

    start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
    if (start > ha->optrom_size) {
        ql_log(ql_log_warn, vha, 0x7055,
            "start %d > optrom_size %d.\n", start, ha->optrom_size);
        return -EINVAL;
    }

    if (ha->optrom_state != QLA_SWAITING) {
        ql_log(ql_log_info, vha, 0x7056,
            "optrom_state %d.\n", ha->optrom_state);
        return -EBUSY;
    }

    ha->optrom_region_start = start;
    ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
    if (is_update) {
        if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
            valid = 1;
        else if (start == (ha->flt_region_boot * 4) ||
            start == (ha->flt_region_fw * 4))
            valid = 1;
        else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
            IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
            valid = 1;
        if (!valid) {
            ql_log(ql_log_warn, vha, 0x7058,
                "Invalid start region 0x%x/0x%x.\n", start,
                bsg_job->request_payload.payload_len);
            return -EINVAL;
        }

        ha->optrom_region_size = start +
            bsg_job->request_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->request_payload.payload_len;
        ha->optrom_state = QLA_SWRITING;
    } else {
        ha->optrom_region_size = start +
            bsg_job->reply_payload.payload_len > ha->optrom_size ?
            ha->optrom_size - start :
            bsg_job->reply_payload.payload_len;
        ha->optrom_state = QLA_SREADING;
    }

    ha->optrom_buffer = vmalloc(ha->optrom_region_size);
    if (!ha->optrom_buffer) {
        ql_log(ql_log_warn, vha, 0x7059,
            "Read: Unable to allocate memory for optrom retrieval "
            "(%x)\n", ha->optrom_region_size);

        ha->optrom_state = QLA_SWAITING;
        return -ENOMEM;
    }

    memset(ha->optrom_buffer, 0, ha->optrom_region_size);
    return 0;
}

static int
qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    if (ha->flags.nic_core_reset_hdlr_active)
        return -EBUSY;

    rval = qla2x00_optrom_setup(bsg_job, vha, 0);
    if (rval)
        return rval;

    ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
    bsg_job->reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    bsg_job->job_done(bsg_job);
    return rval;
}

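/*
 * Flash-update counterpart of the read path above. Note the handler
 * sets isp82xx_no_md_cap so that a minidump is not captured while the
 * option ROM region is being rewritten.
 */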
static int
qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;

    rval = qla2x00_optrom_setup(bsg_job, vha, 1);
    if (rval)
        return rval;

    /* Set the isp82xx_no_md_cap not to capture minidump */
    ha->flags.isp82xx_no_md_cap = 1;

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
        ha->optrom_region_size);

    ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
        ha->optrom_region_start, ha->optrom_region_size);

    bsg_job->reply->result = DID_OK;
    vfree(ha->optrom_buffer);
    ha->optrom_buffer = NULL;
    ha->optrom_state = QLA_SWAITING;
    bsg_job->job_done(bsg_job);
    return rval;
}

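/*
 * The FRU and I2C handlers below all follow the same pattern: the
 * caller's payload is staged in an on-stack bsg[DMA_POOL_SIZE] scratch
 * copy, while the actual SFP/FRU transfer goes through a small buffer
 * from s_dma_pool; vendor_rsp[0] carries the extended status back
 * (EXT_STATUS_NO_MEMORY, EXT_STATUS_MAILBOX, or 0 on success).
 */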
static int
qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_image_version_list *list = (void *)bsg;
    struct qla_image_version *image;
    uint32_t count;
    dma_addr_t sfp_dma;
    void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, list, sizeof(bsg));

    image = list->version;
    count = list->count;
    while (count--) {
        memcpy(sfp, &image->field_info, sizeof(image->field_info));
        rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
            image->field_address.device, image->field_address.offset,
            sizeof(image->field_info), image->field_address.option);
        if (rval) {
            bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
                EXT_STATUS_MAILBOX;
            goto dealloc;
        }
        image++;
    }

    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->result = DID_OK << 16;
    bsg_job->job_done(bsg_job);

    return 0;
}

static int
qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_status_reg *sr = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

    rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
        sr->field_address.device, sr->field_address.offset,
        sizeof(sr->status_reg), sr->field_address.option);
    sr->status_reg = *sfp;

    if (rval) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));

    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
    bsg_job->reply->result = DID_OK << 16;
    bsg_job->job_done(bsg_job);

    return 0;
}

static int
qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_status_reg *sr = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));

    *sfp = sr->status_reg;
    rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
        sr->field_address.device, sr->field_address.offset,
        sizeof(sr->status_reg), sr->field_address.option);

    if (rval) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->result = DID_OK << 16;
    bsg_job->job_done(bsg_job);

    return 0;
}

static int
qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_i2c_access *i2c = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

    memcpy(sfp, i2c->buffer, i2c->length);
    rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
        i2c->device, i2c->offset, i2c->length, i2c->option);

    if (rval) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->result = DID_OK << 16;
    bsg_job->job_done(bsg_job);

    return 0;
}

static int
qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    int rval = 0;
    uint8_t bsg[DMA_POOL_SIZE];
    struct qla_i2c_access *i2c = (void *)bsg;
    dma_addr_t sfp_dma;
    uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);

    if (!sfp) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_NO_MEMORY;
        goto done;
    }

    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));

    rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
        i2c->device, i2c->offset, i2c->length, i2c->option);

    if (rval) {
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
            EXT_STATUS_MAILBOX;
        goto dealloc;
    }

    memcpy(i2c->buffer, sfp, i2c->length);
    sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));

    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;

dealloc:
    dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);

done:
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
    bsg_job->reply->result = DID_OK << 16;
    bsg_job->job_done(bsg_job);

    return 0;
}

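/*
 * Bidirectional pass-through diagnostic. Only valid on a bidi-capable
 * ISP that is online, cabled, switch-attached and in P2P mode; the port
 * logs in to itself (self-login) so the same host acts as both
 * initiator and target. Errors are reported through vendor_rsp[0] with
 * an EXT_STATUS_* code while the BSG result itself stays DID_OK.
 */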
static int
qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
{
    struct Scsi_Host *host = bsg_job->shost;
    scsi_qla_host_t *vha = shost_priv(host);
    struct qla_hw_data *ha = vha->hw;
    uint16_t thread_id;
    uint32_t rval = EXT_STATUS_OK;
    uint16_t req_sg_cnt = 0;
    uint16_t rsp_sg_cnt = 0;
    uint16_t nextlid = 0;
    uint32_t tot_dsds;
    srb_t *sp = NULL;
    uint32_t req_data_len = 0;
    uint32_t rsp_data_len = 0;

    /* Check the type of the adapter */
    if (!IS_BIDI_CAPABLE(ha)) {
        ql_log(ql_log_warn, vha, 0x70a0,
            "This adapter is not supported\n");
        rval = EXT_STATUS_NOT_SUPPORTED;
        goto done;
    }

    if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
        test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
        test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
        rval = EXT_STATUS_BUSY;
        goto done;
    }

    /* Check if host is online */
    if (!vha->flags.online) {
        ql_log(ql_log_warn, vha, 0x70a1,
            "Host is not online\n");
        rval = EXT_STATUS_DEVICE_OFFLINE;
        goto done;
    }

    /* Check if cable is plugged in or not */
    if (vha->device_flags & DFLG_NO_CABLE) {
        ql_log(ql_log_warn, vha, 0x70a2,
            "Cable is unplugged...\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    /* Check if the switch is connected or not */
    if (ha->current_topology != ISP_CFG_F) {
        ql_log(ql_log_warn, vha, 0x70a3,
            "Host is not connected to the switch\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    /* Check if operating mode is P2P */
    if (ha->operating_mode != P2P) {
        ql_log(ql_log_warn, vha, 0x70a4,
            "Host operating mode is not P2P\n");
        rval = EXT_STATUS_INVALID_CFG;
        goto done;
    }

    thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

    mutex_lock(&ha->selflogin_lock);
    if (vha->self_login_loop_id == 0) {
        /* Initialize all required fields of fcport */
        vha->bidir_fcport.vha = vha;
        vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
        vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
        vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
        vha->bidir_fcport.loop_id = vha->loop_id;

        if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
            ql_log(ql_log_warn, vha, 0x70a7,
                "Failed to login port %06X for bidirectional IOCB\n",
                vha->bidir_fcport.d_id.b24);
            mutex_unlock(&ha->selflogin_lock);
            rval = EXT_STATUS_MAILBOX;
            goto done;
        }
        vha->self_login_loop_id = nextlid - 1;
    }
    /* Assign the self login loop id to fcport */
    mutex_unlock(&ha->selflogin_lock);

    vha->bidir_fcport.loop_id = vha->self_login_loop_id;

    req_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt,
        DMA_TO_DEVICE);

    if (!req_sg_cnt) {
        rval = EXT_STATUS_NO_MEMORY;
        goto done;
    }

    rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
        DMA_FROM_DEVICE);

    if (!rsp_sg_cnt) {
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_req_sg;
    }

    if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
        (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
        ql_dbg(ql_dbg_user, vha, 0x70a9,
            "Dma mapping resulted in different sg counts "
            "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
            "%x dma_reply_sg_cnt: %x]\n",
            bsg_job->request_payload.sg_cnt, req_sg_cnt,
            bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_sg;
    }

    req_data_len = bsg_job->request_payload.payload_len;
    rsp_data_len = bsg_job->reply_payload.payload_len;

    /* Read and write payloads must be the same size; check this only
     * after the lengths have actually been fetched from the job.
     */
    if (req_data_len != rsp_data_len) {
        rval = EXT_STATUS_BUSY;
        ql_log(ql_log_warn, vha, 0x70aa,
            "req_data_len != rsp_data_len\n");
        goto done_unmap_sg;
    }

    /* Alloc SRB structure */
    sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
    if (!sp) {
        ql_dbg(ql_dbg_user, vha, 0x70ac,
            "Alloc SRB structure failed\n");
        rval = EXT_STATUS_NO_MEMORY;
        goto done_unmap_sg;
    }

    /* Populate srb->ctx with bidir ctx */
    sp->u.bsg_job = bsg_job;
    sp->free = qla2x00_bsg_sp_free;
    sp->type = SRB_BIDI_CMD;
    sp->done = qla2x00_bsg_job_done;

    /* Add the read and write sg count */
    tot_dsds = rsp_sg_cnt + req_sg_cnt;

    rval = qla2x00_start_bidir(sp, vha, tot_dsds);
    if (rval != EXT_STATUS_OK)
        goto done_free_srb;
    /* the bsg request will be completed in the interrupt handler */
    return rval;

done_free_srb:
    mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->reply_payload.sg_list,
        bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
    dma_unmap_sg(&ha->pdev->dev,
        bsg_job->request_payload.sg_list,
        bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

    /* Return an error vendor specific response
     * and complete the bsg request
     */
    bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
    bsg_job->reply->reply_payload_rcv_len = 0;
    bsg_job->reply->result = (DID_OK) << 16;
    bsg_job->job_done(bsg_job);
    /* Always return success, vendor rsp carries correct status */
    return 0;
}

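/*
 * ISPFx00 management pass-through. The caller's qla_mt_iocb_rqst_fx00
 * is carried in the vendor command words; its flags decide whether the
 * request and/or reply payloads need DMA mapping, and the matching
 * unmap on completion is done by qla2x00_bsg_sp_free() above.
 */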
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since the functions preparing
	 * the IOCB and mailbox command retrieve port specific information
	 * from the fcport structure. For host based ELS commands there is
	 * no fcport structure allocated.
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required fields of fcport */
	fcport->vha = vha;
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
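/*
 * Dispatch an FC_BSG_HST_VENDOR request to its handler based on the
 * vendor sub-command carried in vendor_cmd[0].
 */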
static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		return qla2x00_process_loopback(bsg_job);

	case QL_VND_A84_RESET:
		return qla84xx_reset(bsg_job);

	case QL_VND_A84_UPDATE_FW:
		return qla84xx_updatefw(bsg_job);

	case QL_VND_A84_MGMT_CMD:
		return qla84xx_mgmt_cmd(bsg_job);

	case QL_VND_IIDMA:
		return qla24xx_iidma(bsg_job);

	case QL_VND_FCP_PRIO_CFG_CMD:
		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);

	case QL_VND_READ_FLASH:
		return qla2x00_read_optrom(bsg_job);

	case QL_VND_UPDATE_FLASH:
		return qla2x00_update_optrom(bsg_job);

	case QL_VND_SET_FRU_VERSION:
		return qla2x00_update_fru_versions(bsg_job);

	case QL_VND_READ_FRU_STATUS:
		return qla2x00_read_fru_status(bsg_job);

	case QL_VND_WRITE_FRU_STATUS:
		return qla2x00_write_fru_status(bsg_job);

	case QL_VND_WRITE_I2C:
		return qla2x00_write_i2c(bsg_job);

	case QL_VND_READ_I2C:
		return qla2x00_read_i2c(bsg_job);

	case QL_VND_DIAG_IO_CMD:
		return qla24xx_process_bidir_cmd(bsg_job);

	case QL_VND_FX00_MGMT_CMD:
		return qlafx00_mgmt_cmd(bsg_job);

	default:
		return -ENOSYS;
	}
}

int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
	int ret = -EINVAL;
	struct fc_rport *rport;
	fc_port_t *fcport = NULL;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;

	/* In case no data transferred. */
	bsg_job->reply->reply_payload_rcv_len = 0;

	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
	}

	if (qla2x00_reset_active(vha)) {
		ql_dbg(ql_dbg_user, vha, 0x709f,
		    "BSG: ISP abort active/needed -- cmd=%d.\n",
		    bsg_job->request->msgcode);
		return -EBUSY;
	}

	ql_dbg(ql_dbg_user, vha, 0x7000,
	    "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);

	switch (bsg_job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		ret = qla2x00_process_els(bsg_job);
		break;
	case FC_BSG_HST_CT:
		ret = qla2x00_process_ct(bsg_job);
		break;
	case FC_BSG_HST_VENDOR:
		ret = qla2x00_process_vendor_specific(bsg_job);
		break;
	case FC_BSG_HST_ADD_RPORT:
	case FC_BSG_HST_DEL_RPORT:
	case FC_BSG_RPT_CT:
	default:
		ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
		break;
	}
	return ret;
}

int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
	scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				if (((sp->type == SRB_CT_CMD) ||
				    (sp->type == SRB_ELS_CMD_HST) ||
				    (sp->type == SRB_FXIOCB_BCMD))
				    && (sp->u.bsg_job == bsg_job)) {
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command failed.\n");
						bsg_job->req->errors =
						    bsg_job->reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command success.\n");
						bsg_job->req->errors =
						    bsg_job->reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	sp->free(vha, sp);
	return 0;
}