/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u8 i;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * The lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->vref_waitq,
		    !atomic_read(&vha->vref_count), HZ) > 0)
			break;
	}

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in a context other than vport create,
	 * disable, or delete, make sure it is synchronized with the vport
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

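/**
 * qla24xx_disable_vp() - Take a virtual port offline
 * @vha: virtual port to disable
 *
 * Logs the vport out (when firmware is started), marks all of its fcports
 * lost, removes its port id from the vp target map, and moves the fc_vport
 * to FC_VPORT_DISABLED (or FC_VPORT_FAILED on error).
 *
 * Return: 0 on success, -1 on failure.
 */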
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha, 0);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

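/*
 * qla24xx_configure_vp
 *	Complete the vport configuration: request change notification #3 so
 *	RSCNs are delivered to the vport, configure the vHBA, and mark the
 *	vport active.
 */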
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003,
		    "Failed to enable receiving of RSCN requests: 0x%x.\n",
		    ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

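/*
 * qla2x00_vp_abort_isp
 *	Vport recovery for an ISP abort: log the vport out (unless an ISP
 *	abort is already active), treat the event as a loop down, and
 *	schedule re-enabling of the vport.
 */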
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: This control_vp can fail if ISP reset is already
	 * issued, this is expected, as the vp would be already
	 * logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}

static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

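/**
 * qla24xx_vport_create_req_sanity_check() - Validate a vport create request
 * @fc_vport: FC transport vport to be validated
 *
 * Checks the requested role, NPIV support in firmware/hardware and on the
 * attached switch, WWPN uniqueness, and the NPIV vport limit.
 *
 * Return: 0 on success, a VPCERR_* code on failure.
 */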
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check that an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the max NPIV vport count */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, base_vha, 0xa004,
		    "num_vhosts %u is bigger than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;

	/*
	 * Set VP_SCR_NEEDED so that the parent's RSCNs are not processed
	 * for this vport before its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

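/*
 * qla25xx_free_req_que
 *	Free a request queue's ring memory and outstanding-commands array,
 *	and release its id from the request queue map.
 */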
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

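/*
 * qla25xx_create_req_que
 *	Create an additional request queue: allocate the ring, reserve a
 *	queue id, map the queue registers, and optionally start the queue
 *	in firmware (@startqp).
 *
 *	Returns the new queue id, or 0 on failure.
 */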
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

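/* Process a qpair's response queue from the driver workqueue. */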
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters without MSIX NACK support */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}

static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable or disable a virtual port for the given host
 * @vha: adapter block pointer
 * @cmd: command type to be sent to enable/disable the virtual port
 *
 * Return: qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		return rval;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	sp->done = qla_ctrlvp_sp_done;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		break;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		break;
	}
done:
	sp->free(sp);
	return rval;
}