/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u8 i;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
		wait_event_timeout(vha->vref_waitq,
		    !atomic_read(&vha->vref_count), HZ);

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}
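/*
 * Locate a vport on ha->vp_list by world-wide port name.  Returns the
 * matching scsi_qla_host, or NULL if no vport uses that WWPN.
 */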
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the vport
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha, 0);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}
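/*
 * Finish bringing up a newly acquired vport: enable RSCN reception for
 * this vp_idx (change request #3), then complete fabric configuration
 * via qla24xx_configure_vhba() and mark the vport active.
 */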
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: This control_vp can fail if ISP reset is already
	 * issued, this is expected, as the vp would be already
	 * logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}
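/*
 * Per-vport DPC handler, invoked for each vport from
 * qla2x00_do_dpc_all_vps().  Services deferred work flagged in
 * vp_flags/dpc_flags: initial vport configuration, fcport updates,
 * relogins and loop resync.
 */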
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the maximum number of supported NPIV vports */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}
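/*
 * Allocate and initialize a scsi_qla_host for a new NPIV vport.  The
 * vport starts LOOP_DOWN with VP_SCR_NEEDED set until its SCR completes.
 * Returns the new host, or NULL on failure.
 */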
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;

	/*
	 * Prevent processing of the parent's RSCN for this vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
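/*
 * Multiqueue (ISP25xx and later) request/response queue helpers.  The
 * free routines release a queue's DMA ring and clear its qid bit; the
 * delete routines first re-issue the queue init with BIT_0 set in the
 * options so the firmware tears the queue down, then free it.
 */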
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}
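/*
 * Create an additional request queue: allocate the ring and outstanding
 * command array, claim a free queue id, bind the queue to @rsp_que and,
 * if @startqp is set, initialize it in firmware.  Returns the new queue
 * id, or 0 on failure.
 */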
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}
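/* Deferred work handler: drain the qpair's response queue under qp_lock. */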
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters that are not MSIX-NACK capable */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}
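/* SRB completion callback for SRB_CTRL_VP: signal the waiting issuer. */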
static void qla_ctrlvp_sp_done(srb_t *sp, int res)
{
	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable a virtual port for given host
 * @vha:	adapter block pointer
 * @cmd:	command type to be sent for enable virtual port
 *
 * Return:	qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	sp->done = qla_ctrlvp_sp_done;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		goto done_free_sp;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}
done:
	return rval;

done_free_sp:
	sp->free(sp);
	return rval;
}