/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport
	 * from the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue).
	 */
	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
	    10*HZ);

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}
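/*
 * Note: the deallocation path above waits for vha->vref_count to drain.
 * Readers walking ha->vp_list pair with that wait by taking a reference
 * under vport_slock before dropping the lock. A sketch of the pattern,
 * as used by qla2x00_alert_all_vps() and qla2x00_do_dpc_all_vps() later
 * in this file:
 *
 *	spin_lock_irqsave(&ha->vport_slock, flags);
 *	list_for_each_entry(vp, &ha->vp_list, list) {
 *		atomic_inc(&vp->vref_count);
 *		spin_unlock_irqrestore(&ha->vport_slock, flags);
 *
 *		// ... use vp without holding vport_slock ...
 *
 *		spin_lock_irqsave(&ha->vport_slock, flags);
 *		atomic_dec(&vp->vref_count);
 *		wake_up(&vp->vref_waitq);
 *	}
 *	spin_unlock_irqrestore(&ha->vport_slock, flags);
 */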
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fcport = port structure pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable, or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha, 0);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}
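/*
 * A minimal caller sketch (illustrative only): the fc_transport vport
 * enable/disable attribute is the typical caller of the two functions
 * above, mapping their driver-local return codes to errnos. The helper
 * name below is hypothetical, not part of this driver:
 *
 *	static int example_set_vport_enabled(scsi_qla_host_t *vha, bool on)
 *	{
 *		if (on)
 *			return qla24xx_enable_vp(vha) == 0 ? 0 : -EIO;
 *		return qla24xx_disable_vp(vha) == 0 ? 0 : -EIO;
 *	}
 */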
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003,
		    "Failed to enable receiving of RSCN requests: 0x%x.\n",
		    ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}
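/*
 * Illustrative sketch (the actual caller lives outside this file):
 * during a full ISP abort, the base port's recovery path is expected to
 * fan the reset out to every vport roughly like this, using the same
 * vref_count pattern as qla2x00_alert_all_vps() above:
 *
 *	list_for_each_entry(vp, &ha->vp_list, list)
 *		if (vp->vp_idx)
 *			qla2x00_vp_abort_isp(vp);
 */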
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check that the WWPN is unique */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max-npiv-vports limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}
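/*
 * A minimal sketch of the expected call order during vport creation.
 * The transport's vport_create handler lives outside this file; the
 * function name below is illustrative, not part of this driver:
 *
 *	static int example_vport_create(struct fc_vport *fc_vport)
 *	{
 *		scsi_qla_host_t *vha;
 *
 *		if (qla24xx_vport_create_req_sanity_check(fc_vport))
 *			return -EINVAL;
 *		vha = qla24xx_create_vhost(fc_vport);
 *		return vha ? 0 : -ENOMEM;
 *	}
 */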
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;

	/*
	 * Set VP_SCR_NEEDED so that the parent's RSCNs are not processed
	 * for this vport before its own SCR has completed.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}
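/*
 * Once created, the vport's scsi_qla_host is reachable from the
 * transport object via dd_data (set above). A minimal sketch:
 *
 *	scsi_qla_host_t *vha = fc_vport->dd_data;
 *
 * Teardown is the mirror image: qla24xx_deallocate_vp_id() unlinks the
 * vport from ha->vp_list and releases its bit in ha->vp_idx_map.
 */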
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}
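/*
 * Note on the BIT_0 usage in the delete paths above: re-issuing the
 * queue init mailbox command with BIT_0 set in the options field
 * appears to ask the firmware to tear the queue down rather than
 * (re)create it; only after that succeeds is the host-side memory
 * freed. This reading is inferred from the code above, not from a
 * firmware specification.
 */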
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}
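/*
 * A minimal caller sketch (illustrative; the parameter values are
 * assumptions, not recommendations). A return of 0 means creation
 * failed; any other value is the new queue id:
 *
 *	int req_id = qla25xx_create_req_que(ha, 0, vha->vp_idx, 0,
 *	    rsp_id, 0, true);
 *	if (!req_id)
 *		// fall back to the base queue
 */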
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters that are not MSIX-NACK capable */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}
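/*
 * Design note: when the driver-wide workqueue (ha->wq) exists, response
 * processing for this queue is deferred to qla_do_work() above via
 * qpair->q_work, so the MSI-X handler only needs to queue the work
 * item, e.g. queue_work(ha->wq, &qpair->q_work). The exact call site
 * lives in the interrupt code, not in this file.
 */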
static void qla_ctrlvp_sp_done(void *s, int res)
{
	struct srb *sp = s;

	if (sp->comp)
		complete(sp->comp);
	/* don't free sp here. Let the caller do the free */
}

/**
 * qla24xx_control_vp() - Enable or disable a virtual port on the given host
 * @vha: adapter block pointer
 * @cmd: command type to be sent to control the virtual port
 *
 * Return: qla2xxx local function return status code.
 */
int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
{
	int rval = QLA_MEMORY_ALLOC_FAILED;
	struct qla_hw_data *ha = vha->hw;
	int vp_index = vha->vp_idx;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	DECLARE_COMPLETION_ONSTACK(comp);
	srb_t *sp;

	ql_dbg(ql_dbg_vport, vha, 0x10c1,
	    "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

	if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
		return QLA_PARAMETER_ERROR;

	sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_CTRL_VP;
	sp->name = "ctrl_vp";
	sp->comp = &comp;
	sp->done = qla_ctrlvp_sp_done;
	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
	sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_async, vha, 0xffff,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&comp);
	sp->comp = NULL;

	rval = sp->rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
		    __func__, sp->name);
		goto done_free_sp;
	default:
		ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}
done:
	return rval;

done_free_sp:
	sp->free(sp);
	return rval;
}