1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic Fibre Channel HBA Driver 4 * Copyright (c) 2003-2014 QLogic Corporation 5 */ 6 #include "qla_def.h" 7 #include "qla_gbl.h" 8 #include "qla_target.h" 9 10 #include <linux/moduleparam.h> 11 #include <linux/vmalloc.h> 12 #include <linux/slab.h> 13 #include <linux/list.h> 14 15 #include <scsi/scsi_tcq.h> 16 #include <scsi/scsicam.h> 17 #include <linux/delay.h> 18 19 void 20 qla2x00_vp_stop_timer(scsi_qla_host_t *vha) 21 { 22 if (vha->vp_idx && vha->timer_active) { 23 del_timer_sync(&vha->timer); 24 vha->timer_active = 0; 25 } 26 } 27 28 static uint32_t 29 qla24xx_allocate_vp_id(scsi_qla_host_t *vha) 30 { 31 uint32_t vp_id; 32 struct qla_hw_data *ha = vha->hw; 33 unsigned long flags; 34 35 /* Find an empty slot and assign an vp_id */ 36 mutex_lock(&ha->vport_lock); 37 vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); 38 if (vp_id > ha->max_npiv_vports) { 39 ql_dbg(ql_dbg_vport, vha, 0xa000, 40 "vp_id %d is bigger than max-supported %d.\n", 41 vp_id, ha->max_npiv_vports); 42 mutex_unlock(&ha->vport_lock); 43 return vp_id; 44 } 45 46 set_bit(vp_id, ha->vp_idx_map); 47 ha->num_vhosts++; 48 vha->vp_idx = vp_id; 49 50 spin_lock_irqsave(&ha->vport_slock, flags); 51 list_add_tail(&vha->list, &ha->vp_list); 52 spin_unlock_irqrestore(&ha->vport_slock, flags); 53 54 spin_lock_irqsave(&ha->hardware_lock, flags); 55 qla_update_vp_map(vha, SET_VP_IDX); 56 spin_unlock_irqrestore(&ha->hardware_lock, flags); 57 58 mutex_unlock(&ha->vport_lock); 59 return vp_id; 60 } 61 62 void 63 qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) 64 { 65 uint16_t vp_id; 66 struct qla_hw_data *ha = vha->hw; 67 unsigned long flags = 0; 68 u32 i, bailout; 69 70 mutex_lock(&ha->vport_lock); 71 /* 72 * Wait for all pending activities to finish before removing vport from 73 * the list. 74 * Lock needs to be held for safe removal from the list (it 75 * ensures no active vp_list traversal while the vport is removed 76 * from the queue) 77 */ 78 bailout = 0; 79 for (i = 0; i < 500; i++) { 80 spin_lock_irqsave(&ha->vport_slock, flags); 81 if (atomic_read(&vha->vref_count) == 0) { 82 list_del(&vha->list); 83 qla_update_vp_map(vha, RESET_VP_IDX); 84 bailout = 1; 85 } 86 spin_unlock_irqrestore(&ha->vport_slock, flags); 87 88 if (bailout) 89 break; 90 else 91 msleep(20); 92 } 93 if (!bailout) { 94 ql_log(ql_log_info, vha, 0xfffa, 95 "vha->vref_count=%u timeout\n", vha->vref_count.counter); 96 spin_lock_irqsave(&ha->vport_slock, flags); 97 list_del(&vha->list); 98 qla_update_vp_map(vha, RESET_VP_IDX); 99 spin_unlock_irqrestore(&ha->vport_slock, flags); 100 } 101 102 vp_id = vha->vp_idx; 103 ha->num_vhosts--; 104 clear_bit(vp_id, ha->vp_idx_map); 105 106 mutex_unlock(&ha->vport_lock); 107 } 108 109 static scsi_qla_host_t * 110 qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) 111 { 112 scsi_qla_host_t *vha; 113 struct scsi_qla_host *tvha; 114 unsigned long flags; 115 116 spin_lock_irqsave(&ha->vport_slock, flags); 117 /* Locate matching device in database. */ 118 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { 119 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { 120 spin_unlock_irqrestore(&ha->vport_slock, flags); 121 return vha; 122 } 123 } 124 spin_unlock_irqrestore(&ha->vport_slock, flags); 125 return NULL; 126 } 127 128 /* 129 * qla2x00_mark_vp_devices_dead 130 * Updates fcport state when device goes offline. 131 * 132 * Input: 133 * ha = adapter block pointer. 134 * fcport = port structure pointer. 135 * 136 * Return: 137 * None. 138 * 139 * Context: 140 */ 141 static void 142 qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) 143 { 144 /* 145 * !!! NOTE !!! 146 * This function, if called in contexts other than vp create, disable 147 * or delete, please make sure this is synchronized with the 148 * delete thread. 149 */ 150 fc_port_t *fcport; 151 152 list_for_each_entry(fcport, &vha->vp_fcports, list) { 153 ql_dbg(ql_dbg_vport, vha, 0xa001, 154 "Marking port dead, loop_id=0x%04x : %x.\n", 155 fcport->loop_id, fcport->vha->vp_idx); 156 157 qla2x00_mark_device_lost(vha, fcport, 0); 158 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 159 } 160 } 161 162 int 163 qla24xx_disable_vp(scsi_qla_host_t *vha) 164 { 165 unsigned long flags; 166 int ret = QLA_SUCCESS; 167 fc_port_t *fcport; 168 169 if (vha->hw->flags.edif_enabled) { 170 if (DBELL_ACTIVE(vha)) 171 qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE, 172 FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN); 173 /* delete sessions and flush sa_indexes */ 174 qla2x00_wait_for_sess_deletion(vha); 175 } 176 177 if (vha->hw->flags.fw_started) 178 ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 179 180 atomic_set(&vha->loop_state, LOOP_DOWN); 181 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 182 list_for_each_entry(fcport, &vha->vp_fcports, list) 183 fcport->logout_on_delete = 1; 184 185 if (!vha->hw->flags.edif_enabled) 186 qla2x00_wait_for_sess_deletion(vha); 187 188 /* Remove port id from vp target map */ 189 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 190 qla_update_vp_map(vha, RESET_AL_PA); 191 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 192 193 qla2x00_mark_vp_devices_dead(vha); 194 atomic_set(&vha->vp_state, VP_FAILED); 195 vha->flags.management_server_logged_in = 0; 196 if (ret == QLA_SUCCESS) { 197 fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED); 198 } else { 199 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 200 return -1; 201 } 202 return 0; 203 } 204 205 int 206 qla24xx_enable_vp(scsi_qla_host_t *vha) 207 { 208 int ret; 209 struct qla_hw_data *ha = vha->hw; 210 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 211 212 /* Check if physical ha port is Up */ 213 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || 214 atomic_read(&base_vha->loop_state) == LOOP_DEAD || 215 !(ha->current_topology & ISP_CFG_F)) { 216 vha->vp_err_state = VP_ERR_PORTDWN; 217 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); 218 ql_dbg(ql_dbg_taskm, vha, 0x800b, 219 "%s skip enable. loop_state %x topo %x\n", 220 __func__, base_vha->loop_state.counter, 221 ha->current_topology); 222 223 goto enable_failed; 224 } 225 226 /* Initialize the new vport unless it is a persistent port */ 227 mutex_lock(&ha->vport_lock); 228 ret = qla24xx_modify_vp_config(vha); 229 mutex_unlock(&ha->vport_lock); 230 231 if (ret != QLA_SUCCESS) { 232 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 233 goto enable_failed; 234 } 235 236 ql_dbg(ql_dbg_taskm, vha, 0x801a, 237 "Virtual port with id: %d - Enabled.\n", vha->vp_idx); 238 return 0; 239 240 enable_failed: 241 ql_dbg(ql_dbg_taskm, vha, 0x801b, 242 "Virtual port with id: %d - Disabled.\n", vha->vp_idx); 243 return 1; 244 } 245 246 static void 247 qla24xx_configure_vp(scsi_qla_host_t *vha) 248 { 249 struct fc_vport *fc_vport; 250 int ret; 251 252 fc_vport = vha->fc_vport; 253 254 ql_dbg(ql_dbg_vport, vha, 0xa002, 255 "%s: change request #3.\n", __func__); 256 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); 257 if (ret != QLA_SUCCESS) { 258 ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable " 259 "receiving of RSCN requests: 0x%x.\n", ret); 260 return; 261 } else { 262 /* Corresponds to SCR enabled */ 263 clear_bit(VP_SCR_NEEDED, &vha->vp_flags); 264 } 265 266 vha->flags.online = 1; 267 if (qla24xx_configure_vhba(vha)) 268 return; 269 270 atomic_set(&vha->vp_state, VP_ACTIVE); 271 fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); 272 } 273 274 void 275 qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) 276 { 277 scsi_qla_host_t *vha, *tvp; 278 struct qla_hw_data *ha = rsp->hw; 279 int i = 0; 280 unsigned long flags; 281 282 spin_lock_irqsave(&ha->vport_slock, flags); 283 list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) { 284 if (vha->vp_idx) { 285 if (test_bit(VPORT_DELETE, &vha->dpc_flags)) 286 continue; 287 288 atomic_inc(&vha->vref_count); 289 spin_unlock_irqrestore(&ha->vport_slock, flags); 290 291 switch (mb[0]) { 292 case MBA_LIP_OCCURRED: 293 case MBA_LOOP_UP: 294 case MBA_LOOP_DOWN: 295 case MBA_LIP_RESET: 296 case MBA_POINT_TO_POINT: 297 case MBA_CHG_IN_CONNECTION: 298 ql_dbg(ql_dbg_async, vha, 0x5024, 299 "Async_event for VP[%d], mb=0x%x vha=%p.\n", 300 i, *mb, vha); 301 qla2x00_async_event(vha, rsp, mb); 302 break; 303 case MBA_PORT_UPDATE: 304 case MBA_RSCN_UPDATE: 305 if ((mb[3] & 0xff) == vha->vp_idx) { 306 ql_dbg(ql_dbg_async, vha, 0x5024, 307 "Async_event for VP[%d], mb=0x%x vha=%p\n", 308 i, *mb, vha); 309 qla2x00_async_event(vha, rsp, mb); 310 } 311 break; 312 } 313 314 spin_lock_irqsave(&ha->vport_slock, flags); 315 atomic_dec(&vha->vref_count); 316 wake_up(&vha->vref_waitq); 317 } 318 i++; 319 } 320 spin_unlock_irqrestore(&ha->vport_slock, flags); 321 } 322 323 int 324 qla2x00_vp_abort_isp(scsi_qla_host_t *vha) 325 { 326 fc_port_t *fcport; 327 328 /* 329 * To exclusively reset vport, we need to log it out first. 330 * Note: This control_vp can fail if ISP reset is already 331 * issued, this is expected, as the vp would be already 332 * logged out due to ISP reset. 333 */ 334 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 335 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); 336 list_for_each_entry(fcport, &vha->vp_fcports, list) 337 fcport->logout_on_delete = 0; 338 } 339 340 /* 341 * Physical port will do most of the abort and recovery work. We can 342 * just treat it as a loop down 343 */ 344 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 345 atomic_set(&vha->loop_state, LOOP_DOWN); 346 qla2x00_mark_all_devices_lost(vha); 347 } else { 348 if (!atomic_read(&vha->loop_down_timer)) 349 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 350 } 351 352 ql_dbg(ql_dbg_taskm, vha, 0x801d, 353 "Scheduling enable of Vport %d.\n", vha->vp_idx); 354 355 return qla24xx_enable_vp(vha); 356 } 357 358 static int 359 qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 360 { 361 struct qla_hw_data *ha = vha->hw; 362 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 363 364 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012, 365 "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags); 366 367 /* Check if Fw is ready to configure VP first */ 368 if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) { 369 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { 370 /* VP acquired. complete port configuration */ 371 ql_dbg(ql_dbg_dpc, vha, 0x4014, 372 "Configure VP scheduled.\n"); 373 qla24xx_configure_vp(vha); 374 ql_dbg(ql_dbg_dpc, vha, 0x4015, 375 "Configure VP end.\n"); 376 return 0; 377 } 378 } 379 380 if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) { 381 if (atomic_read(&vha->loop_state) == LOOP_READY) { 382 qla24xx_process_purex_list(&vha->purex_list); 383 clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); 384 } 385 } 386 387 if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) && 388 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && 389 atomic_read(&vha->loop_state) != LOOP_DOWN) { 390 391 if (!vha->relogin_jif || 392 time_after_eq(jiffies, vha->relogin_jif)) { 393 vha->relogin_jif = jiffies + HZ; 394 clear_bit(RELOGIN_NEEDED, &vha->dpc_flags); 395 396 ql_dbg(ql_dbg_dpc, vha, 0x4018, 397 "Relogin needed scheduled.\n"); 398 qla24xx_post_relogin_work(vha); 399 } 400 } 401 402 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && 403 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) { 404 clear_bit(RESET_ACTIVE, &vha->dpc_flags); 405 } 406 407 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 408 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 409 ql_dbg(ql_dbg_dpc, vha, 0x401a, 410 "Loop resync scheduled.\n"); 411 qla2x00_loop_resync(vha); 412 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 413 ql_dbg(ql_dbg_dpc, vha, 0x401b, 414 "Loop resync end.\n"); 415 } 416 } 417 418 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c, 419 "Exiting %s.\n", __func__); 420 return 0; 421 } 422 423 void 424 qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) 425 { 426 struct qla_hw_data *ha = vha->hw; 427 scsi_qla_host_t *vp, *tvp; 428 unsigned long flags = 0; 429 430 if (vha->vp_idx) 431 return; 432 if (list_empty(&ha->vp_list)) 433 return; 434 435 clear_bit(VP_DPC_NEEDED, &vha->dpc_flags); 436 437 if (!(ha->current_topology & ISP_CFG_F)) 438 return; 439 440 spin_lock_irqsave(&ha->vport_slock, flags); 441 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { 442 if (vp->vp_idx) { 443 atomic_inc(&vp->vref_count); 444 spin_unlock_irqrestore(&ha->vport_slock, flags); 445 446 qla2x00_do_dpc_vp(vp); 447 448 spin_lock_irqsave(&ha->vport_slock, flags); 449 atomic_dec(&vp->vref_count); 450 } 451 } 452 spin_unlock_irqrestore(&ha->vport_slock, flags); 453 } 454 455 int 456 qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) 457 { 458 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 459 struct qla_hw_data *ha = base_vha->hw; 460 scsi_qla_host_t *vha; 461 uint8_t port_name[WWN_SIZE]; 462 463 if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR) 464 return VPCERR_UNSUPPORTED; 465 466 /* Check up the F/W and H/W support NPIV */ 467 if (!ha->flags.npiv_supported) 468 return VPCERR_UNSUPPORTED; 469 470 /* Check up whether npiv supported switch presented */ 471 if (!(ha->switch_cap & FLOGI_MID_SUPPORT)) 472 return VPCERR_NO_FABRIC_SUPP; 473 474 /* Check up unique WWPN */ 475 u64_to_wwn(fc_vport->port_name, port_name); 476 if (!memcmp(port_name, base_vha->port_name, WWN_SIZE)) 477 return VPCERR_BAD_WWN; 478 vha = qla24xx_find_vhost_by_name(ha, port_name); 479 if (vha) 480 return VPCERR_BAD_WWN; 481 482 /* Check up max-npiv-supports */ 483 if (ha->num_vhosts > ha->max_npiv_vports) { 484 ql_dbg(ql_dbg_vport, vha, 0xa004, 485 "num_vhosts %ud is bigger " 486 "than max_npiv_vports %ud.\n", 487 ha->num_vhosts, ha->max_npiv_vports); 488 return VPCERR_UNSUPPORTED; 489 } 490 return 0; 491 } 492 493 scsi_qla_host_t * 494 qla24xx_create_vhost(struct fc_vport *fc_vport) 495 { 496 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); 497 struct qla_hw_data *ha = base_vha->hw; 498 scsi_qla_host_t *vha; 499 const struct scsi_host_template *sht = &qla2xxx_driver_template; 500 struct Scsi_Host *host; 501 502 vha = qla2x00_create_host(sht, ha); 503 if (!vha) { 504 ql_log(ql_log_warn, vha, 0xa005, 505 "scsi_host_alloc() failed for vport.\n"); 506 return(NULL); 507 } 508 509 vha->irq_offset = QLA_BASE_VECTORS; 510 host = vha->host; 511 fc_vport->dd_data = vha; 512 /* New host info */ 513 u64_to_wwn(fc_vport->node_name, vha->node_name); 514 u64_to_wwn(fc_vport->port_name, vha->port_name); 515 516 vha->fc_vport = fc_vport; 517 vha->device_flags = 0; 518 vha->vp_idx = qla24xx_allocate_vp_id(vha); 519 if (vha->vp_idx > ha->max_npiv_vports) { 520 ql_dbg(ql_dbg_vport, vha, 0xa006, 521 "Couldn't allocate vp_id.\n"); 522 goto create_vhost_failed; 523 } 524 vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha); 525 526 vha->dpc_flags = 0L; 527 ha->dpc_active = 0; 528 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 529 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 530 531 /* 532 * To fix the issue of processing a parent's RSCN for the vport before 533 * its SCR is complete. 534 */ 535 set_bit(VP_SCR_NEEDED, &vha->vp_flags); 536 atomic_set(&vha->loop_state, LOOP_DOWN); 537 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 538 539 qla2x00_start_timer(vha, WATCH_INTERVAL); 540 541 vha->req = base_vha->req; 542 vha->flags.nvme_enabled = base_vha->flags.nvme_enabled; 543 host->can_queue = base_vha->req->length + 128; 544 host->cmd_per_lun = 3; 545 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) 546 host->max_cmd_len = 32; 547 else 548 host->max_cmd_len = MAX_CMDSZ; 549 host->max_channel = MAX_BUSES - 1; 550 host->max_lun = ql2xmaxlun; 551 host->unique_id = host->host_no; 552 host->max_id = ha->max_fibre_devices; 553 host->transportt = qla2xxx_transport_vport_template; 554 555 ql_dbg(ql_dbg_vport, vha, 0xa007, 556 "Detect vport hba %ld at address = %p.\n", 557 vha->host_no, vha); 558 559 vha->flags.init_done = 1; 560 561 mutex_lock(&ha->vport_lock); 562 set_bit(vha->vp_idx, ha->vp_idx_map); 563 ha->cur_vport_count++; 564 mutex_unlock(&ha->vport_lock); 565 566 return vha; 567 568 create_vhost_failed: 569 return NULL; 570 } 571 572 static void 573 qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) 574 { 575 struct qla_hw_data *ha = vha->hw; 576 uint16_t que_id = req->id; 577 578 dma_free_coherent(&ha->pdev->dev, (req->length + 1) * 579 sizeof(request_t), req->ring, req->dma); 580 req->ring = NULL; 581 req->dma = 0; 582 if (que_id) { 583 ha->req_q_map[que_id] = NULL; 584 mutex_lock(&ha->vport_lock); 585 clear_bit(que_id, ha->req_qid_map); 586 mutex_unlock(&ha->vport_lock); 587 } 588 kfree(req->outstanding_cmds); 589 kfree(req); 590 } 591 592 static void 593 qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 594 { 595 struct qla_hw_data *ha = vha->hw; 596 uint16_t que_id = rsp->id; 597 598 if (rsp->msix && rsp->msix->have_irq) { 599 free_irq(rsp->msix->vector, rsp->msix->handle); 600 rsp->msix->have_irq = 0; 601 rsp->msix->in_use = 0; 602 rsp->msix->handle = NULL; 603 } 604 dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * 605 sizeof(response_t), rsp->ring, rsp->dma); 606 rsp->ring = NULL; 607 rsp->dma = 0; 608 if (que_id) { 609 ha->rsp_q_map[que_id] = NULL; 610 mutex_lock(&ha->vport_lock); 611 clear_bit(que_id, ha->rsp_qid_map); 612 mutex_unlock(&ha->vport_lock); 613 } 614 kfree(rsp); 615 } 616 617 int 618 qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) 619 { 620 int ret = QLA_SUCCESS; 621 622 if (req && vha->flags.qpairs_req_created) { 623 req->options |= BIT_0; 624 ret = qla25xx_init_req_que(vha, req); 625 if (ret != QLA_SUCCESS) 626 return QLA_FUNCTION_FAILED; 627 628 qla25xx_free_req_que(vha, req); 629 } 630 631 return ret; 632 } 633 634 int 635 qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) 636 { 637 int ret = QLA_SUCCESS; 638 639 if (rsp && vha->flags.qpairs_rsp_created) { 640 rsp->options |= BIT_0; 641 ret = qla25xx_init_rsp_que(vha, rsp); 642 if (ret != QLA_SUCCESS) 643 return QLA_FUNCTION_FAILED; 644 645 qla25xx_free_rsp_que(vha, rsp); 646 } 647 648 return ret; 649 } 650 651 /* Delete all queues for a given vhost */ 652 int 653 qla25xx_delete_queues(struct scsi_qla_host *vha) 654 { 655 int cnt, ret = 0; 656 struct req_que *req = NULL; 657 struct rsp_que *rsp = NULL; 658 struct qla_hw_data *ha = vha->hw; 659 struct qla_qpair *qpair, *tqpair; 660 661 if (ql2xmqsupport || ql2xnvmeenable) { 662 list_for_each_entry_safe(qpair, tqpair, &vha->qp_list, 663 qp_list_elem) 664 qla2xxx_delete_qpair(vha, qpair); 665 } else { 666 /* Delete request queues */ 667 for (cnt = 1; cnt < ha->max_req_queues; cnt++) { 668 req = ha->req_q_map[cnt]; 669 if (req && test_bit(cnt, ha->req_qid_map)) { 670 ret = qla25xx_delete_req_que(vha, req); 671 if (ret != QLA_SUCCESS) { 672 ql_log(ql_log_warn, vha, 0x00ea, 673 "Couldn't delete req que %d.\n", 674 req->id); 675 return ret; 676 } 677 } 678 } 679 680 /* Delete response queues */ 681 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { 682 rsp = ha->rsp_q_map[cnt]; 683 if (rsp && test_bit(cnt, ha->rsp_qid_map)) { 684 ret = qla25xx_delete_rsp_que(vha, rsp); 685 if (ret != QLA_SUCCESS) { 686 ql_log(ql_log_warn, vha, 0x00eb, 687 "Couldn't delete rsp que %d.\n", 688 rsp->id); 689 return ret; 690 } 691 } 692 } 693 } 694 695 return ret; 696 } 697 698 int 699 qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, 700 uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp) 701 { 702 int ret = 0; 703 struct req_que *req = NULL; 704 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 705 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 706 uint16_t que_id = 0; 707 device_reg_t *reg; 708 uint32_t cnt; 709 710 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); 711 if (req == NULL) { 712 ql_log(ql_log_fatal, base_vha, 0x00d9, 713 "Failed to allocate memory for request queue.\n"); 714 goto failed; 715 } 716 717 req->length = REQUEST_ENTRY_CNT_24XX; 718 req->ring = dma_alloc_coherent(&ha->pdev->dev, 719 (req->length + 1) * sizeof(request_t), 720 &req->dma, GFP_KERNEL); 721 if (req->ring == NULL) { 722 ql_log(ql_log_fatal, base_vha, 0x00da, 723 "Failed to allocate memory for request_ring.\n"); 724 goto que_failed; 725 } 726 727 ret = qla2x00_alloc_outstanding_cmds(ha, req); 728 if (ret != QLA_SUCCESS) 729 goto que_failed; 730 731 mutex_lock(&ha->mq_lock); 732 que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); 733 if (que_id >= ha->max_req_queues) { 734 mutex_unlock(&ha->mq_lock); 735 ql_log(ql_log_warn, base_vha, 0x00db, 736 "No resources to create additional request queue.\n"); 737 goto que_failed; 738 } 739 set_bit(que_id, ha->req_qid_map); 740 ha->req_q_map[que_id] = req; 741 req->rid = rid; 742 req->vp_idx = vp_idx; 743 req->qos = qos; 744 745 ql_dbg(ql_dbg_multiq, base_vha, 0xc002, 746 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", 747 que_id, req->rid, req->vp_idx, req->qos); 748 ql_dbg(ql_dbg_init, base_vha, 0x00dc, 749 "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", 750 que_id, req->rid, req->vp_idx, req->qos); 751 if (rsp_que < 0) 752 req->rsp = NULL; 753 else 754 req->rsp = ha->rsp_q_map[rsp_que]; 755 /* Use alternate PCI bus number */ 756 if (MSB(req->rid)) 757 options |= BIT_4; 758 /* Use alternate PCI devfn */ 759 if (LSB(req->rid)) 760 options |= BIT_5; 761 req->options = options; 762 763 ql_dbg(ql_dbg_multiq, base_vha, 0xc003, 764 "options=0x%x.\n", req->options); 765 ql_dbg(ql_dbg_init, base_vha, 0x00dd, 766 "options=0x%x.\n", req->options); 767 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) 768 req->outstanding_cmds[cnt] = NULL; 769 req->current_outstanding_cmd = 1; 770 771 req->ring_ptr = req->ring; 772 req->ring_index = 0; 773 req->cnt = req->length; 774 req->id = que_id; 775 reg = ISP_QUE_REG(ha, que_id); 776 req->req_q_in = ®->isp25mq.req_q_in; 777 req->req_q_out = ®->isp25mq.req_q_out; 778 req->max_q_depth = ha->req_q_map[0]->max_q_depth; 779 req->out_ptr = (uint16_t *)(req->ring + req->length); 780 mutex_unlock(&ha->mq_lock); 781 ql_dbg(ql_dbg_multiq, base_vha, 0xc004, 782 "ring_ptr=%p ring_index=%d, " 783 "cnt=%d id=%d max_q_depth=%d.\n", 784 req->ring_ptr, req->ring_index, 785 req->cnt, req->id, req->max_q_depth); 786 ql_dbg(ql_dbg_init, base_vha, 0x00de, 787 "ring_ptr=%p ring_index=%d, " 788 "cnt=%d id=%d max_q_depth=%d.\n", 789 req->ring_ptr, req->ring_index, req->cnt, 790 req->id, req->max_q_depth); 791 792 if (startqp) { 793 ret = qla25xx_init_req_que(base_vha, req); 794 if (ret != QLA_SUCCESS) { 795 ql_log(ql_log_fatal, base_vha, 0x00df, 796 "%s failed.\n", __func__); 797 mutex_lock(&ha->mq_lock); 798 clear_bit(que_id, ha->req_qid_map); 799 mutex_unlock(&ha->mq_lock); 800 goto que_failed; 801 } 802 vha->flags.qpairs_req_created = 1; 803 } 804 805 return req->id; 806 807 que_failed: 808 qla25xx_free_req_que(base_vha, req); 809 failed: 810 return 0; 811 } 812 813 static void qla_do_work(struct work_struct *work) 814 { 815 unsigned long flags; 816 struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work); 817 struct scsi_qla_host *vha = qpair->vha; 818 819 spin_lock_irqsave(&qpair->qp_lock, flags); 820 qla24xx_process_response_queue(vha, qpair->rsp); 821 spin_unlock_irqrestore(&qpair->qp_lock, flags); 822 823 } 824 825 /* create response queue */ 826 int 827 qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, 828 uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp) 829 { 830 int ret = 0; 831 struct rsp_que *rsp = NULL; 832 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 833 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 834 uint16_t que_id = 0; 835 device_reg_t *reg; 836 837 rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); 838 if (rsp == NULL) { 839 ql_log(ql_log_warn, base_vha, 0x0066, 840 "Failed to allocate memory for response queue.\n"); 841 goto failed; 842 } 843 844 rsp->length = RESPONSE_ENTRY_CNT_MQ; 845 rsp->ring = dma_alloc_coherent(&ha->pdev->dev, 846 (rsp->length + 1) * sizeof(response_t), 847 &rsp->dma, GFP_KERNEL); 848 if (rsp->ring == NULL) { 849 ql_log(ql_log_warn, base_vha, 0x00e1, 850 "Failed to allocate memory for response ring.\n"); 851 goto que_failed; 852 } 853 854 mutex_lock(&ha->mq_lock); 855 que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); 856 if (que_id >= ha->max_rsp_queues) { 857 mutex_unlock(&ha->mq_lock); 858 ql_log(ql_log_warn, base_vha, 0x00e2, 859 "No resources to create additional request queue.\n"); 860 goto que_failed; 861 } 862 set_bit(que_id, ha->rsp_qid_map); 863 864 rsp->msix = qpair->msix; 865 866 ha->rsp_q_map[que_id] = rsp; 867 rsp->rid = rid; 868 rsp->vp_idx = vp_idx; 869 rsp->hw = ha; 870 ql_dbg(ql_dbg_init, base_vha, 0x00e4, 871 "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n", 872 que_id, rsp->rid, rsp->vp_idx, rsp->hw); 873 /* Use alternate PCI bus number */ 874 if (MSB(rsp->rid)) 875 options |= BIT_4; 876 /* Use alternate PCI devfn */ 877 if (LSB(rsp->rid)) 878 options |= BIT_5; 879 /* Enable MSIX handshake mode on for uncapable adapters */ 880 if (!IS_MSIX_NACK_CAPABLE(ha)) 881 options |= BIT_6; 882 883 /* Set option to indicate response queue creation */ 884 options |= BIT_1; 885 886 rsp->options = options; 887 rsp->id = que_id; 888 reg = ISP_QUE_REG(ha, que_id); 889 rsp->rsp_q_in = ®->isp25mq.rsp_q_in; 890 rsp->rsp_q_out = ®->isp25mq.rsp_q_out; 891 rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); 892 mutex_unlock(&ha->mq_lock); 893 ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, 894 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", 895 rsp->options, rsp->id, rsp->rsp_q_in, 896 rsp->rsp_q_out); 897 ql_dbg(ql_dbg_init, base_vha, 0x00e5, 898 "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", 899 rsp->options, rsp->id, rsp->rsp_q_in, 900 rsp->rsp_q_out); 901 902 ret = qla25xx_request_irq(ha, qpair, qpair->msix, 903 ha->flags.disable_msix_handshake ? 904 QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS); 905 if (ret) 906 goto que_failed; 907 908 if (startqp) { 909 ret = qla25xx_init_rsp_que(base_vha, rsp); 910 if (ret != QLA_SUCCESS) { 911 ql_log(ql_log_fatal, base_vha, 0x00e7, 912 "%s failed.\n", __func__); 913 mutex_lock(&ha->mq_lock); 914 clear_bit(que_id, ha->rsp_qid_map); 915 mutex_unlock(&ha->mq_lock); 916 goto que_failed; 917 } 918 vha->flags.qpairs_rsp_created = 1; 919 } 920 rsp->req = NULL; 921 922 qla2x00_init_response_q_entries(rsp); 923 if (qpair->hw->wq) 924 INIT_WORK(&qpair->q_work, qla_do_work); 925 return rsp->id; 926 927 que_failed: 928 qla25xx_free_rsp_que(base_vha, rsp); 929 failed: 930 return 0; 931 } 932 933 static void qla_ctrlvp_sp_done(srb_t *sp, int res) 934 { 935 if (sp->comp) 936 complete(sp->comp); 937 /* don't free sp here. Let the caller do the free */ 938 } 939 940 /** 941 * qla24xx_control_vp() - Enable a virtual port for given host 942 * @vha: adapter block pointer 943 * @cmd: command type to be sent for enable virtual port 944 * 945 * Return: qla2xxx local function return status code. 946 */ 947 int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) 948 { 949 int rval = QLA_MEMORY_ALLOC_FAILED; 950 struct qla_hw_data *ha = vha->hw; 951 int vp_index = vha->vp_idx; 952 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 953 DECLARE_COMPLETION_ONSTACK(comp); 954 srb_t *sp; 955 956 ql_dbg(ql_dbg_vport, vha, 0x10c1, 957 "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index); 958 959 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 960 return QLA_PARAMETER_ERROR; 961 962 /* ref: INIT */ 963 sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL); 964 if (!sp) 965 return rval; 966 967 sp->type = SRB_CTRL_VP; 968 sp->name = "ctrl_vp"; 969 sp->comp = ∁ 970 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, 971 qla_ctrlvp_sp_done); 972 sp->u.iocb_cmd.u.ctrlvp.cmd = cmd; 973 sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index; 974 975 rval = qla2x00_start_sp(sp); 976 if (rval != QLA_SUCCESS) { 977 ql_dbg(ql_dbg_async, vha, 0xffff, 978 "%s: %s Failed submission. %x.\n", 979 __func__, sp->name, rval); 980 goto done; 981 } 982 983 ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n", 984 sp->name, sp->handle); 985 986 wait_for_completion(&comp); 987 sp->comp = NULL; 988 989 rval = sp->rc; 990 switch (rval) { 991 case QLA_FUNCTION_TIMEOUT: 992 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n", 993 __func__, sp->name, rval); 994 break; 995 case QLA_SUCCESS: 996 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n", 997 __func__, sp->name); 998 break; 999 default: 1000 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n", 1001 __func__, sp->name, rval); 1002 break; 1003 } 1004 done: 1005 /* ref: INIT */ 1006 kref_put(&sp->cmd_kref, qla2x00_sp_release); 1007 return rval; 1008 } 1009 1010 struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx) 1011 { 1012 struct qla_hw_data *ha = vha->hw; 1013 1014 if (vha->vp_idx == vp_idx) 1015 return vha; 1016 1017 BUG_ON(ha->vp_map == NULL); 1018 if (likely(test_bit(vp_idx, ha->vp_idx_map))) 1019 return ha->vp_map[vp_idx].vha; 1020 1021 return NULL; 1022 } 1023 1024 /* vport_slock to be held by the caller */ 1025 void 1026 qla_update_vp_map(struct scsi_qla_host *vha, int cmd) 1027 { 1028 void *slot; 1029 u32 key; 1030 int rc; 1031 1032 if (!vha->hw->vp_map) 1033 return; 1034 1035 key = vha->d_id.b24; 1036 1037 switch (cmd) { 1038 case SET_VP_IDX: 1039 vha->hw->vp_map[vha->vp_idx].vha = vha; 1040 break; 1041 case SET_AL_PA: 1042 slot = btree_lookup32(&vha->hw->host_map, key); 1043 if (!slot) { 1044 ql_dbg(ql_dbg_disc, vha, 0xf018, 1045 "Save vha in host_map %p %06x\n", vha, key); 1046 rc = btree_insert32(&vha->hw->host_map, 1047 key, vha, GFP_ATOMIC); 1048 if (rc) 1049 ql_log(ql_log_info, vha, 0xd03e, 1050 "Unable to insert s_id into host_map: %06x\n", 1051 key); 1052 return; 1053 } 1054 ql_dbg(ql_dbg_disc, vha, 0xf019, 1055 "replace existing vha in host_map %p %06x\n", vha, key); 1056 btree_update32(&vha->hw->host_map, key, vha); 1057 break; 1058 case RESET_VP_IDX: 1059 vha->hw->vp_map[vha->vp_idx].vha = NULL; 1060 break; 1061 case RESET_AL_PA: 1062 ql_dbg(ql_dbg_disc, vha, 0xf01a, 1063 "clear vha in host_map %p %06x\n", vha, key); 1064 slot = btree_lookup32(&vha->hw->host_map, key); 1065 if (slot) 1066 btree_remove32(&vha->hw->host_map, key); 1067 vha->d_id.b24 = 0; 1068 break; 1069 } 1070 } 1071 1072 void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id) 1073 { 1074 1075 if (!vha->d_id.b24) { 1076 vha->d_id = id; 1077 qla_update_vp_map(vha, SET_AL_PA); 1078 } else if (vha->d_id.b24 != id.b24) { 1079 qla_update_vp_map(vha, RESET_AL_PA); 1080 vha->d_id = id; 1081 qla_update_vp_map(vha, SET_AL_PA); 1082 } 1083 } 1084 1085 int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp) 1086 { 1087 int sz; 1088 1089 qp->buf_pool.num_bufs = qp->req->length; 1090 1091 sz = BITS_TO_LONGS(qp->req->length); 1092 qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL); 1093 if (!qp->buf_pool.buf_map) { 1094 ql_log(ql_log_warn, vha, 0x0186, 1095 "Failed to allocate buf_map(%zd).\n", sz * sizeof(unsigned long)); 1096 return -ENOMEM; 1097 } 1098 sz = qp->req->length * sizeof(void *); 1099 qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL); 1100 if (!qp->buf_pool.buf_array) { 1101 ql_log(ql_log_warn, vha, 0x0186, 1102 "Failed to allocate buf_array(%d).\n", sz); 1103 kfree(qp->buf_pool.buf_map); 1104 return -ENOMEM; 1105 } 1106 sz = qp->req->length * sizeof(dma_addr_t); 1107 qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t), GFP_KERNEL); 1108 if (!qp->buf_pool.dma_array) { 1109 ql_log(ql_log_warn, vha, 0x0186, 1110 "Failed to allocate dma_array(%d).\n", sz); 1111 kfree(qp->buf_pool.buf_map); 1112 kfree(qp->buf_pool.buf_array); 1113 return -ENOMEM; 1114 } 1115 set_bit(0, qp->buf_pool.buf_map); 1116 return 0; 1117 } 1118 1119 void qla_free_buf_pool(struct qla_qpair *qp) 1120 { 1121 int i; 1122 struct qla_hw_data *ha = qp->vha->hw; 1123 1124 for (i = 0; i < qp->buf_pool.num_bufs; i++) { 1125 if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i]) 1126 dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i], 1127 qp->buf_pool.dma_array[i]); 1128 qp->buf_pool.buf_array[i] = NULL; 1129 qp->buf_pool.dma_array[i] = 0; 1130 } 1131 1132 kfree(qp->buf_pool.dma_array); 1133 kfree(qp->buf_pool.buf_array); 1134 kfree(qp->buf_pool.buf_map); 1135 } 1136 1137 /* it is assume qp->qp_lock is held at this point */ 1138 int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc) 1139 { 1140 u16 tag, i = 0; 1141 void *buf; 1142 dma_addr_t buf_dma; 1143 struct qla_hw_data *ha = vha->hw; 1144 1145 dsc->tag = TAG_FREED; 1146 again: 1147 tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs); 1148 if (tag >= qp->buf_pool.num_bufs) { 1149 ql_dbg(ql_dbg_io, vha, 0x00e2, 1150 "qp(%d) ran out of buf resource.\n", qp->id); 1151 return -EIO; 1152 } 1153 if (tag == 0) { 1154 set_bit(0, qp->buf_pool.buf_map); 1155 i++; 1156 if (i == 5) { 1157 ql_dbg(ql_dbg_io, vha, 0x00e3, 1158 "qp(%d) unable to get tag.\n", qp->id); 1159 return -EIO; 1160 } 1161 goto again; 1162 } 1163 1164 if (!qp->buf_pool.buf_array[tag]) { 1165 buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma); 1166 if (!buf) { 1167 ql_log(ql_log_fatal, vha, 0x13b1, 1168 "Failed to allocate buf.\n"); 1169 return -ENOMEM; 1170 } 1171 1172 dsc->buf = qp->buf_pool.buf_array[tag] = buf; 1173 dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma; 1174 qp->buf_pool.num_alloc++; 1175 } else { 1176 dsc->buf = qp->buf_pool.buf_array[tag]; 1177 dsc->buf_dma = qp->buf_pool.dma_array[tag]; 1178 memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE); 1179 } 1180 1181 qp->buf_pool.num_active++; 1182 if (qp->buf_pool.num_active > qp->buf_pool.max_used) 1183 qp->buf_pool.max_used = qp->buf_pool.num_active; 1184 1185 dsc->tag = tag; 1186 set_bit(tag, qp->buf_pool.buf_map); 1187 return 0; 1188 } 1189 1190 static void qla_trim_buf(struct qla_qpair *qp, u16 trim) 1191 { 1192 int i, j; 1193 struct qla_hw_data *ha = qp->vha->hw; 1194 1195 if (!trim) 1196 return; 1197 1198 for (i = 0; i < trim; i++) { 1199 j = qp->buf_pool.num_alloc - 1; 1200 if (test_bit(j, qp->buf_pool.buf_map)) { 1201 ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b, 1202 "QP id(%d): trim active buf[%d]. Remain %d bufs\n", 1203 qp->id, j, qp->buf_pool.num_alloc); 1204 return; 1205 } 1206 1207 if (qp->buf_pool.buf_array[j]) { 1208 dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j], 1209 qp->buf_pool.dma_array[j]); 1210 qp->buf_pool.buf_array[j] = NULL; 1211 qp->buf_pool.dma_array[j] = 0; 1212 } 1213 qp->buf_pool.num_alloc--; 1214 if (!qp->buf_pool.num_alloc) 1215 break; 1216 } 1217 ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010, 1218 "QP id(%d): trimmed %d bufs. Remain %d bufs\n", 1219 qp->id, trim, qp->buf_pool.num_alloc); 1220 } 1221 1222 static void __qla_adjust_buf(struct qla_qpair *qp) 1223 { 1224 u32 trim; 1225 1226 qp->buf_pool.take_snapshot = 0; 1227 qp->buf_pool.prev_max = qp->buf_pool.max_used; 1228 qp->buf_pool.max_used = qp->buf_pool.num_active; 1229 1230 if (qp->buf_pool.prev_max > qp->buf_pool.max_used && 1231 qp->buf_pool.num_alloc > qp->buf_pool.max_used) { 1232 /* down trend */ 1233 trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used; 1234 trim = (trim * 10) / 100; 1235 trim = trim ? trim : 1; 1236 qla_trim_buf(qp, trim); 1237 } else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) { 1238 /* 2 periods of no io */ 1239 qla_trim_buf(qp, qp->buf_pool.num_alloc); 1240 } 1241 } 1242 1243 /* it is assume qp->qp_lock is held at this point */ 1244 void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc) 1245 { 1246 if (dsc->tag == TAG_FREED) 1247 return; 1248 lockdep_assert_held(qp->qp_lock_ptr); 1249 1250 clear_bit(dsc->tag, qp->buf_pool.buf_map); 1251 qp->buf_pool.num_active--; 1252 dsc->tag = TAG_FREED; 1253 1254 if (qp->buf_pool.take_snapshot) 1255 __qla_adjust_buf(qp); 1256 } 1257 1258 #define EXPIRE (60 * HZ) 1259 void qla_adjust_buf(struct scsi_qla_host *vha) 1260 { 1261 unsigned long flags; 1262 int i; 1263 struct qla_qpair *qp; 1264 1265 if (vha->vp_idx) 1266 return; 1267 1268 if (!vha->buf_expired) { 1269 vha->buf_expired = jiffies + EXPIRE; 1270 return; 1271 } 1272 if (time_before(jiffies, vha->buf_expired)) 1273 return; 1274 1275 vha->buf_expired = jiffies + EXPIRE; 1276 1277 for (i = 0; i < vha->hw->num_qpairs; i++) { 1278 qp = vha->hw->queue_pair_map[i]; 1279 if (!qp) 1280 continue; 1281 if (!qp->buf_pool.num_alloc) 1282 continue; 1283 1284 if (qp->buf_pool.take_snapshot) { 1285 /* no io has gone through in the last EXPIRE period */ 1286 spin_lock_irqsave(qp->qp_lock_ptr, flags); 1287 __qla_adjust_buf(qp); 1288 spin_unlock_irqrestore(qp->qp_lock_ptr, flags); 1289 } else { 1290 qp->buf_pool.take_snapshot = 1; 1291 } 1292 } 1293 } 1294