/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
#include "lpfc_vport.h"

inline void lpfc_vport_set_state(struct lpfc_vport *vport,
				 enum fc_vport_state new_state)
{
	struct fc_vport *fc_vport = vport->fc_vport;

	if (fc_vport) {
		/*
		 * When the transport defines fc_vport_set_state we will
		 * replace this code with the following line
		 */
		/* fc_vport_set_state(fc_vport, new_state); */
		if (new_state != FC_VPORT_INITIALIZING)
			fc_vport->vport_last_state = fc_vport->vport_state;
		fc_vport->vport_state = new_state;
	}

	/* for all the error states we will set the internal state to FAILED */
	switch (new_state) {
	case FC_VPORT_NO_FABRIC_SUPP:
	case FC_VPORT_NO_FABRIC_RSCS:
	case FC_VPORT_FABRIC_LOGOUT:
	case FC_VPORT_FABRIC_REJ_WWN:
	case FC_VPORT_FAILED:
		vport->port_state = LPFC_VPORT_FAILED;
		break;
	case FC_VPORT_LINKDOWN:
		vport->port_state = LPFC_VPORT_UNKNOWN;
		break;
	default:
		/* do nothing */
		break;
	}
}

int
lpfc_alloc_vpi(struct lpfc_hba *phba)
{
	unsigned long vpi;

	spin_lock_irq(&phba->hbalock);
	/* Start at bit 1 because vpi zero is reserved for the physical port */
	vpi = find_next_zero_bit(phba->vpi_bmask, (phba->max_vpi + 1), 1);
	if (vpi > phba->max_vpi)
		vpi = 0;
	else
		set_bit(vpi, phba->vpi_bmask);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->sli4_hba.max_cfg_param.vpi_used++;
	spin_unlock_irq(&phba->hbalock);
	return vpi;
}

static void
lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
{
	if (vpi == 0)
		return;
	spin_lock_irq(&phba->hbalock);
	clear_bit(vpi, phba->vpi_bmask);
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->sli4_hba.max_cfg_param.vpi_used--;
	spin_unlock_irq(&phba->hbalock);
}

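/**
 * lpfc_vport_sparm - Read the service parameters for a vport
 * @phba: pointer to lpfc hba data structure.
 * @vport: pointer to the vport whose service parameters are requested.
 *
 * This routine issues a READ_SPARAM mailbox command and waits for its
 * completion, then copies the returned service parameters, node name and
 * port name into the vport.
 *
 * Return codes
 *   0       - success
 *   -ENOMEM - mailbox allocation or setup failure
 *   -EINTR  - the mailbox wait was interrupted by a signal
 *   -EIO    - the mailbox command failed
 **/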
static int
lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	rc = lpfc_read_sparam(phba, pmb, vport->vpi);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	/*
	 * Grab buffer pointer and clear context1 so we can use
	 * lpfc_sli_issue_mbox_wait
	 */
	mp = (struct lpfc_dmabuf *) pmb->context1;
	pmb->context1 = NULL;

	pmb->vport = vport;
	rc = lpfc_sli_issue_mbox_wait(phba, pmb, phba->fc_ratov * 2);
	if (rc != MBX_SUCCESS) {
		if (signal_pending(current)) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
					 "1830 Signal aborted mbxCmd x%x\n",
					 mb->mbxCommand);
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			if (rc != MBX_TIMEOUT)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EINTR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT | LOG_VPORT,
					 "1818 VPort failed init, mbxCmd x%x "
					 "READ_SPARM mbxStatus x%x, rc = x%x\n",
					 mb->mbxCommand, mb->mbxStatus, rc);
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
			if (rc != MBX_TIMEOUT)
				mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof (struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof (struct lpfc_name));

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

static int
lpfc_valid_wwn_format(struct lpfc_hba *phba, struct lpfc_name *wwn,
		      const char *name_type)
{
	/* ensure that IEEE format 1 addresses
	 * contain zeros in bits 59-48
	 */
	if (!((wwn->u.wwn[0] >> 4) == 1 &&
	      ((wwn->u.wwn[0] & 0xf) != 0 || (wwn->u.wwn[1] & 0xf) != 0)))
		return 1;

	lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
			"1822 Invalid %s: %02x:%02x:%02x:%02x:"
			"%02x:%02x:%02x:%02x\n",
			name_type,
			wwn->u.wwn[0], wwn->u.wwn[1],
			wwn->u.wwn[2], wwn->u.wwn[3],
			wwn->u.wwn[4], wwn->u.wwn[5],
			wwn->u.wwn[6], wwn->u.wwn[7]);
	return 0;
}

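/**
 * lpfc_unique_wwpn - Check that a vport's WWPN is not already in use
 * @phba: pointer to lpfc hba data structure.
 * @new_vport: pointer to the vport being created.
 *
 * This routine walks the HBA's port list and compares the WWPN of the
 * new vport against every other port on the adapter.
 *
 * Return codes
 *   1 - the WWPN is unique
 *   0 - another port already uses the same WWPN
 **/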
static int
lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
{
	struct lpfc_vport *vport;
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_for_each_entry(vport, &phba->port_list, listentry) {
		if (vport == new_vport)
			continue;
		/* If they match, return not unique */
		if (memcmp(&vport->fc_sparam.portName,
			   &new_vport->fc_sparam.portName,
			   sizeof(struct lpfc_name)) == 0) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}

/**
 * lpfc_discovery_wait - Wait for driver discovery to quiesce
 * @vport: The virtual port for which this call is being executed.
 *
 * This driver calls this routine specifically from lpfc_vport_delete
 * to enforce a synchronous execution of vport delete relative to
 * discovery activities.  The lpfc_vport_delete routine should not
 * return until it can reasonably guarantee that discovery has quiesced.
 * Post FDISC LOGO, the driver must wait until its SAN teardown is
 * complete and all resources recovered before allowing cleanup.
 *
 * This routine does not require any locks held.
 **/
static void lpfc_discovery_wait(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t wait_flags = 0;
	unsigned long wait_time_max;
	unsigned long start_time;

	wait_flags = FC_RSCN_MODE | FC_RSCN_DISCOVERY | FC_NLP_MORE |
		     FC_RSCN_DEFERRED | FC_NDISC_ACTIVE | FC_DISC_TMO;

	/*
	 * The time constraint on this loop is a balance between the
	 * fabric RA_TOV value and dev_loss tmo.  The driver's
	 * devloss_tmo is 10 giving this loop a 3x multiplier minimally.
	 */
	wait_time_max = msecs_to_jiffies(((phba->fc_ratov * 3) + 3) * 1000);
	wait_time_max += jiffies;
	start_time = jiffies;
	while (time_before(jiffies, wait_time_max)) {
		if ((vport->num_disc_nodes > 0) ||
		    (vport->fc_flag & wait_flags) ||
		    ((vport->port_state > LPFC_VPORT_FAILED) &&
		     (vport->port_state < LPFC_VPORT_READY))) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
					 "1833 Vport discovery quiesce Wait:"
					 " state x%x fc_flags x%x"
					 " num_nodes x%x, waiting 1000 msecs"
					 " total wait msecs x%x\n",
					 vport->port_state, vport->fc_flag,
					 vport->num_disc_nodes,
					 jiffies_to_msecs(jiffies - start_time));
			msleep(1000);
		} else {
			/* Base case.  Wait variants satisfied.  Break out */
			lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
					 "1834 Vport discovery quiesced:"
					 " state x%x fc_flags x%x"
					 " wait msecs x%x\n",
					 vport->port_state, vport->fc_flag,
					 jiffies_to_msecs(jiffies
						- start_time));
			break;
		}
	}

	if (time_after(jiffies, wait_time_max))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1835 Vport discovery quiesce failed:"
				 " state x%x fc_flags x%x wait msecs x%x\n",
				 vport->port_state, vport->fc_flag,
				 jiffies_to_msecs(jiffies - start_time));
}

int
lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = fc_vport->shost;
	struct lpfc_vport *pport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = pport->phba;
	struct lpfc_vport *vport = NULL;
	int instance;
	int vpi;
	int rc = VPORT_ERROR;
	int status;

	if ((phba->sli_rev < 3) || !(phba->cfg_enable_npiv)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1808 Create VPORT failed: "
				"NPIV is not enabled: SLImode:%d\n",
				phba->sli_rev);
		rc = VPORT_INVAL;
		goto error_out;
	}

	vpi = lpfc_alloc_vpi(phba);
	if (vpi == 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1809 Create VPORT failed: "
				"Max VPORTs (%d) exceeded\n",
				phba->max_vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	/* Assign an unused board number */
	if ((instance = lpfc_get_instance()) < 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1810 Create VPORT failed: Cannot get "
				"instance number\n");
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport = lpfc_create_port(phba, instance, &fc_vport->dev);
	if (!vport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
				"1811 Create VPORT failed: vpi x%x\n", vpi);
		lpfc_free_vpi(phba, vpi);
		rc = VPORT_NORESOURCES;
		goto error_out;
	}

	vport->vpi = vpi;
	lpfc_debugfs_initialize(vport);

	if ((status = lpfc_vport_sparm(phba, vport))) {
		if (status == -EINTR) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
					 "1831 Create VPORT Interrupted.\n");
			rc = VPORT_ERROR;
		} else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
					 "1813 Create VPORT failed. "
					 "Cannot get sparam\n");
			rc = VPORT_NORESOURCES;
		}
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		goto error_out;
	}

	u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
	u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);

	memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
	memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);

	if (!lpfc_valid_wwn_format(phba, &vport->fc_sparam.nodeName, "WWNN") ||
	    !lpfc_valid_wwn_format(phba, &vport->fc_sparam.portName, "WWPN")) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1821 Create VPORT failed. "
				 "Invalid WWN format\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	if (!lpfc_unique_wwpn(phba, vport)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1823 Create VPORT failed. "
				 "Duplicate WWN on HBA\n");
		lpfc_free_vpi(phba, vpi);
		destroy_port(vport);
		rc = VPORT_INVAL;
		goto error_out;
	}

	/* Create binary sysfs attribute for vport */
	lpfc_alloc_sysfs_attr(vport);

	/* Set the DFT_LUN_Q_DEPTH accordingly */
	vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;

	*(struct lpfc_vport **)fc_vport->dd_data = vport;
	vport->fc_vport = fc_vport;

	/* At this point we are fully registered with SCSI Layer.  */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) {
		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
		vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
	}

	/*
	 * In SLI4, the vpi must be activated before it can be used
	 * by the port.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (pport->fc_flag & FC_VFI_REGISTERED)) {
		rc = lpfc_sli4_init_vpi(vport);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
					"1838 Failed to INIT_VPI on vpi %d "
					"status %d\n", vpi, rc);
			rc = VPORT_NORESOURCES;
			lpfc_free_vpi(phba, vpi);
			goto error_out;
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/*
		 * Driver cannot INIT_VPI now. Set the flags to
		 * init_vpi when reg_vfi complete.
		 */
		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	if ((phba->link_state < LPFC_LINK_UP) ||
	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		rc = VPORT_OK;
		goto out;
	}

	if (disable) {
		lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
		rc = VPORT_OK;
		goto out;
	}

	/* Use the Physical nodes Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0262 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}
	rc = VPORT_OK;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1825 Vport Created.\n");
	lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
	return rc;
}

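/**
 * disable_vport - Take an lpfc vport offline without deleting it
 * @fc_vport: the fc_vport backing the lpfc vport to disable.
 *
 * This routine logs the vport out of the fabric, marks its nodes for
 * recovery, cleans up RPIs, stops the vport timers and issues UNREG_VPI,
 * then moves the vport to the FC_VPORT_DISABLED state.
 *
 * Always returns VPORT_OK.
 **/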
static int
disable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
	long timeout;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && phba->link_state >= LPFC_LINK_UP) {
		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	lpfc_sli_host_down(vport);

	/* Mark all nodes for discovery so we can remove them by
	 * calling lpfc_cleanup_rpis(vport, 1)
	 */
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);
	}
	lpfc_cleanup_rpis(vport, 1);

	lpfc_stop_vport_timers(vport);
	lpfc_unreg_all_rpis(vport);
	lpfc_unreg_default_rpis(vport);
	/*
	 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi) does the
	 * scsi_host_put() to release the vport.
	 */
	lpfc_mbx_unreg_vpi(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
	spin_unlock_irq(shost->host_lock);

	lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1826 Vport Disabled.\n");
	return VPORT_OK;
}

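/**
 * enable_vport - Bring a disabled lpfc vport back online
 * @fc_vport: the fc_vport backing the lpfc vport to enable.
 *
 * If the physical link is down or the topology is loop, the vport is
 * simply moved to FC_VPORT_LINKDOWN.  Otherwise this routine uses the
 * physical port's fabric node to restart discovery, issuing an initial
 * FDISC when NPIV is supported by the fabric.
 *
 * Always returns VPORT_OK.
 **/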
static int
enable_vport(struct fc_vport *fc_vport)
{
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((phba->link_state < LPFC_LINK_UP) ||
	    (phba->fc_topology == LPFC_TOPOLOGY_LOOP)) {
		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
		return VPORT_OK;
	}

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);

	/* Use the Physical nodes Fabric NDLP to determine if the link is
	 * up and ready to FDISC.
	 */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
		if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) {
			lpfc_set_disctmo(vport);
			lpfc_initial_fdisc(vport);
		} else {
			lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "0264 No NPIV Fabric support\n");
		}
	} else {
		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1827 Vport Enabled.\n");
	return VPORT_OK;
}

int
lpfc_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	if (disable)
		return disable_vport(fc_vport);
	else
		return enable_vport(fc_vport);
}


int
lpfc_vport_delete(struct fc_vport *fc_vport)
{
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	long timeout;
	bool ns_ndlp_referenced = false;

	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1812 vport_delete failed: Cannot delete "
				 "physical host\n");
		return VPORT_ERROR;
	}

	/* If the vport is a static vport fail the deletion. */
	if ((vport->vport_flag & STATIC_VPORT) &&
	    !(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
				 "1837 vport_delete failed: Cannot delete "
				 "static vport.\n");
		return VPORT_ERROR;
	}
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);
	/*
	 * If we are not unloading the driver then prevent the vport_delete
	 * from happening until after this vport's discovery is finished.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		int check_count = 0;
		while (check_count < ((phba->fc_ratov * 3) + 3) &&
		       vport->port_state > LPFC_VPORT_FAILED &&
		       vport->port_state < LPFC_VPORT_READY) {
			check_count++;
			msleep(1000);
		}
		if (vport->port_state > LPFC_VPORT_FAILED &&
		    vport->port_state < LPFC_VPORT_READY)
			return -EAGAIN;
	}
	/*
	 * This is a bit of a mess.  We want to ensure the shost doesn't get
	 * torn down until we're done with the embedded lpfc_vport structure.
	 *
	 * Beyond holding a reference for this function, we also need a
	 * reference for outstanding I/O requests we schedule during delete
	 * processing.  But once we scsi_remove_host() we can no longer obtain
	 * a reference through scsi_host_get().
	 *
	 * So we take two references here.  We release one reference at the
	 * bottom of the function -- after delinking the vport.  And we
	 * release the other at the completion of the unreg_vpi that gets
	 * initiated after we've disposed of all other resources associated
	 * with the port.
	 */
	if (!scsi_host_get(shost))
		return VPORT_INVAL;
	if (!scsi_host_get(shost)) {
		scsi_host_put(shost);
		return VPORT_INVAL;
	}
	lpfc_free_sysfs_attr(vport);

	lpfc_debugfs_terminate(vport);

	/*
	 * The call to fc_remove_host might release the NameServer ndlp. Since
	 * we might need to use the ndlp to send the DA_ID CT command,
	 * increment the reference for the NameServer ndlp to prevent it from
	 * being released.
	 */
	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_nlp_get(ndlp);
		ns_ndlp_referenced = true;
	}

	/* Remove FC host and then SCSI host with the vport */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);

	/* In case of driver unload, we shall not perform fabric logo as the
	 * worker thread already stopped at this stage and, in this case, we
	 * can safely skip the fabric logo.
	 */
	if (phba->pport->load_flag & FC_UNLOADING) {
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
		    phba->link_state >= LPFC_LINK_UP) {
			/* First look for the Fabric ndlp */
			ndlp = lpfc_findnode_did(vport, Fabric_DID);
			if (!ndlp)
				goto skip_logo;
			else if (!NLP_CHK_NODE_ACT(ndlp)) {
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}
			/* Remove ndlp from vport node list */
			lpfc_dequeue_node(vport, ndlp);

			/* Indicate free memory when release */
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Kick off release ndlp when it can be safely done */
			lpfc_nlp_put(ndlp);
		}
		goto skip_logo;
	}

	/* Otherwise, we will perform fabric logo as needed */
	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
	    phba->link_state >= LPFC_LINK_UP &&
	    phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		if (vport->cfg_enable_da_id) {
			timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
			if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
				while (vport->ct_flags && timeout)
					timeout = schedule_timeout(timeout);
			else
				lpfc_printf_log(vport->phba, KERN_WARNING,
						LOG_VPORT,
						"1829 CT command failed to "
						"delete objects on fabric\n");
		}
		/* First look for the Fabric ndlp */
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			/* Cannot find existing Fabric ndlp, allocate one */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto skip_logo;
			lpfc_nlp_init(vport, ndlp, Fabric_DID);
			/* Indicate free memory when release */
			NLP_SET_FREE_REQ(ndlp);
		} else {
			if (!NLP_CHK_NODE_ACT(ndlp)) {
				ndlp = lpfc_enable_node(vport, ndlp,
							NLP_STE_UNUSED_NODE);
				if (!ndlp)
					goto skip_logo;
			}

			/* Remove ndlp from vport list */
			lpfc_dequeue_node(vport, ndlp);
			spin_lock_irq(&phba->ndlp_lock);
			if (!NLP_CHK_FREE_REQ(ndlp))
				/* Indicate free memory when release */
				NLP_SET_FREE_REQ(ndlp);
			else {
				/* Skip this if ndlp is already in free mode */
				spin_unlock_irq(&phba->ndlp_lock);
				goto skip_logo;
			}
			spin_unlock_irq(&phba->ndlp_lock);
		}

		/*
		 * If the vpi is not registered, then a valid FDISC doesn't
		 * exist and there is no need for an ELS LOGO.  Just cleanup
		 * the ndlp.
		 */
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_nlp_put(ndlp);
			goto skip_logo;
		}

		vport->unreg_vpi_cmpl = VPORT_INVAL;
		timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
		if (!lpfc_issue_els_npiv_logo(vport, ndlp))
			while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
				timeout = schedule_timeout(timeout);
	}

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_discovery_wait(vport);

skip_logo:

	/*
	 * If the NameServer ndlp has been incremented to allow the DA_ID CT
	 * command to be sent, decrement the ndlp now.
	 */
	if (ns_ndlp_referenced) {
		ndlp = lpfc_findnode_did(vport, NameServer_DID);
		lpfc_nlp_put(ndlp);
	}

	lpfc_cleanup(vport);
	lpfc_sli_host_down(vport);

	lpfc_stop_vport_timers(vport);

	if (!(phba->pport->load_flag & FC_UNLOADING)) {
		lpfc_unreg_all_rpis(vport);
		lpfc_unreg_default_rpis(vport);
		/*
		 * Completion of unreg_vpi (lpfc_mbx_cmpl_unreg_vpi)
		 * does the scsi_host_put() to release the vport.
		 */
		if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
		    lpfc_mbx_unreg_vpi(vport))
			scsi_host_put(shost);
	} else
		scsi_host_put(shost);

	lpfc_free_vpi(phba, vport->vpi);
	vport->work_port_events = 0;
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
			 "1828 Vport Deleted.\n");
	scsi_host_put(shost);
	return VPORT_OK;
}

struct lpfc_vport **
lpfc_create_vport_work_array(struct lpfc_hba *phba)
{
	struct lpfc_vport *port_iterator;
	struct lpfc_vport **vports;
	int index = 0;
	vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
			 GFP_KERNEL);
	if (vports == NULL)
		return NULL;
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
		if (port_iterator->load_flag & FC_UNLOADING)
			continue;
		if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
			lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
					 "1801 Create vport work array FAILED: "
					 "cannot do scsi_host_get\n");
			continue;
		}
		vports[index++] = port_iterator;
	}
	spin_unlock_irq(&phba->hbalock);
	return vports;
}

void
lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
{
	int i;
	if (vports == NULL)
		return;
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
		scsi_host_put(lpfc_shost_from_vport(vports[i]));
	kfree(vports);
}


/**
 * lpfc_vport_reset_stat_data - Reset the statistical data for the vport
 * @vport: Pointer to vport object.
 *
 * This function resets the statistical data for the vport. This function
 * is called with the host_lock held.
 **/
void
lpfc_vport_reset_stat_data(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->lat_data)
			memset(ndlp->lat_data, 0, LPFC_MAX_BUCKET_COUNT *
				sizeof(struct lpfc_scsicmd_bkt));
	}
}


/**
 * lpfc_alloc_bucket - Allocate data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function allocates data buffer required for all the FC
 * nodes of the vport to collect statistical data.
 **/
void
lpfc_alloc_bucket(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;

		kfree(ndlp->lat_data);
		ndlp->lat_data = NULL;

		if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
			ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
					 sizeof(struct lpfc_scsicmd_bkt),
					 GFP_ATOMIC);

			if (!ndlp->lat_data)
				lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
					"0287 lpfc_alloc_bucket failed to "
					"allocate statistical data buffer DID "
					"0x%x\n", ndlp->nlp_DID);
		}
	}
}

/**
 * lpfc_free_bucket - Free data buffer required for statistical data
 * @vport: Pointer to vport object.
 *
 * This function frees the statistical data buffers of all the FC
 * nodes of the vport.
 **/
void
lpfc_free_bucket(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;

		kfree(ndlp->lat_data);
		ndlp->lat_data = NULL;
	}
}