1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 /* See Fibre Channel protocol T11 FC-LS for details */ 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/slab.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 30 #include <scsi/scsi.h> 31 #include <scsi/scsi_device.h> 32 #include <scsi/scsi_host.h> 33 #include <scsi/scsi_transport_fc.h> 34 #include <uapi/scsi/fc/fc_fs.h> 35 #include <uapi/scsi/fc/fc_els.h> 36 37 #include "lpfc_hw4.h" 38 #include "lpfc_hw.h" 39 #include "lpfc_sli.h" 40 #include "lpfc_sli4.h" 41 #include "lpfc_nl.h" 42 #include "lpfc_disc.h" 43 #include "lpfc_scsi.h" 44 #include "lpfc.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_crtn.h" 47 #include "lpfc_vport.h" 48 #include "lpfc_debugfs.h" 49 50 static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *, 51 struct lpfc_iocbq *); 52 static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *, 53 struct lpfc_iocbq *); 54 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport); 55 static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, 56 struct lpfc_nodelist *ndlp, uint8_t retry); 57 static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, 58 struct lpfc_iocbq *iocb); 59 static void lpfc_cmpl_els_edc(struct lpfc_hba *phba, 60 struct lpfc_iocbq *cmdiocb, 61 struct lpfc_iocbq *rspiocb); 62 static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, 63 struct lpfc_iocbq *); 64 65 static int lpfc_max_els_tries = 3; 66 67 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); 68 static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); 69 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); 70 71 /** 72 * lpfc_els_chk_latt - Check host link attention event for a vport 73 * @vport: pointer to a host virtual N_Port data structure. 74 * 75 * This routine checks whether there is an outstanding host link 76 * attention event during the discovery process with the @vport. It is done 77 * by reading the HBA's Host Attention (HA) register. 
 * If there are any host link attention events during this @vport's
 * discovery process, the @vport shall be marked with FC_ABORT_DISCOVERY,
 * a host link attention clear shall be issued if the link state is not
 * already in the host link cleared state, and the return code shall
 * indicate whether a host link attention event happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport is in state LPFC_VPORT_READY, the request to check for a host
 * link attention event will be ignored and the return code shall indicate
 * that no host link attention event happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver's
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed in,
 * so that the discovery state machine can issue an Extended Link Service
 * (ELS) command. It is a generic lpfc-IOCB allocation and preparation
 * routine used by all the discovery state machine routines; the ELS
 * command-specific fields are set up later by the individual discovery
 * routines after this routine returns the prepared generic IOCB. It fills
 * in the Buffer Descriptor Entries (BDEs) and allocates buffers for both
 * the command payload and the response payload (if a response is expected).
 * The reference count on the ndlp is incremented by 1 and the reference to
 * the ndlp is put into context1 of the IOCB data structure, so the
 * command's callback function can access it later.
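 *
 * As a rough usage sketch (patterned on lpfc_issue_els_flogi() below, not
 * a separate interface), a discovery routine that expects a response would
 * do something like:
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
 *	... fill in the ELS payload at pcmd, set iocb_cmpl, then issue ...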
157 * 158 * Return code 159 * Pointer to the newly allocated/prepared els iocb data structure 160 * NULL - when els iocb data structure allocation/preparation failed 161 **/ 162 struct lpfc_iocbq * 163 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, 164 uint16_t cmdSize, uint8_t retry, 165 struct lpfc_nodelist *ndlp, uint32_t did, 166 uint32_t elscmd) 167 { 168 struct lpfc_hba *phba = vport->phba; 169 struct lpfc_iocbq *elsiocb; 170 struct lpfc_dmabuf *pcmd, *prsp, *pbuflist; 171 struct ulp_bde64 *bpl; 172 IOCB_t *icmd; 173 174 175 if (!lpfc_is_link_up(phba)) 176 return NULL; 177 178 /* Allocate buffer for command iocb */ 179 elsiocb = lpfc_sli_get_iocbq(phba); 180 181 if (elsiocb == NULL) 182 return NULL; 183 184 /* 185 * If this command is for fabric controller and HBA running 186 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames. 187 */ 188 if ((did == Fabric_DID) && 189 (phba->hba_flag & HBA_FIP_SUPPORT) && 190 ((elscmd == ELS_CMD_FLOGI) || 191 (elscmd == ELS_CMD_FDISC) || 192 (elscmd == ELS_CMD_LOGO))) 193 switch (elscmd) { 194 case ELS_CMD_FLOGI: 195 elsiocb->iocb_flag |= 196 ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) 197 & LPFC_FIP_ELS_ID_MASK); 198 break; 199 case ELS_CMD_FDISC: 200 elsiocb->iocb_flag |= 201 ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) 202 & LPFC_FIP_ELS_ID_MASK); 203 break; 204 case ELS_CMD_LOGO: 205 elsiocb->iocb_flag |= 206 ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) 207 & LPFC_FIP_ELS_ID_MASK); 208 break; 209 } 210 else 211 elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 212 213 icmd = &elsiocb->iocb; 214 215 /* fill in BDEs for command */ 216 /* Allocate buffer for command payload */ 217 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 218 if (pcmd) 219 pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys); 220 if (!pcmd || !pcmd->virt) 221 goto els_iocb_free_pcmb_exit; 222 223 INIT_LIST_HEAD(&pcmd->list); 224 225 /* Allocate buffer for response payload */ 226 if (expectRsp) { 227 prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 228 if (prsp) 229 prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 230 &prsp->phys); 231 if (!prsp || !prsp->virt) 232 goto els_iocb_free_prsp_exit; 233 INIT_LIST_HEAD(&prsp->list); 234 } else 235 prsp = NULL; 236 237 /* Allocate buffer for Buffer ptr list */ 238 pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 239 if (pbuflist) 240 pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI, 241 &pbuflist->phys); 242 if (!pbuflist || !pbuflist->virt) 243 goto els_iocb_free_pbuf_exit; 244 245 INIT_LIST_HEAD(&pbuflist->list); 246 247 if (expectRsp) { 248 icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 249 icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 250 icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 251 icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); 252 253 icmd->un.elsreq64.remoteID = did; /* DID */ 254 icmd->ulpCommand = CMD_ELS_REQUEST64_CR; 255 if (elscmd == ELS_CMD_FLOGI) 256 icmd->ulpTimeout = FF_DEF_RATOV * 2; 257 else if (elscmd == ELS_CMD_LOGO) 258 icmd->ulpTimeout = phba->fc_ratov; 259 else 260 icmd->ulpTimeout = phba->fc_ratov * 2; 261 } else { 262 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); 263 icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); 264 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; 265 icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64); 266 icmd->un.xseq64.xmit_els_remoteID = did; /* DID */ 267 icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX; 268 } 269 icmd->ulpBdeCount = 1; 270 icmd->ulpLe = 1; 271 
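	/* All ELS commands are issued as Class 3 service */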
icmd->ulpClass = CLASS3; 272 273 /* 274 * If we have NPIV enabled, we want to send ELS traffic by VPI. 275 * For SLI4, since the driver controls VPIs we also want to include 276 * all ELS pt2pt protocol traffic as well. 277 */ 278 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) || 279 ((phba->sli_rev == LPFC_SLI_REV4) && 280 (vport->fc_flag & FC_PT2PT))) { 281 282 if (expectRsp) { 283 icmd->un.elsreq64.myID = vport->fc_myDID; 284 285 /* For ELS_REQUEST64_CR, use the VPI by default */ 286 icmd->ulpContext = phba->vpi_ids[vport->vpi]; 287 } 288 289 icmd->ulpCt_h = 0; 290 /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ 291 if (elscmd == ELS_CMD_ECHO) 292 icmd->ulpCt_l = 0; /* context = invalid RPI */ 293 else 294 icmd->ulpCt_l = 1; /* context = VPI */ 295 } 296 297 bpl = (struct ulp_bde64 *) pbuflist->virt; 298 bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys)); 299 bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys)); 300 bpl->tus.f.bdeSize = cmdSize; 301 bpl->tus.f.bdeFlags = 0; 302 bpl->tus.w = le32_to_cpu(bpl->tus.w); 303 304 if (expectRsp) { 305 bpl++; 306 bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys)); 307 bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys)); 308 bpl->tus.f.bdeSize = FCELSSIZE; 309 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; 310 bpl->tus.w = le32_to_cpu(bpl->tus.w); 311 } 312 313 elsiocb->context2 = pcmd; 314 elsiocb->context3 = pbuflist; 315 elsiocb->retry = retry; 316 elsiocb->vport = vport; 317 elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT; 318 319 if (prsp) { 320 list_add(&prsp->list, &pcmd->list); 321 } 322 if (expectRsp) { 323 /* Xmit ELS command <elsCmd> to remote NPORT <did> */ 324 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 325 "0116 Xmit ELS command x%x to remote " 326 "NPORT x%x I/O tag: x%x, port state:x%x " 327 "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n", 328 elscmd, did, elsiocb->iotag, 329 vport->port_state, ndlp->nlp_rpi, 330 vport->fc_flag, ndlp->nlp_flag, vport); 331 } else { 332 /* Xmit ELS response <elsCmd> to remote NPORT <did> */ 333 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 334 "0117 Xmit ELS response x%x to remote " 335 "NPORT x%x I/O tag: x%x, size: x%x " 336 "port_state x%x rpi x%x fc_flag x%x\n", 337 elscmd, ndlp->nlp_DID, elsiocb->iotag, 338 cmdSize, vport->port_state, 339 ndlp->nlp_rpi, vport->fc_flag); 340 } 341 return elsiocb; 342 343 els_iocb_free_pbuf_exit: 344 if (expectRsp) 345 lpfc_mbuf_free(phba, prsp->virt, prsp->phys); 346 kfree(pbuflist); 347 348 els_iocb_free_prsp_exit: 349 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys); 350 kfree(prsp); 351 352 els_iocb_free_pcmb_exit: 353 kfree(pcmd); 354 lpfc_sli_release_iocbq(phba, elsiocb); 355 return NULL; 356 } 357 358 /** 359 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport 360 * @vport: pointer to a host virtual N_Port data structure. 361 * 362 * This routine issues a fabric registration login for a @vport. An 363 * active ndlp node with Fabric_DID must already exist for this @vport. 364 * The routine invokes two mailbox commands to carry out fabric registration 365 * login through the HBA firmware: the first mailbox command requests the 366 * HBA to perform link configuration for the @vport; and the second mailbox 367 * command requests the HBA to perform the actual fabric registration login 368 * with the @vport. 
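 *
 * In outline, the two mailbox steps issued below are (a sketch of this
 * routine's own flow, not an additional interface):
 *
 *	lpfc_config_link(phba, mbox);
 *	lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *
 *	lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
 *		     ndlp->nlp_rpi);
 *	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
 *	lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);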
369 * 370 * Return code 371 * 0 - successfully issued fabric registration login for @vport 372 * -ENXIO -- failed to issue fabric registration login for @vport 373 **/ 374 int 375 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) 376 { 377 struct lpfc_hba *phba = vport->phba; 378 LPFC_MBOXQ_t *mbox; 379 struct lpfc_dmabuf *mp; 380 struct lpfc_nodelist *ndlp; 381 struct serv_parm *sp; 382 int rc; 383 int err = 0; 384 385 sp = &phba->fc_fabparam; 386 ndlp = lpfc_findnode_did(vport, Fabric_DID); 387 if (!ndlp) { 388 err = 1; 389 goto fail; 390 } 391 392 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 393 if (!mbox) { 394 err = 2; 395 goto fail; 396 } 397 398 vport->port_state = LPFC_FABRIC_CFG_LINK; 399 lpfc_config_link(phba, mbox); 400 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 401 mbox->vport = vport; 402 403 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 404 if (rc == MBX_NOT_FINISHED) { 405 err = 3; 406 goto fail_free_mbox; 407 } 408 409 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 410 if (!mbox) { 411 err = 4; 412 goto fail; 413 } 414 rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 415 ndlp->nlp_rpi); 416 if (rc) { 417 err = 5; 418 goto fail_free_mbox; 419 } 420 421 mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; 422 mbox->vport = vport; 423 /* increment the reference count on ndlp to hold reference 424 * for the callback routine. 425 */ 426 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 427 if (!mbox->ctx_ndlp) { 428 err = 6; 429 goto fail_no_ndlp; 430 } 431 432 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 433 if (rc == MBX_NOT_FINISHED) { 434 err = 7; 435 goto fail_issue_reg_login; 436 } 437 438 return 0; 439 440 fail_issue_reg_login: 441 /* decrement the reference count on ndlp just incremented 442 * for the failed mbox command. 443 */ 444 lpfc_nlp_put(ndlp); 445 fail_no_ndlp: 446 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 447 lpfc_mbuf_free(phba, mp->virt, mp->phys); 448 kfree(mp); 449 fail_free_mbox: 450 mempool_free(mbox, phba->mbox_mem_pool); 451 452 fail: 453 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 454 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 455 "0249 Cannot issue Register Fabric login: Err %d\n", 456 err); 457 return -ENXIO; 458 } 459 460 /** 461 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login 462 * @vport: pointer to a host virtual N_Port data structure. 463 * 464 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for 465 * the @vport. This mailbox command is necessary for SLI4 port only. 466 * 467 * Return code 468 * 0 - successfully issued REG_VFI for @vport 469 * A failure code otherwise. 
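 *	(in this routine: -ENODEV when no Fabric_DID node exists, -ENOMEM on
 *	allocation failure, -ENXIO when the mailbox could not be issued)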
470 **/ 471 int 472 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 473 { 474 struct lpfc_hba *phba = vport->phba; 475 LPFC_MBOXQ_t *mboxq = NULL; 476 struct lpfc_nodelist *ndlp; 477 struct lpfc_dmabuf *dmabuf = NULL; 478 int rc = 0; 479 480 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ 481 if ((phba->sli_rev == LPFC_SLI_REV4) && 482 !(phba->link_flag & LS_LOOPBACK_MODE) && 483 !(vport->fc_flag & FC_PT2PT)) { 484 ndlp = lpfc_findnode_did(vport, Fabric_DID); 485 if (!ndlp) { 486 rc = -ENODEV; 487 goto fail; 488 } 489 } 490 491 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 492 if (!mboxq) { 493 rc = -ENOMEM; 494 goto fail; 495 } 496 497 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ 498 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { 499 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 500 if (!dmabuf) { 501 rc = -ENOMEM; 502 goto fail; 503 } 504 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); 505 if (!dmabuf->virt) { 506 rc = -ENOMEM; 507 goto fail; 508 } 509 memcpy(dmabuf->virt, &phba->fc_fabparam, 510 sizeof(struct serv_parm)); 511 } 512 513 vport->port_state = LPFC_FABRIC_CFG_LINK; 514 if (dmabuf) 515 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 516 else 517 lpfc_reg_vfi(mboxq, vport, 0); 518 519 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 520 mboxq->vport = vport; 521 mboxq->ctx_buf = dmabuf; 522 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 523 if (rc == MBX_NOT_FINISHED) { 524 rc = -ENXIO; 525 goto fail; 526 } 527 return 0; 528 529 fail: 530 if (mboxq) 531 mempool_free(mboxq, phba->mbox_mem_pool); 532 if (dmabuf) { 533 if (dmabuf->virt) 534 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 535 kfree(dmabuf); 536 } 537 538 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 539 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 540 "0289 Issue Register VFI failed: Err %d\n", rc); 541 return rc; 542 } 543 544 /** 545 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login 546 * @vport: pointer to a host virtual N_Port data structure. 547 * 548 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for 549 * the @vport. This mailbox command is necessary for SLI4 port only. 550 * 551 * Return code 552 * 0 - successfully issued REG_VFI for @vport 553 * A failure code otherwise. 554 **/ 555 int 556 lpfc_issue_unreg_vfi(struct lpfc_vport *vport) 557 { 558 struct lpfc_hba *phba = vport->phba; 559 struct Scsi_Host *shost; 560 LPFC_MBOXQ_t *mboxq; 561 int rc; 562 563 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 564 if (!mboxq) { 565 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 566 "2556 UNREG_VFI mbox allocation failed" 567 "HBA state x%x\n", phba->pport->port_state); 568 return -ENOMEM; 569 } 570 571 lpfc_unreg_vfi(mboxq, vport); 572 mboxq->vport = vport; 573 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; 574 575 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 576 if (rc == MBX_NOT_FINISHED) { 577 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 578 "2557 UNREG_VFI issue mbox failed rc x%x " 579 "HBA state x%x\n", 580 rc, phba->pport->port_state); 581 mempool_free(mboxq, phba->mbox_mem_pool); 582 return -EIO; 583 } 584 585 shost = lpfc_shost_from_vport(vport); 586 spin_lock_irq(shost->host_lock); 587 vport->fc_flag &= ~FC_VFI_REGISTERED; 588 spin_unlock_irq(shost->host_lock); 589 return 0; 590 } 591 592 /** 593 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 594 * @vport: pointer to a host virtual N_Port data structure. 
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * It returns 1 when the FCID, Fabric portname, or Fabric nodename has
 * changed in the completion service parameters; otherwise it returns 0.
 * This function also sets a flag in the vport data structure to delay
 * N_Port discovery after the FLOGI/FDISC completion if the Clean Address
 * bit in the FLOGI/FDISC response is cleared and the FCID, Fabric portname,
 * or Fabric nodename has changed in the completion service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are unchanged.
 *   1 - FCID, Fabric Nodename or Fabric portname has changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID for
 * the @vport against the previously assigned N_Port ID. If it is different
 * from the previously assigned Destination ID (DID), the lpfc_unreg_rpi()
 * routine is invoked on all the remaining nodes of the @vport to unregister
 * the Remote Port Indicators (RPIs). Finally, lpfc_issue_fabric_reglogin()
 * is invoked to register the login to the fabric.
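 *
 * Note that the fabric's timeout values from the common service parameters
 * are converted before being cached: E_D_TOV (nanoseconds when the
 * resolution bit is set, otherwise milliseconds) is kept in milliseconds,
 * and R_A_TOV (milliseconds) is kept in seconds, roughly as:
 *
 *	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
 *	if (sp->cmn.edtovResolution)
 *		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
 *	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;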
664 * 665 * Return code 666 * 0 - Success (currently, always return 0) 667 **/ 668 static int 669 lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 670 struct serv_parm *sp, IOCB_t *irsp) 671 { 672 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 673 struct lpfc_hba *phba = vport->phba; 674 struct lpfc_nodelist *np; 675 struct lpfc_nodelist *next_np; 676 uint8_t fabric_param_changed; 677 678 spin_lock_irq(shost->host_lock); 679 vport->fc_flag |= FC_FABRIC; 680 spin_unlock_irq(shost->host_lock); 681 682 phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov); 683 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 684 phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; 685 686 phba->fc_edtovResol = sp->cmn.edtovResolution; 687 phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; 688 689 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 690 spin_lock_irq(shost->host_lock); 691 vport->fc_flag |= FC_PUBLIC_LOOP; 692 spin_unlock_irq(shost->host_lock); 693 } 694 695 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 696 memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name)); 697 memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name)); 698 ndlp->nlp_class_sup = 0; 699 if (sp->cls1.classValid) 700 ndlp->nlp_class_sup |= FC_COS_CLASS1; 701 if (sp->cls2.classValid) 702 ndlp->nlp_class_sup |= FC_COS_CLASS2; 703 if (sp->cls3.classValid) 704 ndlp->nlp_class_sup |= FC_COS_CLASS3; 705 if (sp->cls4.classValid) 706 ndlp->nlp_class_sup |= FC_COS_CLASS4; 707 ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | 708 sp->cmn.bbRcvSizeLsb; 709 710 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 711 if (fabric_param_changed) { 712 /* Reset FDMI attribute masks based on config parameter */ 713 if (phba->cfg_enable_SmartSAN || 714 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) { 715 /* Setup appropriate attribute masks */ 716 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR; 717 if (phba->cfg_enable_SmartSAN) 718 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR; 719 else 720 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; 721 } else { 722 vport->fdmi_hba_mask = 0; 723 vport->fdmi_port_mask = 0; 724 } 725 726 } 727 memcpy(&vport->fabric_portname, &sp->portName, 728 sizeof(struct lpfc_name)); 729 memcpy(&vport->fabric_nodename, &sp->nodeName, 730 sizeof(struct lpfc_name)); 731 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 732 733 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 734 if (sp->cmn.response_multiple_NPort) { 735 lpfc_printf_vlog(vport, KERN_WARNING, 736 LOG_ELS | LOG_VPORT, 737 "1816 FLOGI NPIV supported, " 738 "response data 0x%x\n", 739 sp->cmn.response_multiple_NPort); 740 spin_lock_irq(&phba->hbalock); 741 phba->link_flag |= LS_NPIV_FAB_SUPPORTED; 742 spin_unlock_irq(&phba->hbalock); 743 } else { 744 /* Because we asked f/w for NPIV it still expects us 745 to call reg_vnpid at least for the physical host */ 746 lpfc_printf_vlog(vport, KERN_WARNING, 747 LOG_ELS | LOG_VPORT, 748 "1817 Fabric does not support NPIV " 749 "- configuring single port mode.\n"); 750 spin_lock_irq(&phba->hbalock); 751 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 752 spin_unlock_irq(&phba->hbalock); 753 } 754 } 755 756 /* 757 * For FC we need to do some special processing because of the SLI 758 * Port's default settings of the Common Service Parameters. 
759 */ 760 if ((phba->sli_rev == LPFC_SLI_REV4) && 761 (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) { 762 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 763 if (fabric_param_changed) 764 lpfc_unregister_fcf_prep(phba); 765 766 /* This should just update the VFI CSPs*/ 767 if (vport->fc_flag & FC_VFI_REGISTERED) 768 lpfc_issue_reg_vfi(vport); 769 } 770 771 if (fabric_param_changed && 772 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 773 774 /* If our NportID changed, we need to ensure all 775 * remaining NPORTs get unreg_login'ed. 776 */ 777 list_for_each_entry_safe(np, next_np, 778 &vport->fc_nodes, nlp_listp) { 779 if ((np->nlp_state != NLP_STE_NPR_NODE) || 780 !(np->nlp_flag & NLP_NPR_ADISC)) 781 continue; 782 spin_lock_irq(&np->lock); 783 np->nlp_flag &= ~NLP_NPR_ADISC; 784 spin_unlock_irq(&np->lock); 785 lpfc_unreg_rpi(vport, np); 786 } 787 lpfc_cleanup_pending_mbox(vport); 788 789 if (phba->sli_rev == LPFC_SLI_REV4) { 790 lpfc_sli4_unreg_all_rpis(vport); 791 lpfc_mbx_unreg_vpi(vport); 792 spin_lock_irq(shost->host_lock); 793 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 794 spin_unlock_irq(shost->host_lock); 795 } 796 797 /* 798 * For SLI3 and SLI4, the VPI needs to be reregistered in 799 * response to this fabric parameter change event. 800 */ 801 spin_lock_irq(shost->host_lock); 802 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 803 spin_unlock_irq(shost->host_lock); 804 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 805 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 806 /* 807 * Driver needs to re-reg VPI in order for f/w 808 * to update the MAC address. 809 */ 810 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 811 lpfc_register_new_vport(phba, vport, ndlp); 812 return 0; 813 } 814 815 if (phba->sli_rev < LPFC_SLI_REV4) { 816 lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); 817 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && 818 vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 819 lpfc_register_new_vport(phba, vport, ndlp); 820 else 821 lpfc_issue_fabric_reglogin(vport); 822 } else { 823 ndlp->nlp_type |= NLP_FABRIC; 824 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 825 if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) && 826 (vport->vpi_state & LPFC_VPI_REGISTERED)) { 827 lpfc_start_fdiscs(phba); 828 lpfc_do_scr_ns_plogi(phba, vport); 829 } else if (vport->fc_flag & FC_VFI_REGISTERED) 830 lpfc_issue_init_vpi(vport); 831 else { 832 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 833 "3135 Need register VFI: (x%x/%x)\n", 834 vport->fc_prevDID, vport->fc_myDID); 835 lpfc_issue_reg_vfi(vport); 836 } 837 } 838 return 0; 839 } 840 841 /** 842 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port 843 * @vport: pointer to a host virtual N_Port data structure. 844 * @ndlp: pointer to a node-list data structure. 845 * @sp: pointer to service parameter data structure. 846 * 847 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback 848 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port 849 * in a point-to-point topology. First, the @vport's N_Port Name is compared 850 * with the received N_Port Name: if the @vport's N_Port Name is greater than 851 * the received N_Port Name lexicographically, this node shall assign local 852 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and 853 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise, 854 * this node shall just wait for the remote node to issue PLOGI and assign 855 * N_Port IDs. 
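 *
 * The name comparison is a straight memcmp() of the two WWPNs; a sketch of
 * the logic in this routine:
 *
 *	rc = memcmp(&vport->fc_portname, &sp->portName,
 *		    sizeof(vport->fc_portname));
 *	if (rc >= 0) {
 *		if (rc)
 *			vport->fc_myDID = PT2PT_LocalID;
 *		... find or allocate the PT2PT_RemoteID ndlp and issue
 *		    CONFIG_LINK so this side can send the PLOGI ...
 *	} else {
 *		... drop the ndlp reference and wait for the remote PLOGI ...
 *	}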
856 * 857 * Return code 858 * 0 - Success 859 * -ENXIO - Fail 860 **/ 861 static int 862 lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 863 struct serv_parm *sp) 864 { 865 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 866 struct lpfc_hba *phba = vport->phba; 867 LPFC_MBOXQ_t *mbox; 868 int rc; 869 870 spin_lock_irq(shost->host_lock); 871 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 872 vport->fc_flag |= FC_PT2PT; 873 spin_unlock_irq(shost->host_lock); 874 875 /* If we are pt2pt with another NPort, force NPIV off! */ 876 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 877 878 /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ 879 if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { 880 lpfc_unregister_fcf_prep(phba); 881 882 spin_lock_irq(shost->host_lock); 883 vport->fc_flag &= ~FC_VFI_REGISTERED; 884 spin_unlock_irq(shost->host_lock); 885 phba->fc_topology_changed = 0; 886 } 887 888 rc = memcmp(&vport->fc_portname, &sp->portName, 889 sizeof(vport->fc_portname)); 890 891 if (rc >= 0) { 892 /* This side will initiate the PLOGI */ 893 spin_lock_irq(shost->host_lock); 894 vport->fc_flag |= FC_PT2PT_PLOGI; 895 spin_unlock_irq(shost->host_lock); 896 897 /* 898 * N_Port ID cannot be 0, set our Id to LocalID 899 * the other side will be RemoteID. 900 */ 901 902 /* not equal */ 903 if (rc) 904 vport->fc_myDID = PT2PT_LocalID; 905 906 /* Decrement ndlp reference count indicating that ndlp can be 907 * safely released when other references to it are done. 908 */ 909 lpfc_nlp_put(ndlp); 910 911 ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID); 912 if (!ndlp) { 913 /* 914 * Cannot find existing Fabric ndlp, so allocate a 915 * new one 916 */ 917 ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID); 918 if (!ndlp) 919 goto fail; 920 } 921 922 memcpy(&ndlp->nlp_portname, &sp->portName, 923 sizeof(struct lpfc_name)); 924 memcpy(&ndlp->nlp_nodename, &sp->nodeName, 925 sizeof(struct lpfc_name)); 926 /* Set state will put ndlp onto node list if not already done */ 927 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 928 spin_lock_irq(&ndlp->lock); 929 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 930 spin_unlock_irq(&ndlp->lock); 931 932 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 933 if (!mbox) 934 goto fail; 935 936 lpfc_config_link(phba, mbox); 937 938 mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 939 mbox->vport = vport; 940 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 941 if (rc == MBX_NOT_FINISHED) { 942 mempool_free(mbox, phba->mbox_mem_pool); 943 goto fail; 944 } 945 } else { 946 /* This side will wait for the PLOGI, decrement ndlp reference 947 * count indicating that ndlp can be released when other 948 * references to it are done. 949 */ 950 lpfc_nlp_put(ndlp); 951 952 /* Start discovery - this should just do CLEAR_LA */ 953 lpfc_disc_start(vport); 954 } 955 956 return 0; 957 fail: 958 return -ENXIO; 959 } 960 961 /** 962 * lpfc_cmpl_els_flogi - Completion callback function for flogi 963 * @phba: pointer to lpfc hba data structure. 964 * @cmdiocb: pointer to lpfc command iocb data structure. 965 * @rspiocb: pointer to lpfc response iocb data structure. 966 * 967 * This routine is the top-level completion callback function for issuing 968 * a Fabric Login (FLOGI) command. If the response IOCB reported error, 969 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. 
If 970 * retry has been made (either immediately or delayed with lpfc_els_retry() 971 * returning 1), the command IOCB will be released and function returned. 972 * If the retry attempt has been given up (possibly reach the maximum 973 * number of retries), one additional decrement of ndlp reference shall be 974 * invoked before going out after releasing the command IOCB. This will 975 * actually release the remote node (Note, lpfc_els_free_iocb() will also 976 * invoke one decrement of ndlp reference count). If no error reported in 977 * the IOCB status, the command Port ID field is used to determine whether 978 * this is a point-to-point topology or a fabric topology: if the Port ID 979 * field is assigned, it is a fabric topology; otherwise, it is a 980 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or 981 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the 982 * specific topology completion conditions. 983 **/ 984 static void 985 lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 986 struct lpfc_iocbq *rspiocb) 987 { 988 struct lpfc_vport *vport = cmdiocb->vport; 989 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 990 IOCB_t *irsp = &rspiocb->iocb; 991 struct lpfc_nodelist *ndlp = cmdiocb->context1; 992 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 993 struct serv_parm *sp; 994 uint16_t fcf_index; 995 int rc; 996 997 /* Check to see if link went down during discovery */ 998 if (lpfc_els_chk_latt(vport)) { 999 /* One additional decrement on node reference count to 1000 * trigger the release of the node 1001 */ 1002 lpfc_nlp_put(ndlp); 1003 goto out; 1004 } 1005 1006 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1007 "FLOGI cmpl: status:x%x/x%x state:x%x", 1008 irsp->ulpStatus, irsp->un.ulpWord[4], 1009 vport->port_state); 1010 1011 if (irsp->ulpStatus) { 1012 /* 1013 * In case of FIP mode, perform roundrobin FCF failover 1014 * due to new FCF discovery 1015 */ 1016 if ((phba->hba_flag & HBA_FIP_SUPPORT) && 1017 (phba->fcf.fcf_flag & FCF_DISCOVERY)) { 1018 if (phba->link_state < LPFC_LINK_UP) 1019 goto stop_rr_fcf_flogi; 1020 if ((phba->fcoe_cvl_eventtag_attn == 1021 phba->fcoe_cvl_eventtag) && 1022 (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1023 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1024 IOERR_SLI_ABORTED)) 1025 goto stop_rr_fcf_flogi; 1026 else 1027 phba->fcoe_cvl_eventtag_attn = 1028 phba->fcoe_cvl_eventtag; 1029 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1030 "2611 FLOGI failed on FCF (x%x), " 1031 "status:x%x/x%x, tmo:x%x, perform " 1032 "roundrobin FCF failover\n", 1033 phba->fcf.current_rec.fcf_indx, 1034 irsp->ulpStatus, irsp->un.ulpWord[4], 1035 irsp->ulpTimeout); 1036 lpfc_sli4_set_fcf_flogi_fail(phba, 1037 phba->fcf.current_rec.fcf_indx); 1038 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 1039 rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); 1040 if (rc) 1041 goto out; 1042 } 1043 1044 stop_rr_fcf_flogi: 1045 /* FLOGI failure */ 1046 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 1047 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1048 IOERR_LOOP_OPEN_FAILURE))) 1049 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1050 "2858 FLOGI failure Status:x%x/x%x TMO" 1051 ":x%x Data x%x x%x\n", 1052 irsp->ulpStatus, irsp->un.ulpWord[4], 1053 irsp->ulpTimeout, phba->hba_flag, 1054 phba->fcf.fcf_flag); 1055 1056 /* Check for retry */ 1057 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 1058 goto out; 1059 1060 lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, 1061 "0150 FLOGI 
failure Status:x%x/x%x " 1062 "xri x%x TMO:x%x\n", 1063 irsp->ulpStatus, irsp->un.ulpWord[4], 1064 cmdiocb->sli4_xritag, irsp->ulpTimeout); 1065 1066 /* If this is not a loop open failure, bail out */ 1067 if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 1068 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 1069 IOERR_LOOP_OPEN_FAILURE))) 1070 goto flogifail; 1071 1072 /* FLOGI failed, so there is no fabric */ 1073 spin_lock_irq(shost->host_lock); 1074 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 1075 spin_unlock_irq(shost->host_lock); 1076 1077 /* If private loop, then allow max outstanding els to be 1078 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no 1079 * alpa map would take too long otherwise. 1080 */ 1081 if (phba->alpa_map[0] == 0) 1082 vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; 1083 if ((phba->sli_rev == LPFC_SLI_REV4) && 1084 (!(vport->fc_flag & FC_VFI_REGISTERED) || 1085 (vport->fc_prevDID != vport->fc_myDID) || 1086 phba->fc_topology_changed)) { 1087 if (vport->fc_flag & FC_VFI_REGISTERED) { 1088 if (phba->fc_topology_changed) { 1089 lpfc_unregister_fcf_prep(phba); 1090 spin_lock_irq(shost->host_lock); 1091 vport->fc_flag &= ~FC_VFI_REGISTERED; 1092 spin_unlock_irq(shost->host_lock); 1093 phba->fc_topology_changed = 0; 1094 } else { 1095 lpfc_sli4_unreg_all_rpis(vport); 1096 } 1097 } 1098 1099 /* Do not register VFI if the driver aborted FLOGI */ 1100 if (!lpfc_error_lost_link(irsp)) 1101 lpfc_issue_reg_vfi(vport); 1102 1103 lpfc_nlp_put(ndlp); 1104 goto out; 1105 } 1106 goto flogifail; 1107 } 1108 spin_lock_irq(shost->host_lock); 1109 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 1110 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 1111 spin_unlock_irq(shost->host_lock); 1112 1113 /* 1114 * The FLogI succeeded. Sync the data for the CPU before 1115 * accessing it. 1116 */ 1117 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1118 if (!prsp) 1119 goto out; 1120 sp = prsp->virt + sizeof(uint32_t); 1121 1122 /* FLOGI completes successfully */ 1123 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1124 "0101 FLOGI completes successfully, I/O tag:x%x, " 1125 "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x\n", 1126 cmdiocb->iotag, cmdiocb->sli4_xritag, 1127 irsp->un.ulpWord[4], sp->cmn.e_d_tov, 1128 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, 1129 vport->port_state, vport->fc_flag, 1130 sp->cmn.priority_tagging); 1131 1132 if (sp->cmn.priority_tagging) 1133 vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA; 1134 1135 if (vport->port_state == LPFC_FLOGI) { 1136 /* 1137 * If Common Service Parameters indicate Nport 1138 * we are point to point, if Fport we are Fabric. 
1139 */ 1140 if (sp->cmn.fPort) 1141 rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp); 1142 else if (!(phba->hba_flag & HBA_FCOE_MODE)) 1143 rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); 1144 else { 1145 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1146 "2831 FLOGI response with cleared Fabric " 1147 "bit fcf_index 0x%x " 1148 "Switch Name %02x%02x%02x%02x%02x%02x%02x%02x " 1149 "Fabric Name " 1150 "%02x%02x%02x%02x%02x%02x%02x%02x\n", 1151 phba->fcf.current_rec.fcf_indx, 1152 phba->fcf.current_rec.switch_name[0], 1153 phba->fcf.current_rec.switch_name[1], 1154 phba->fcf.current_rec.switch_name[2], 1155 phba->fcf.current_rec.switch_name[3], 1156 phba->fcf.current_rec.switch_name[4], 1157 phba->fcf.current_rec.switch_name[5], 1158 phba->fcf.current_rec.switch_name[6], 1159 phba->fcf.current_rec.switch_name[7], 1160 phba->fcf.current_rec.fabric_name[0], 1161 phba->fcf.current_rec.fabric_name[1], 1162 phba->fcf.current_rec.fabric_name[2], 1163 phba->fcf.current_rec.fabric_name[3], 1164 phba->fcf.current_rec.fabric_name[4], 1165 phba->fcf.current_rec.fabric_name[5], 1166 phba->fcf.current_rec.fabric_name[6], 1167 phba->fcf.current_rec.fabric_name[7]); 1168 1169 lpfc_nlp_put(ndlp); 1170 spin_lock_irq(&phba->hbalock); 1171 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1172 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1173 spin_unlock_irq(&phba->hbalock); 1174 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1175 goto out; 1176 } 1177 if (!rc) { 1178 /* Mark the FCF discovery process done */ 1179 if (phba->hba_flag & HBA_FIP_SUPPORT) 1180 lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | 1181 LOG_ELS, 1182 "2769 FLOGI to FCF (x%x) " 1183 "completed successfully\n", 1184 phba->fcf.current_rec.fcf_indx); 1185 spin_lock_irq(&phba->hbalock); 1186 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1187 phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); 1188 spin_unlock_irq(&phba->hbalock); 1189 phba->fcf.fcf_redisc_attempted = 0; /* reset */ 1190 goto out; 1191 } 1192 } else if (vport->port_state > LPFC_FLOGI && 1193 vport->fc_flag & FC_PT2PT) { 1194 /* 1195 * In a p2p topology, it is possible that discovery has 1196 * already progressed, and this completion can be ignored. 1197 * Recheck the indicated topology. 1198 */ 1199 if (!sp->cmn.fPort) 1200 goto out; 1201 } 1202 1203 flogifail: 1204 spin_lock_irq(&phba->hbalock); 1205 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 1206 spin_unlock_irq(&phba->hbalock); 1207 1208 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) 1209 lpfc_nlp_put(ndlp); 1210 if (!lpfc_error_lost_link(irsp)) { 1211 /* FLOGI failed, so just use loop map to make discovery list */ 1212 lpfc_disc_list_loopmap(vport); 1213 1214 /* Start discovery */ 1215 lpfc_disc_start(vport); 1216 } else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 1217 (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 1218 IOERR_SLI_ABORTED) && 1219 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 1220 IOERR_SLI_DOWN))) && 1221 (phba->link_state != LPFC_CLEAR_LA)) { 1222 /* If FLOGI failed enable link interrupt. */ 1223 lpfc_issue_clear_la(phba, vport); 1224 } 1225 out: 1226 phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; 1227 lpfc_els_free_iocb(phba, cmdiocb); 1228 lpfc_nlp_put(ndlp); 1229 } 1230 1231 /** 1232 * lpfc_cmpl_els_link_down - Completion callback function for ELS command 1233 * aborted during a link down 1234 * @phba: pointer to lpfc hba data structure. 1235 * @cmdiocb: pointer to lpfc command iocb data structure. 1236 * @rspiocb: pointer to lpfc response iocb data structure. 
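 *
 * This completion handler runs for ELS commands that were failed or aborted
 * because the link went down. It logs the completion status, clears the
 * LPFC_IO_FABRIC flag (and decrements the outstanding fabric IOCB count) if
 * that flag was set on the command, and then frees the command IOCB.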
1237 * 1238 */ 1239 static void 1240 lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1241 struct lpfc_iocbq *rspiocb) 1242 { 1243 IOCB_t *irsp; 1244 uint32_t *pcmd; 1245 uint32_t cmd; 1246 1247 pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt); 1248 cmd = *pcmd; 1249 irsp = &rspiocb->iocb; 1250 1251 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 1252 "6445 ELS completes after LINK_DOWN: " 1253 " Status %x/%x cmd x%x flg x%x\n", 1254 irsp->ulpStatus, irsp->un.ulpWord[4], cmd, 1255 cmdiocb->iocb_flag); 1256 1257 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) { 1258 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; 1259 atomic_dec(&phba->fabric_iocb_count); 1260 } 1261 lpfc_els_free_iocb(phba, cmdiocb); 1262 } 1263 1264 /** 1265 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport 1266 * @vport: pointer to a host virtual N_Port data structure. 1267 * @ndlp: pointer to a node-list data structure. 1268 * @retry: number of retries to the command IOCB. 1269 * 1270 * This routine issues a Fabric Login (FLOGI) Request ELS command 1271 * for a @vport. The initiator service parameters are put into the payload 1272 * of the FLOGI Request IOCB and the top-level callback function pointer 1273 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback 1274 * function field. The lpfc_issue_fabric_iocb routine is invoked to send 1275 * out FLOGI ELS command with one outstanding fabric IOCB at a time. 1276 * 1277 * Note that the ndlp reference count will be incremented by 1 for holding the 1278 * ndlp and the reference to ndlp will be stored into the context1 field of 1279 * the IOCB for the completion callback function to the FLOGI ELS command. 1280 * 1281 * Return code 1282 * 0 - successfully issued flogi iocb for @vport 1283 * 1 - failed to issue flogi iocb for @vport 1284 **/ 1285 static int 1286 lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 1287 uint8_t retry) 1288 { 1289 struct lpfc_hba *phba = vport->phba; 1290 struct serv_parm *sp; 1291 IOCB_t *icmd; 1292 struct lpfc_iocbq *elsiocb; 1293 struct lpfc_iocbq defer_flogi_acc; 1294 uint8_t *pcmd; 1295 uint16_t cmdsize; 1296 uint32_t tmo, did; 1297 int rc; 1298 1299 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 1300 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 1301 ndlp->nlp_DID, ELS_CMD_FLOGI); 1302 1303 if (!elsiocb) 1304 return 1; 1305 1306 icmd = &elsiocb->iocb; 1307 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 1308 1309 /* For FLOGI request, remainder of payload is service parameters */ 1310 *((uint32_t *) (pcmd)) = ELS_CMD_FLOGI; 1311 pcmd += sizeof(uint32_t); 1312 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 1313 sp = (struct serv_parm *) pcmd; 1314 1315 /* Setup CSPs accordingly for Fabric */ 1316 sp->cmn.e_d_tov = 0; 1317 sp->cmn.w2.r_a_tov = 0; 1318 sp->cmn.virtual_fabric_support = 0; 1319 sp->cls1.classValid = 0; 1320 if (sp->cmn.fcphLow < FC_PH3) 1321 sp->cmn.fcphLow = FC_PH3; 1322 if (sp->cmn.fcphHigh < FC_PH3) 1323 sp->cmn.fcphHigh = FC_PH3; 1324 1325 /* Determine if switch supports priority tagging */ 1326 if (phba->cfg_vmid_priority_tagging) { 1327 sp->cmn.priority_tagging = 1; 1328 /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ 1329 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) { 1330 memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, 1331 sizeof(phba->wwpn)); 1332 memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, 1333 sizeof(phba->wwnn)); 1334 } 1335 } 1336 1337 if 
(phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
						phba->defer_flogi_acc_ox_id;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;

		vport->fc_myDID = did;
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * for a @phba. It walks all the outstanding IOCBs on the txcmplq list and
 * issues an abort IOCB command on each outstanding IOCB that contains an
 * active Fabric_DID ndlp. Note that this function only issues the abort
 * command on the outstanding IOCBs; when it returns, there is no guarantee
 * that all of those IOCBs have actually been aborted.
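 *
 * The test applied to each entry on the ELS ring's txcmplq is essentially
 * (a sketch of the loop body below):
 *
 *	if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
 *		ndlp = (struct lpfc_nodelist *)iocb->context1;
 *		if (ndlp && ndlp->nlp_DID == Fabric_DID)
 *			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
 *	}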
1433 * 1434 * Return code 1435 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1436 **/ 1437 int 1438 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1439 { 1440 struct lpfc_sli_ring *pring; 1441 struct lpfc_iocbq *iocb, *next_iocb; 1442 struct lpfc_nodelist *ndlp; 1443 IOCB_t *icmd; 1444 1445 /* Abort outstanding I/O on NPort <nlp_DID> */ 1446 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1447 "0201 Abort outstanding I/O on NPort x%x\n", 1448 Fabric_DID); 1449 1450 pring = lpfc_phba_elsring(phba); 1451 if (unlikely(!pring)) 1452 return -EIO; 1453 1454 /* 1455 * Check the txcmplq for an iocb that matches the nport the driver is 1456 * searching for. 1457 */ 1458 spin_lock_irq(&phba->hbalock); 1459 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1460 icmd = &iocb->iocb; 1461 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 1462 ndlp = (struct lpfc_nodelist *)(iocb->context1); 1463 if (ndlp && ndlp->nlp_DID == Fabric_DID) { 1464 if ((phba->pport->fc_flag & FC_PT2PT) && 1465 !(phba->pport->fc_flag & FC_PT2PT_PLOGI)) 1466 iocb->fabric_iocb_cmpl = 1467 lpfc_ignore_els_cmpl; 1468 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1469 NULL); 1470 } 1471 } 1472 } 1473 /* Make sure HBA is alive */ 1474 lpfc_issue_hb_tmo(phba); 1475 1476 spin_unlock_irq(&phba->hbalock); 1477 1478 return 0; 1479 } 1480 1481 /** 1482 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1483 * @vport: pointer to a host virtual N_Port data structure. 1484 * 1485 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1486 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1487 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1488 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1489 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1490 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1491 * @vport. 1492 * 1493 * Return code 1494 * 0 - failed to issue initial flogi for @vport 1495 * 1 - successfully issued initial flogi for @vport 1496 **/ 1497 int 1498 lpfc_initial_flogi(struct lpfc_vport *vport) 1499 { 1500 struct lpfc_nodelist *ndlp; 1501 1502 vport->port_state = LPFC_FLOGI; 1503 lpfc_set_disctmo(vport); 1504 1505 /* First look for the Fabric ndlp */ 1506 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1507 if (!ndlp) { 1508 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1509 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1510 if (!ndlp) 1511 return 0; 1512 /* Set the node type */ 1513 ndlp->nlp_type |= NLP_FABRIC; 1514 1515 /* Put ndlp onto node list */ 1516 lpfc_enqueue_node(vport, ndlp); 1517 } 1518 1519 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1520 /* This decrement of reference count to node shall kick off 1521 * the release of the node. 1522 */ 1523 lpfc_nlp_put(ndlp); 1524 return 0; 1525 } 1526 return 1; 1527 } 1528 1529 /** 1530 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1531 * @vport: pointer to a host virtual N_Port data structure. 1532 * 1533 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1534 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1535 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1536 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1537 * it will just be enabled and made active. 
The lpfc_issue_els_fdisc() routine 1538 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1539 * @vport. 1540 * 1541 * Return code 1542 * 0 - failed to issue initial fdisc for @vport 1543 * 1 - successfully issued initial fdisc for @vport 1544 **/ 1545 int 1546 lpfc_initial_fdisc(struct lpfc_vport *vport) 1547 { 1548 struct lpfc_nodelist *ndlp; 1549 1550 /* First look for the Fabric ndlp */ 1551 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1552 if (!ndlp) { 1553 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1554 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1555 if (!ndlp) 1556 return 0; 1557 1558 /* NPIV is only supported in Fabrics. */ 1559 ndlp->nlp_type |= NLP_FABRIC; 1560 1561 /* Put ndlp onto node list */ 1562 lpfc_enqueue_node(vport, ndlp); 1563 } 1564 1565 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1566 /* decrement node reference count to trigger the release of 1567 * the node. 1568 */ 1569 lpfc_nlp_put(ndlp); 1570 return 0; 1571 } 1572 return 1; 1573 } 1574 1575 /** 1576 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1577 * @vport: pointer to a host virtual N_Port data structure. 1578 * 1579 * This routine checks whether there are more remaining Port Logins 1580 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1581 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1582 * to issue ELS PLOGIs up to the configured discover threads with the 1583 * @vport (@vport->cfg_discovery_threads). The function also decrement 1584 * the @vport's num_disc_node by 1 if it is not already 0. 1585 **/ 1586 void 1587 lpfc_more_plogi(struct lpfc_vport *vport) 1588 { 1589 if (vport->num_disc_nodes) 1590 vport->num_disc_nodes--; 1591 1592 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1593 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1594 "0232 Continue discovery with %d PLOGIs to go " 1595 "Data: x%x x%x x%x\n", 1596 vport->num_disc_nodes, vport->fc_plogi_cnt, 1597 vport->fc_flag, vport->port_state); 1598 /* Check to see if there are more PLOGIs to be sent */ 1599 if (vport->fc_flag & FC_NLP_MORE) 1600 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1601 lpfc_els_disc_plogi(vport); 1602 1603 return; 1604 } 1605 1606 /** 1607 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp 1608 * @phba: pointer to lpfc hba data structure. 1609 * @prsp: pointer to response IOCB payload. 1610 * @ndlp: pointer to a node-list data structure. 1611 * 1612 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1613 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. 1614 * The following cases are considered N_Port confirmed: 1615 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1616 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1617 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1618 * pointer to the @ndlp will be returned. If the WWPN is not confirmed: 1619 * 1) if there is a node on vport list other than the @ndlp with the same 1620 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1621 * on that node to release the RPI associated with the node; 2) if there is 1622 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1623 * into, a new node shall be allocated (or activated). 
In either case, the 1624 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1625 * be released and the new_ndlp shall be put on to the vport node list and 1626 * its pointer returned as the confirmed node. 1627 * 1628 * Note that before the @ndlp got "released", the keepDID from not-matching 1629 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1630 * of the @ndlp. This is because the release of @ndlp is actually to put it 1631 * into an inactive state on the vport node list and the vport node list 1632 * management algorithm does not allow two node with a same DID. 1633 * 1634 * Return code 1635 * pointer to the PLOGI N_Port @ndlp 1636 **/ 1637 static struct lpfc_nodelist * 1638 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1639 struct lpfc_nodelist *ndlp) 1640 { 1641 struct lpfc_vport *vport = ndlp->vport; 1642 struct lpfc_nodelist *new_ndlp; 1643 struct serv_parm *sp; 1644 uint8_t name[sizeof(struct lpfc_name)]; 1645 uint32_t keepDID = 0, keep_nlp_flag = 0; 1646 uint32_t keep_new_nlp_flag = 0; 1647 uint16_t keep_nlp_state; 1648 u32 keep_nlp_fc4_type = 0; 1649 struct lpfc_nvme_rport *keep_nrport = NULL; 1650 unsigned long *active_rrqs_xri_bitmap = NULL; 1651 1652 /* Fabric nodes can have the same WWPN so we don't bother searching 1653 * by WWPN. Just return the ndlp that was given to us. 1654 */ 1655 if (ndlp->nlp_type & NLP_FABRIC) 1656 return ndlp; 1657 1658 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1659 memset(name, 0, sizeof(struct lpfc_name)); 1660 1661 /* Now we find out if the NPort we are logging into, matches the WWPN 1662 * we have for that ndlp. If not, we have some work to do. 1663 */ 1664 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1665 1666 /* return immediately if the WWPN matches ndlp */ 1667 if (!new_ndlp || (new_ndlp == ndlp)) 1668 return ndlp; 1669 1670 /* 1671 * Unregister from backend if not done yet. Could have been skipped 1672 * due to ADISC 1673 */ 1674 lpfc_nlp_unreg_node(vport, new_ndlp); 1675 1676 if (phba->sli_rev == LPFC_SLI_REV4) { 1677 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1678 GFP_KERNEL); 1679 if (active_rrqs_xri_bitmap) 1680 memset(active_rrqs_xri_bitmap, 0, 1681 phba->cfg_rrq_xri_bitmap_sz); 1682 } 1683 1684 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1685 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1686 "new_ndlp x%x x%x x%x\n", 1687 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1688 (new_ndlp ? new_ndlp->nlp_DID : 0), 1689 (new_ndlp ? new_ndlp->nlp_flag : 0), 1690 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1691 1692 keepDID = new_ndlp->nlp_DID; 1693 1694 if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap) 1695 memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap, 1696 phba->cfg_rrq_xri_bitmap_sz); 1697 1698 /* At this point in this routine, we know new_ndlp will be 1699 * returned. however, any previous GID_FTs that were done 1700 * would have updated nlp_fc4_type in ndlp, so we must ensure 1701 * new_ndlp has the right value. 
1702 */ 1703 if (vport->fc_flag & FC_FABRIC) { 1704 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1705 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1706 } 1707 1708 lpfc_unreg_rpi(vport, new_ndlp); 1709 new_ndlp->nlp_DID = ndlp->nlp_DID; 1710 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1711 if (phba->sli_rev == LPFC_SLI_REV4) 1712 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1713 ndlp->active_rrqs_xri_bitmap, 1714 phba->cfg_rrq_xri_bitmap_sz); 1715 1716 /* Lock both ndlps */ 1717 spin_lock_irq(&ndlp->lock); 1718 spin_lock_irq(&new_ndlp->lock); 1719 keep_new_nlp_flag = new_ndlp->nlp_flag; 1720 keep_nlp_flag = ndlp->nlp_flag; 1721 new_ndlp->nlp_flag = ndlp->nlp_flag; 1722 1723 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1724 if (keep_new_nlp_flag & NLP_UNREG_INP) 1725 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1726 else 1727 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1728 1729 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1730 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1731 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1732 else 1733 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1734 1735 /* 1736 * Retain the DROPPED flag. This will take care of the init 1737 * refcount when affecting the state change 1738 */ 1739 if (keep_new_nlp_flag & NLP_DROPPED) 1740 new_ndlp->nlp_flag |= NLP_DROPPED; 1741 else 1742 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1743 1744 ndlp->nlp_flag = keep_new_nlp_flag; 1745 1746 /* if ndlp had NLP_UNREG_INP set, keep it */ 1747 if (keep_nlp_flag & NLP_UNREG_INP) 1748 ndlp->nlp_flag |= NLP_UNREG_INP; 1749 else 1750 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1751 1752 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1753 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1754 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1755 else 1756 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1757 1758 /* 1759 * Retain the DROPPED flag. This will take care of the init 1760 * refcount when affecting the state change 1761 */ 1762 if (keep_nlp_flag & NLP_DROPPED) 1763 ndlp->nlp_flag |= NLP_DROPPED; 1764 else 1765 ndlp->nlp_flag &= ~NLP_DROPPED; 1766 1767 spin_unlock_irq(&new_ndlp->lock); 1768 spin_unlock_irq(&ndlp->lock); 1769 1770 /* Set nlp_states accordingly */ 1771 keep_nlp_state = new_ndlp->nlp_state; 1772 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1773 1774 /* interchange the nvme remoteport structs */ 1775 keep_nrport = new_ndlp->nrport; 1776 new_ndlp->nrport = ndlp->nrport; 1777 1778 /* Move this back to NPR state */ 1779 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1780 /* The new_ndlp is replacing ndlp totally, so we need 1781 * to put ndlp on UNUSED list and try to free it. 1782 */ 1783 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1784 "3179 PLOGI confirm NEW: %x %x\n", 1785 new_ndlp->nlp_DID, keepDID); 1786 1787 /* Two ndlps cannot have the same did on the nodelist. 1788 * Note: for this case, ndlp has a NULL WWPN so setting 1789 * the nlp_fc4_type isn't required. 1790 */ 1791 ndlp->nlp_DID = keepDID; 1792 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1793 if (phba->sli_rev == LPFC_SLI_REV4 && 1794 active_rrqs_xri_bitmap) 1795 memcpy(ndlp->active_rrqs_xri_bitmap, 1796 active_rrqs_xri_bitmap, 1797 phba->cfg_rrq_xri_bitmap_sz); 1798 1799 } else { 1800 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1801 "3180 PLOGI confirm SWAP: %x %x\n", 1802 new_ndlp->nlp_DID, keepDID); 1803 1804 lpfc_unreg_rpi(vport, ndlp); 1805 1806 /* Two ndlps cannot have the same did and the fc4 1807 * type must be transferred because the ndlp is in 1808 * flight. 
1809 */ 1810 ndlp->nlp_DID = keepDID; 1811 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1812 1813 if (phba->sli_rev == LPFC_SLI_REV4 && 1814 active_rrqs_xri_bitmap) 1815 memcpy(ndlp->active_rrqs_xri_bitmap, 1816 active_rrqs_xri_bitmap, 1817 phba->cfg_rrq_xri_bitmap_sz); 1818 1819 /* Since we are switching over to the new_ndlp, 1820 * reset the old ndlp state 1821 */ 1822 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1823 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1824 keep_nlp_state = NLP_STE_NPR_NODE; 1825 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1826 ndlp->nrport = keep_nrport; 1827 } 1828 1829 /* 1830 * If ndlp is not associated with any rport we can drop it here else 1831 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1832 */ 1833 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1834 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1835 1836 if (phba->sli_rev == LPFC_SLI_REV4 && 1837 active_rrqs_xri_bitmap) 1838 mempool_free(active_rrqs_xri_bitmap, 1839 phba->active_rrq_pool); 1840 1841 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1842 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1843 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1844 new_ndlp->nlp_fc4_type); 1845 1846 return new_ndlp; 1847 } 1848 1849 /** 1850 * lpfc_end_rscn - Check and handle more rscn for a vport 1851 * @vport: pointer to a host virtual N_Port data structure. 1852 * 1853 * This routine checks whether more Registration State Change 1854 * Notifications (RSCNs) came in while the discovery state machine was in 1855 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1856 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1857 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1858 * handling the RSCNs. 1859 **/ 1860 void 1861 lpfc_end_rscn(struct lpfc_vport *vport) 1862 { 1863 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1864 1865 if (vport->fc_flag & FC_RSCN_MODE) { 1866 /* 1867 * Check to see if more RSCNs came in while we were 1868 * processing this one. 1869 */ 1870 if (vport->fc_rscn_id_cnt || 1871 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1872 lpfc_els_handle_rscn(vport); 1873 else { 1874 spin_lock_irq(shost->host_lock); 1875 vport->fc_flag &= ~FC_RSCN_MODE; 1876 spin_unlock_irq(shost->host_lock); 1877 } 1878 } 1879 } 1880 1881 /** 1882 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1883 * @phba: pointer to lpfc hba data structure. 1884 * @cmdiocb: pointer to lpfc command iocb data structure. 1885 * @rspiocb: pointer to lpfc response iocb data structure. 1886 * 1887 * This routine will call the clear rrq function to free the rrq and 1888 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1889 * exist then the clear_rrq is still called because the rrq needs to 1890 * be freed. 
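 *
 * In outline, the unconditional cleanup below always runs in this order
 * (a sketch of the tail of the handler; the logging and LS_RJT checks
 * are elided):
 *
 *   lpfc_clr_rrq_active(phba, rrq->xritag, rrq);   free the RRQ resource
 *   lpfc_els_free_iocb(phba, cmdiocb);             release the command IOCB
 *   lpfc_nlp_put(ndlp);                            drop the I/O node reference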
1891 **/ 1892 1893 static void 1894 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1895 struct lpfc_iocbq *rspiocb) 1896 { 1897 struct lpfc_vport *vport = cmdiocb->vport; 1898 IOCB_t *irsp; 1899 struct lpfc_nodelist *ndlp = cmdiocb->context1; 1900 struct lpfc_node_rrq *rrq; 1901 1902 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1903 rrq = cmdiocb->context_un.rrq; 1904 cmdiocb->context_un.rsp_iocb = rspiocb; 1905 1906 irsp = &rspiocb->iocb; 1907 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1908 "RRQ cmpl: status:x%x/x%x did:x%x", 1909 irsp->ulpStatus, irsp->un.ulpWord[4], 1910 irsp->un.elsreq64.remoteID); 1911 1912 /* rrq completes to NPort <nlp_DID> */ 1913 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1914 "2880 RRQ completes to DID x%x " 1915 "Data: x%x x%x x%x x%x x%x\n", 1916 irsp->un.elsreq64.remoteID, 1917 irsp->ulpStatus, irsp->un.ulpWord[4], 1918 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1919 1920 if (irsp->ulpStatus) { 1921 /* Check for retry */ 1922 /* RRQ failed Don't print the vport to vport rjts */ 1923 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1924 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1925 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1926 (phba)->pport->cfg_log_verbose & LOG_ELS) 1927 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1928 "2881 RRQ failure DID:%06X Status:" 1929 "x%x/x%x\n", 1930 ndlp->nlp_DID, irsp->ulpStatus, 1931 irsp->un.ulpWord[4]); 1932 } 1933 1934 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1935 lpfc_els_free_iocb(phba, cmdiocb); 1936 lpfc_nlp_put(ndlp); 1937 return; 1938 } 1939 /** 1940 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1941 * @phba: pointer to lpfc hba data structure. 1942 * @cmdiocb: pointer to lpfc command iocb data structure. 1943 * @rspiocb: pointer to lpfc response iocb data structure. 1944 * 1945 * This routine is the completion callback function for issuing the Port 1946 * Login (PLOGI) command. For PLOGI completion, there must be an active 1947 * ndlp on the vport node list that matches the remote node ID from the 1948 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1949 * ignored and command IOCB released. The PLOGI response IOCB status is 1950 * checked for error conditions. If there is error status reported, PLOGI 1951 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1952 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1953 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1954 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1955 * there are additional N_Port nodes with the vport that need to perform 1956 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1957 * PLOGIs. 
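 *
 * For reference, the LS_ACC payload behind this completion starts with
 * the 4-byte ELS command code and is immediately followed by the common
 * service parameters, so the handler locates them with fixed pointer
 * math (a sketch of the offset used in the good-status path below):
 *
 *   prsp = list_entry(((struct lpfc_dmabuf *)cmdiocb->context2)->list.next,
 *                     struct lpfc_dmabuf, list);
 *   sp = (struct serv_parm *)((u8 *)prsp->virt + sizeof(u32));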
1958 **/ 1959 static void 1960 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1961 struct lpfc_iocbq *rspiocb) 1962 { 1963 struct lpfc_vport *vport = cmdiocb->vport; 1964 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1965 IOCB_t *irsp; 1966 struct lpfc_nodelist *ndlp, *free_ndlp; 1967 struct lpfc_dmabuf *prsp; 1968 int disc; 1969 struct serv_parm *sp = NULL; 1970 1971 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1972 cmdiocb->context_un.rsp_iocb = rspiocb; 1973 1974 irsp = &rspiocb->iocb; 1975 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1976 "PLOGI cmpl: status:x%x/x%x did:x%x", 1977 irsp->ulpStatus, irsp->un.ulpWord[4], 1978 irsp->un.elsreq64.remoteID); 1979 1980 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1981 if (!ndlp) { 1982 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1983 "0136 PLOGI completes to NPort x%x " 1984 "with no ndlp. Data: x%x x%x x%x\n", 1985 irsp->un.elsreq64.remoteID, 1986 irsp->ulpStatus, irsp->un.ulpWord[4], 1987 irsp->ulpIoTag); 1988 goto out_freeiocb; 1989 } 1990 1991 /* Since ndlp can be freed in the disc state machine, note if this node 1992 * is being used during discovery. 1993 */ 1994 spin_lock_irq(&ndlp->lock); 1995 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1996 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1997 spin_unlock_irq(&ndlp->lock); 1998 1999 /* PLOGI completes to NPort <nlp_DID> */ 2000 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2001 "0102 PLOGI completes to NPort x%06x " 2002 "Data: x%x x%x x%x x%x x%x\n", 2003 ndlp->nlp_DID, ndlp->nlp_fc4_type, 2004 irsp->ulpStatus, irsp->un.ulpWord[4], 2005 disc, vport->num_disc_nodes); 2006 2007 /* Check to see if link went down during discovery */ 2008 if (lpfc_els_chk_latt(vport)) { 2009 spin_lock_irq(&ndlp->lock); 2010 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2011 spin_unlock_irq(&ndlp->lock); 2012 goto out; 2013 } 2014 2015 if (irsp->ulpStatus) { 2016 /* Check for retry */ 2017 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2018 /* ELS command is being retried */ 2019 if (disc) { 2020 spin_lock_irq(&ndlp->lock); 2021 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2022 spin_unlock_irq(&ndlp->lock); 2023 } 2024 goto out; 2025 } 2026 /* PLOGI failed Don't print the vport to vport rjts */ 2027 if (irsp->ulpStatus != IOSTAT_LS_RJT || 2028 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 2029 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 2030 (phba)->pport->cfg_log_verbose & LOG_ELS) 2031 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2032 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 2033 ndlp->nlp_DID, irsp->ulpStatus, 2034 irsp->un.ulpWord[4]); 2035 2036 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2037 if (!lpfc_error_lost_link(irsp)) 2038 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2039 NLP_EVT_CMPL_PLOGI); 2040 2041 /* If a PLOGI collision occurred, the node needs to continue 2042 * with the reglogin process. 2043 */ 2044 spin_lock_irq(&ndlp->lock); 2045 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2046 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2047 spin_unlock_irq(&ndlp->lock); 2048 goto out; 2049 } 2050 spin_unlock_irq(&ndlp->lock); 2051 2052 /* No PLOGI collision and the node is not registered with the 2053 * scsi or nvme transport. It is no longer an active node. Just 2054 * start the device remove process. 
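 *
 * SCSI_XPT_REGD and NVME_XPT_REGD are only set once the node has been
 * registered with the respective transport; while either bit is set,
 * dev_loss handling owns the final cleanup instead of this path.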
2055 */ 2056 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2057 spin_lock_irq(&ndlp->lock); 2058 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2059 spin_unlock_irq(&ndlp->lock); 2060 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2061 NLP_EVT_DEVICE_RM); 2062 } 2063 } else { 2064 /* Good status, call state machine */ 2065 prsp = list_entry(((struct lpfc_dmabuf *) 2066 cmdiocb->context2)->list.next, 2067 struct lpfc_dmabuf, list); 2068 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2069 2070 sp = (struct serv_parm *)((u8 *)prsp->virt + 2071 sizeof(u32)); 2072 2073 ndlp->vmid_support = 0; 2074 if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || 2075 (phba->cfg_vmid_priority_tagging && 2076 sp->cmn.priority_tagging)) { 2077 lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, 2078 "4018 app_hdr_support %d tagging %d DID x%x\n", 2079 sp->cmn.app_hdr_support, 2080 sp->cmn.priority_tagging, 2081 ndlp->nlp_DID); 2082 /* if the dest port supports VMID, mark it in ndlp */ 2083 ndlp->vmid_support = 1; 2084 } 2085 2086 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2087 NLP_EVT_CMPL_PLOGI); 2088 } 2089 2090 if (disc && vport->num_disc_nodes) { 2091 /* Check to see if there are more PLOGIs to be sent */ 2092 lpfc_more_plogi(vport); 2093 2094 if (vport->num_disc_nodes == 0) { 2095 spin_lock_irq(shost->host_lock); 2096 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2097 spin_unlock_irq(shost->host_lock); 2098 2099 lpfc_can_disctmo(vport); 2100 lpfc_end_rscn(vport); 2101 } 2102 } 2103 2104 out: 2105 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2106 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2107 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2108 2109 out_freeiocb: 2110 /* Release the reference on the original I/O request. */ 2111 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 2112 2113 lpfc_els_free_iocb(phba, cmdiocb); 2114 lpfc_nlp_put(free_ndlp); 2115 return; 2116 } 2117 2118 /** 2119 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2120 * @vport: pointer to a host virtual N_Port data structure. 2121 * @did: destination port identifier. 2122 * @retry: number of retries to the command IOCB. 2123 * 2124 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2125 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2126 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2127 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2128 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2129 * 2130 * Note that the ndlp reference count will be incremented by 1 for holding 2131 * the ndlp and the reference to ndlp will be stored into the context1 field 2132 * of the IOCB for the completion callback function to the PLOGI ELS command. 2133 * 2134 * Return code 2135 * 0 - Successfully issued a plogi for @vport 2136 * 1 - failed to issue a plogi for @vport 2137 **/ 2138 int 2139 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2140 { 2141 struct lpfc_hba *phba = vport->phba; 2142 struct serv_parm *sp; 2143 struct lpfc_nodelist *ndlp; 2144 struct lpfc_iocbq *elsiocb; 2145 uint8_t *pcmd; 2146 uint16_t cmdsize; 2147 int ret; 2148 2149 ndlp = lpfc_findnode_did(vport, did); 2150 if (!ndlp) 2151 return 1; 2152 2153 /* Defer the processing of the issue PLOGI until after the 2154 * outstanding UNREG_RPI mbox command completes, unless we 2155 * are going offline. 
This logic does not apply for Fabric DIDs 2156 */ 2157 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2158 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2159 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2160 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2161 "4110 Issue PLOGI x%x deferred " 2162 "on NPort x%x rpi x%x Data: x%px\n", 2163 ndlp->nlp_defer_did, ndlp->nlp_DID, 2164 ndlp->nlp_rpi, ndlp); 2165 2166 /* We can only defer 1st PLOGI */ 2167 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2168 ndlp->nlp_defer_did = did; 2169 return 0; 2170 } 2171 2172 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2173 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2174 ELS_CMD_PLOGI); 2175 if (!elsiocb) 2176 return 1; 2177 2178 spin_lock_irq(&ndlp->lock); 2179 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2180 spin_unlock_irq(&ndlp->lock); 2181 2182 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2183 2184 /* For PLOGI request, remainder of payload is service parameters */ 2185 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2186 pcmd += sizeof(uint32_t); 2187 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2188 sp = (struct serv_parm *) pcmd; 2189 2190 /* 2191 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2192 * to device on remote loops work. 2193 */ 2194 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2195 sp->cmn.altBbCredit = 1; 2196 2197 if (sp->cmn.fcphLow < FC_PH_4_3) 2198 sp->cmn.fcphLow = FC_PH_4_3; 2199 2200 if (sp->cmn.fcphHigh < FC_PH3) 2201 sp->cmn.fcphHigh = FC_PH3; 2202 2203 sp->cmn.valid_vendor_ver_level = 0; 2204 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2205 sp->cmn.bbRcvSizeMsb &= 0xF; 2206 2207 /* Check if the destination port supports VMID */ 2208 ndlp->vmid_support = 0; 2209 if (vport->vmid_priority_tagging) 2210 sp->cmn.priority_tagging = 1; 2211 else if (phba->cfg_vmid_app_header && 2212 bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) 2213 sp->cmn.app_hdr_support = 1; 2214 2215 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2216 "Issue PLOGI: did:x%x", 2217 did, 0, 0); 2218 2219 /* If our firmware supports this feature, convey that 2220 * information to the target using the vendor specific field. 2221 */ 2222 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2223 sp->cmn.valid_vendor_ver_level = 1; 2224 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2225 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2226 } 2227 2228 phba->fc_stat.elsXmitPLOGI++; 2229 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2230 2231 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2232 "Issue PLOGI: did:x%x refcnt %d", 2233 did, kref_read(&ndlp->kref), 0); 2234 elsiocb->context1 = lpfc_nlp_get(ndlp); 2235 if (!elsiocb->context1) { 2236 lpfc_els_free_iocb(phba, elsiocb); 2237 return 1; 2238 } 2239 2240 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2241 if (ret) { 2242 lpfc_els_free_iocb(phba, elsiocb); 2243 lpfc_nlp_put(ndlp); 2244 return 1; 2245 } 2246 2247 return 0; 2248 } 2249 2250 /** 2251 * lpfc_cmpl_els_prli - Completion callback function for prli 2252 * @phba: pointer to lpfc hba data structure. 2253 * @cmdiocb: pointer to lpfc command iocb data structure. 2254 * @rspiocb: pointer to lpfc response iocb data structure. 2255 * 2256 * This routine is the completion callback function for a Process Login 2257 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2258 * status. 
If there is error status reported, PRLI retry shall be attempted 2259 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2260 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2261 * ndlp to mark the PRLI completion. 2262 **/ 2263 static void 2264 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2265 struct lpfc_iocbq *rspiocb) 2266 { 2267 struct lpfc_vport *vport = cmdiocb->vport; 2268 IOCB_t *irsp; 2269 struct lpfc_nodelist *ndlp; 2270 char *mode; 2271 u32 loglevel; 2272 2273 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2274 cmdiocb->context_un.rsp_iocb = rspiocb; 2275 2276 irsp = &(rspiocb->iocb); 2277 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2278 spin_lock_irq(&ndlp->lock); 2279 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2280 2281 /* Driver supports multiple FC4 types. Counters matter. */ 2282 vport->fc_prli_sent--; 2283 ndlp->fc4_prli_sent--; 2284 spin_unlock_irq(&ndlp->lock); 2285 2286 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2287 "PRLI cmpl: status:x%x/x%x did:x%x", 2288 irsp->ulpStatus, irsp->un.ulpWord[4], 2289 ndlp->nlp_DID); 2290 2291 /* PRLI completes to NPort <nlp_DID> */ 2292 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2293 "0103 PRLI completes to NPort x%06x " 2294 "Data: x%x x%x x%x x%x\n", 2295 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2296 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2297 2298 /* Check to see if link went down during discovery */ 2299 if (lpfc_els_chk_latt(vport)) 2300 goto out; 2301 2302 if (irsp->ulpStatus) { 2303 /* Check for retry */ 2304 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2305 /* ELS command is being retried */ 2306 goto out; 2307 } 2308 2309 /* If we don't send GFT_ID to Fabric, a PRLI error 2310 * could be expected. 2311 */ 2312 if ((vport->fc_flag & FC_FABRIC) || 2313 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2314 mode = KERN_ERR; 2315 loglevel = LOG_TRACE_EVENT; 2316 } else { 2317 mode = KERN_INFO; 2318 loglevel = LOG_ELS; 2319 } 2320 2321 /* PRLI failed */ 2322 lpfc_printf_vlog(vport, mode, loglevel, 2323 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2324 "data: x%x\n", 2325 ndlp->nlp_DID, irsp->ulpStatus, 2326 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2327 2328 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2329 if (!lpfc_error_lost_link(irsp)) 2330 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2331 NLP_EVT_CMPL_PRLI); 2332 2333 /* As long as this node is not registered with the SCSI 2334 * or NVMe transport and no other PRLIs are outstanding, 2335 * it is no longer an active node. Otherwise devloss 2336 * handles the final cleanup. 2337 */ 2338 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2339 !ndlp->fc4_prli_sent) { 2340 spin_lock_irq(&ndlp->lock); 2341 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2342 spin_unlock_irq(&ndlp->lock); 2343 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2344 NLP_EVT_DEVICE_RM); 2345 } 2346 } else { 2347 /* Good status, call state machine. However, if another 2348 * PRLI is outstanding, don't call the state machine 2349 * because final disposition to Mapped or Unmapped is 2350 * completed there. 2351 */ 2352 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2353 NLP_EVT_CMPL_PRLI); 2354 } 2355 2356 out: 2357 lpfc_els_free_iocb(phba, cmdiocb); 2358 lpfc_nlp_put(ndlp); 2359 return; 2360 } 2361 2362 /** 2363 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2364 * @vport: pointer to a host virtual N_Port data structure. 2365 * @ndlp: pointer to a node-list data structure. 
2366 * @retry: number of retries to the command IOCB. 2367 * 2368 * This routine issues a Process Login (PRLI) ELS command for the 2369 * @vport. The PRLI service parameters are set up in the payload of the 2370 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2371 * is put to the IOCB completion callback func field before invoking the 2372 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2373 * 2374 * Note that the ndlp reference count will be incremented by 1 for holding the 2375 * ndlp and the reference to ndlp will be stored into the context1 field of 2376 * the IOCB for the completion callback function to the PRLI ELS command. 2377 * 2378 * Return code 2379 * 0 - successfully issued prli iocb command for @vport 2380 * 1 - failed to issue prli iocb command for @vport 2381 **/ 2382 int 2383 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2384 uint8_t retry) 2385 { 2386 int rc = 0; 2387 struct lpfc_hba *phba = vport->phba; 2388 PRLI *npr; 2389 struct lpfc_nvme_prli *npr_nvme; 2390 struct lpfc_iocbq *elsiocb; 2391 uint8_t *pcmd; 2392 uint16_t cmdsize; 2393 u32 local_nlp_type, elscmd; 2394 2395 /* 2396 * If we are in RSCN mode, the FC4 types supported from a 2397 * previous GFT_ID command may not be accurate. So, if we 2398 * are a NVME Initiator, always look for the possibility of 2399 * the remote NPort beng a NVME Target. 2400 */ 2401 if (phba->sli_rev == LPFC_SLI_REV4 && 2402 vport->fc_flag & FC_RSCN_MODE && 2403 vport->nvmei_support) 2404 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2405 local_nlp_type = ndlp->nlp_fc4_type; 2406 2407 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2408 * fields here before any of them can complete. 2409 */ 2410 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2411 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2412 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2413 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2414 ndlp->nvme_fb_size = 0; 2415 2416 send_next_prli: 2417 if (local_nlp_type & NLP_FC4_FCP) { 2418 /* Payload is 4 + 16 = 20 x14 bytes. */ 2419 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2420 elscmd = ELS_CMD_PRLI; 2421 } else if (local_nlp_type & NLP_FC4_NVME) { 2422 /* Payload is 4 + 20 = 24 x18 bytes. */ 2423 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2424 elscmd = ELS_CMD_NVMEPRLI; 2425 } else { 2426 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2427 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2428 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2429 return 1; 2430 } 2431 2432 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2433 * FC4 type, implicitly LOGO. 2434 */ 2435 if (phba->sli_rev == LPFC_SLI_REV3 && 2436 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2437 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2438 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2439 ndlp->nlp_type); 2440 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2441 return 1; 2442 } 2443 2444 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2445 ndlp->nlp_DID, elscmd); 2446 if (!elsiocb) 2447 return 1; 2448 2449 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2450 2451 /* For PRLI request, remainder of payload is service parameters */ 2452 memset(pcmd, 0, cmdsize); 2453 2454 if (local_nlp_type & NLP_FC4_FCP) { 2455 /* Remainder of payload is FCP PRLI parameter page. 2456 * Note: this data structure is defined as 2457 * BE/LE in the structure definition so no 2458 * byte swap call is made. 
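 *
 * For contrast, the NVME PRLI page built in the next branch is filled
 * in with the bf_set() accessors on CPU-endian words and is therefore
 * byte swapped once complete, along the lines of:
 *
 *   bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
 *   bf_set(prli_init, npr_nvme, 1);
 *   npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
 *   npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);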
2459 */ 2460 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2461 pcmd += sizeof(uint32_t); 2462 npr = (PRLI *)pcmd; 2463 2464 /* 2465 * If our firmware version is 3.20 or later, 2466 * set the following bits for FC-TAPE support. 2467 */ 2468 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2469 npr->ConfmComplAllowed = 1; 2470 npr->Retry = 1; 2471 npr->TaskRetryIdReq = 1; 2472 } 2473 npr->estabImagePair = 1; 2474 npr->readXferRdyDis = 1; 2475 if (vport->cfg_first_burst_size) 2476 npr->writeXferRdyDis = 1; 2477 2478 /* For FCP support */ 2479 npr->prliType = PRLI_FCP_TYPE; 2480 npr->initiatorFunc = 1; 2481 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2482 2483 /* Remove FCP type - processed. */ 2484 local_nlp_type &= ~NLP_FC4_FCP; 2485 } else if (local_nlp_type & NLP_FC4_NVME) { 2486 /* Remainder of payload is NVME PRLI parameter page. 2487 * This data structure is the newer definition that 2488 * uses bf macros so a byte swap is required. 2489 */ 2490 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2491 pcmd += sizeof(uint32_t); 2492 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2493 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2494 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2495 if (phba->nsler) { 2496 bf_set(prli_nsler, npr_nvme, 1); 2497 bf_set(prli_conf, npr_nvme, 1); 2498 } 2499 2500 /* Only initiators request first burst. */ 2501 if ((phba->cfg_nvme_enable_fb) && 2502 !phba->nvmet_support) 2503 bf_set(prli_fba, npr_nvme, 1); 2504 2505 if (phba->nvmet_support) { 2506 bf_set(prli_tgt, npr_nvme, 1); 2507 bf_set(prli_disc, npr_nvme, 1); 2508 } else { 2509 bf_set(prli_init, npr_nvme, 1); 2510 bf_set(prli_conf, npr_nvme, 1); 2511 } 2512 2513 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2514 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2515 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2516 2517 /* Remove NVME type - processed. */ 2518 local_nlp_type &= ~NLP_FC4_NVME; 2519 } 2520 2521 phba->fc_stat.elsXmitPRLI++; 2522 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2523 spin_lock_irq(&ndlp->lock); 2524 ndlp->nlp_flag |= NLP_PRLI_SND; 2525 2526 /* The vport counters are used for lpfc_scan_finished, but 2527 * the ndlp is used to track outstanding PRLIs for different 2528 * FC4 types. 2529 */ 2530 vport->fc_prli_sent++; 2531 ndlp->fc4_prli_sent++; 2532 spin_unlock_irq(&ndlp->lock); 2533 2534 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2535 "Issue PRLI: did:x%x refcnt %d", 2536 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2537 elsiocb->context1 = lpfc_nlp_get(ndlp); 2538 if (!elsiocb->context1) { 2539 lpfc_els_free_iocb(phba, elsiocb); 2540 goto err; 2541 } 2542 2543 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2544 if (rc == IOCB_ERROR) { 2545 lpfc_els_free_iocb(phba, elsiocb); 2546 lpfc_nlp_put(ndlp); 2547 goto err; 2548 } 2549 2550 2551 /* The driver supports 2 FC4 types. Make sure 2552 * a PRLI is issued for all types before exiting. 2553 */ 2554 if (phba->sli_rev == LPFC_SLI_REV4 && 2555 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2556 goto send_next_prli; 2557 else 2558 return 0; 2559 2560 err: 2561 spin_lock_irq(&ndlp->lock); 2562 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2563 spin_unlock_irq(&ndlp->lock); 2564 return 1; 2565 } 2566 2567 /** 2568 * lpfc_rscn_disc - Perform rscn discovery for a vport 2569 * @vport: pointer to a host virtual N_Port data structure. 2570 * 2571 * This routine performs Registration State Change Notification (RSCN) 2572 * discovery for a @vport. 
If the @vport's node port recovery count is not 2573 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2574 * the nodes that need recovery. If none of the PLOGI were needed through 2575 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2576 * invoked to check and handle possible more RSCN came in during the period 2577 * of processing the current ones. 2578 **/ 2579 static void 2580 lpfc_rscn_disc(struct lpfc_vport *vport) 2581 { 2582 lpfc_can_disctmo(vport); 2583 2584 /* RSCN discovery */ 2585 /* go thru NPR nodes and issue ELS PLOGIs */ 2586 if (vport->fc_npr_cnt) 2587 if (lpfc_els_disc_plogi(vport)) 2588 return; 2589 2590 lpfc_end_rscn(vport); 2591 } 2592 2593 /** 2594 * lpfc_adisc_done - Complete the adisc phase of discovery 2595 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2596 * 2597 * This function is called when the final ADISC is completed during discovery. 2598 * This function handles clearing link attention or issuing reg_vpi depending 2599 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2600 * discovery. 2601 * This function is called with no locks held. 2602 **/ 2603 static void 2604 lpfc_adisc_done(struct lpfc_vport *vport) 2605 { 2606 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2607 struct lpfc_hba *phba = vport->phba; 2608 2609 /* 2610 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2611 * and continue discovery. 2612 */ 2613 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2614 !(vport->fc_flag & FC_RSCN_MODE) && 2615 (phba->sli_rev < LPFC_SLI_REV4)) { 2616 2617 /* 2618 * If link is down, clear_la and reg_vpi will be done after 2619 * flogi following a link up event 2620 */ 2621 if (!lpfc_is_link_up(phba)) 2622 return; 2623 2624 /* The ADISCs are complete. Doesn't matter if they 2625 * succeeded or failed because the ADISC completion 2626 * routine guarantees to call the state machine and 2627 * the RPI is either unregistered (failed ADISC response) 2628 * or the RPI is still valid and the node is marked 2629 * mapped for a target. The exchanges should be in the 2630 * correct state. This code is specific to SLI3. 2631 */ 2632 lpfc_issue_clear_la(phba, vport); 2633 lpfc_issue_reg_vpi(phba, vport); 2634 return; 2635 } 2636 /* 2637 * For SLI2, we need to set port_state to READY 2638 * and continue discovery. 2639 */ 2640 if (vport->port_state < LPFC_VPORT_READY) { 2641 /* If we get here, there is nothing to ADISC */ 2642 lpfc_issue_clear_la(phba, vport); 2643 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2644 vport->num_disc_nodes = 0; 2645 /* go thru NPR list, issue ELS PLOGIs */ 2646 if (vport->fc_npr_cnt) 2647 lpfc_els_disc_plogi(vport); 2648 if (!vport->num_disc_nodes) { 2649 spin_lock_irq(shost->host_lock); 2650 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2651 spin_unlock_irq(shost->host_lock); 2652 lpfc_can_disctmo(vport); 2653 lpfc_end_rscn(vport); 2654 } 2655 } 2656 vport->port_state = LPFC_VPORT_READY; 2657 } else 2658 lpfc_rscn_disc(vport); 2659 } 2660 2661 /** 2662 * lpfc_more_adisc - Issue more adisc as needed 2663 * @vport: pointer to a host virtual N_Port data structure. 2664 * 2665 * This routine determines whether there are more ndlps on a @vport 2666 * node list need to have Address Discover (ADISC) issued. If so, it will 2667 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2668 * remaining nodes which need to have ADISC sent. 
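 *
 * Once no discovery nodes remain, lpfc_adisc_done() is invoked to close
 * out the ADISC phase. The usual call site is the ADISC completion
 * handler, roughly:
 *
 *   if (disc && vport->num_disc_nodes)
 *           lpfc_more_adisc(vport);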
2669 **/ 2670 void 2671 lpfc_more_adisc(struct lpfc_vport *vport) 2672 { 2673 if (vport->num_disc_nodes) 2674 vport->num_disc_nodes--; 2675 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2676 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2677 "0210 Continue discovery with %d ADISCs to go " 2678 "Data: x%x x%x x%x\n", 2679 vport->num_disc_nodes, vport->fc_adisc_cnt, 2680 vport->fc_flag, vport->port_state); 2681 /* Check to see if there are more ADISCs to be sent */ 2682 if (vport->fc_flag & FC_NLP_MORE) { 2683 lpfc_set_disctmo(vport); 2684 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2685 lpfc_els_disc_adisc(vport); 2686 } 2687 if (!vport->num_disc_nodes) 2688 lpfc_adisc_done(vport); 2689 return; 2690 } 2691 2692 /** 2693 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2694 * @phba: pointer to lpfc hba data structure. 2695 * @cmdiocb: pointer to lpfc command iocb data structure. 2696 * @rspiocb: pointer to lpfc response iocb data structure. 2697 * 2698 * This routine is the completion function for issuing the Address Discover 2699 * (ADISC) command. It first checks to see whether link went down during 2700 * the discovery process. If so, the node will be marked as node port 2701 * recovery for issuing discover IOCB by the link attention handler and 2702 * exit. Otherwise, the response status is checked. If error was reported 2703 * in the response status, the ADISC command shall be retried by invoking 2704 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2705 * the response status, the state machine is invoked to set transition 2706 * with respect to NLP_EVT_CMPL_ADISC event. 2707 **/ 2708 static void 2709 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2710 struct lpfc_iocbq *rspiocb) 2711 { 2712 struct lpfc_vport *vport = cmdiocb->vport; 2713 IOCB_t *irsp; 2714 struct lpfc_nodelist *ndlp; 2715 int disc; 2716 2717 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2718 cmdiocb->context_un.rsp_iocb = rspiocb; 2719 2720 irsp = &(rspiocb->iocb); 2721 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2722 2723 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2724 "ADISC cmpl: status:x%x/x%x did:x%x", 2725 irsp->ulpStatus, irsp->un.ulpWord[4], 2726 ndlp->nlp_DID); 2727 2728 /* Since ndlp can be freed in the disc state machine, note if this node 2729 * is being used during discovery. 
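 * The NLP_NPR_2B_DISC bit is restored further down if the command is
 * going to be retried or a link attention aborted discovery, so the
 * node is not lost from the discovery accounting.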
2730 */ 2731 spin_lock_irq(&ndlp->lock); 2732 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2733 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2734 spin_unlock_irq(&ndlp->lock); 2735 /* ADISC completes to NPort <nlp_DID> */ 2736 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2737 "0104 ADISC completes to NPort x%x " 2738 "Data: x%x x%x x%x x%x x%x\n", 2739 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2740 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2741 /* Check to see if link went down during discovery */ 2742 if (lpfc_els_chk_latt(vport)) { 2743 spin_lock_irq(&ndlp->lock); 2744 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2745 spin_unlock_irq(&ndlp->lock); 2746 goto out; 2747 } 2748 2749 if (irsp->ulpStatus) { 2750 /* Check for retry */ 2751 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2752 /* ELS command is being retried */ 2753 if (disc) { 2754 spin_lock_irq(&ndlp->lock); 2755 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2756 spin_unlock_irq(&ndlp->lock); 2757 lpfc_set_disctmo(vport); 2758 } 2759 goto out; 2760 } 2761 /* ADISC failed */ 2762 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2763 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2764 ndlp->nlp_DID, irsp->ulpStatus, 2765 irsp->un.ulpWord[4]); 2766 2767 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2768 NLP_EVT_CMPL_ADISC); 2769 2770 /* As long as this node is not registered with the SCSI or NVMe 2771 * transport, it is no longer an active node. Otherwise 2772 * devloss handles the final cleanup. 2773 */ 2774 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2775 spin_lock_irq(&ndlp->lock); 2776 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2777 spin_unlock_irq(&ndlp->lock); 2778 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2779 NLP_EVT_DEVICE_RM); 2780 } 2781 } else 2782 /* Good status, call state machine */ 2783 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2784 NLP_EVT_CMPL_ADISC); 2785 2786 /* Check to see if there are more ADISCs to be sent */ 2787 if (disc && vport->num_disc_nodes) 2788 lpfc_more_adisc(vport); 2789 out: 2790 lpfc_els_free_iocb(phba, cmdiocb); 2791 lpfc_nlp_put(ndlp); 2792 return; 2793 } 2794 2795 /** 2796 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2797 * @vport: pointer to a virtual N_Port data structure. 2798 * @ndlp: pointer to a node-list data structure. 2799 * @retry: number of retries to the command IOCB. 2800 * 2801 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2802 * @vport. It prepares the payload of the ADISC ELS command, updates the 2803 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2804 * to issue the ADISC ELS command. 2805 * 2806 * Note that the ndlp reference count will be incremented by 1 for holding the 2807 * ndlp and the reference to ndlp will be stored into the context1 field of 2808 * the IOCB for the completion callback function to the ADISC ELS command. 
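 *
 * A minimal sketch of that reference handling, as done at the bottom of
 * this routine (the error path is abbreviated):
 *
 *   elsiocb->context1 = lpfc_nlp_get(ndlp);
 *   if (!elsiocb->context1)
 *           goto err;
 *   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *   if (rc == IOCB_ERROR)
 *           lpfc_nlp_put(ndlp);
 *
 * On success the reference is dropped again by lpfc_cmpl_els_adisc()
 * when the ADISC completes.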
2809 * 2810 * Return code 2811 * 0 - successfully issued adisc 2812 * 1 - failed to issue adisc 2813 **/ 2814 int 2815 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2816 uint8_t retry) 2817 { 2818 int rc = 0; 2819 struct lpfc_hba *phba = vport->phba; 2820 ADISC *ap; 2821 struct lpfc_iocbq *elsiocb; 2822 uint8_t *pcmd; 2823 uint16_t cmdsize; 2824 2825 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2826 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2827 ndlp->nlp_DID, ELS_CMD_ADISC); 2828 if (!elsiocb) 2829 return 1; 2830 2831 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2832 2833 /* For ADISC request, remainder of payload is service parameters */ 2834 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2835 pcmd += sizeof(uint32_t); 2836 2837 /* Fill in ADISC payload */ 2838 ap = (ADISC *) pcmd; 2839 ap->hardAL_PA = phba->fc_pref_ALPA; 2840 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2841 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2842 ap->DID = be32_to_cpu(vport->fc_myDID); 2843 2844 phba->fc_stat.elsXmitADISC++; 2845 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2846 spin_lock_irq(&ndlp->lock); 2847 ndlp->nlp_flag |= NLP_ADISC_SND; 2848 spin_unlock_irq(&ndlp->lock); 2849 elsiocb->context1 = lpfc_nlp_get(ndlp); 2850 if (!elsiocb->context1) { 2851 lpfc_els_free_iocb(phba, elsiocb); 2852 goto err; 2853 } 2854 2855 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2856 "Issue ADISC: did:x%x refcnt %d", 2857 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2858 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2859 if (rc == IOCB_ERROR) { 2860 lpfc_els_free_iocb(phba, elsiocb); 2861 lpfc_nlp_put(ndlp); 2862 goto err; 2863 } 2864 2865 return 0; 2866 2867 err: 2868 spin_lock_irq(&ndlp->lock); 2869 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2870 spin_unlock_irq(&ndlp->lock); 2871 return 1; 2872 } 2873 2874 /** 2875 * lpfc_cmpl_els_logo - Completion callback function for logo 2876 * @phba: pointer to lpfc hba data structure. 2877 * @cmdiocb: pointer to lpfc command iocb data structure. 2878 * @rspiocb: pointer to lpfc response iocb data structure. 2879 * 2880 * This routine is the completion function for issuing the ELS Logout (LOGO) 2881 * command. If no error status was reported from the LOGO response, the 2882 * state machine of the associated ndlp shall be invoked for transition with 2883 * respect to NLP_EVT_CMPL_LOGO event. 
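 *
 * The completion also wakes up any caller that is waiting synchronously
 * for the LOGO to finish. A hypothetical waiter (sketch only, using the
 * same flag and waitqueue fields that are consumed below) would look
 * roughly like:
 *
 *   DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *
 *   ndlp->logo_waitq = &wq;
 *   spin_lock_irq(&ndlp->lock);
 *   ndlp->upcall_flags |= NLP_WAIT_FOR_LOGO;
 *   spin_unlock_irq(&ndlp->lock);
 *   if (!lpfc_issue_els_logo(vport, ndlp, 0))
 *           wait_event_timeout(wq,
 *                        !(ndlp->upcall_flags & NLP_WAIT_FOR_LOGO),
 *                        msecs_to_jiffies(30 * 1000));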
2884 **/ 2885 static void 2886 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2887 struct lpfc_iocbq *rspiocb) 2888 { 2889 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2890 struct lpfc_vport *vport = ndlp->vport; 2891 IOCB_t *irsp; 2892 unsigned long flags; 2893 uint32_t skip_recovery = 0; 2894 int wake_up_waiter = 0; 2895 2896 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2897 cmdiocb->context_un.rsp_iocb = rspiocb; 2898 2899 irsp = &(rspiocb->iocb); 2900 spin_lock_irq(&ndlp->lock); 2901 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2902 if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) { 2903 wake_up_waiter = 1; 2904 ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 2905 } 2906 spin_unlock_irq(&ndlp->lock); 2907 2908 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2909 "LOGO cmpl: status:x%x/x%x did:x%x", 2910 irsp->ulpStatus, irsp->un.ulpWord[4], 2911 ndlp->nlp_DID); 2912 2913 /* LOGO completes to NPort <nlp_DID> */ 2914 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2915 "0105 LOGO completes to NPort x%x " 2916 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2917 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2918 irsp->ulpStatus, irsp->un.ulpWord[4], 2919 irsp->ulpTimeout, vport->num_disc_nodes); 2920 2921 if (lpfc_els_chk_latt(vport)) { 2922 skip_recovery = 1; 2923 goto out; 2924 } 2925 2926 /* The LOGO will not be retried on failure. A LOGO was 2927 * issued to the remote rport and a ACC or RJT or no Answer are 2928 * all acceptable. Note the failure and move forward with 2929 * discovery. The PLOGI will retry. 2930 */ 2931 if (irsp->ulpStatus) { 2932 /* LOGO failed */ 2933 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2934 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2935 ndlp->nlp_DID, irsp->ulpStatus, 2936 irsp->un.ulpWord[4]); 2937 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2938 if (lpfc_error_lost_link(irsp)) { 2939 skip_recovery = 1; 2940 goto out; 2941 } 2942 } 2943 2944 /* Call state machine. This will unregister the rpi if needed. */ 2945 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2946 2947 /* The driver sets this flag for an NPIV instance that doesn't want to 2948 * log into the remote port. 2949 */ 2950 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2951 spin_lock_irq(&ndlp->lock); 2952 if (phba->sli_rev == LPFC_SLI_REV4) 2953 ndlp->nlp_flag |= NLP_RELEASE_RPI; 2954 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2955 spin_unlock_irq(&ndlp->lock); 2956 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2957 NLP_EVT_DEVICE_RM); 2958 lpfc_els_free_iocb(phba, cmdiocb); 2959 lpfc_nlp_put(ndlp); 2960 2961 /* Presume the node was released. */ 2962 return; 2963 } 2964 2965 out: 2966 /* Driver is done with the IO. */ 2967 lpfc_els_free_iocb(phba, cmdiocb); 2968 lpfc_nlp_put(ndlp); 2969 2970 /* At this point, the LOGO processing is complete. NOTE: For a 2971 * pt2pt topology, we are assuming the NPortID will only change 2972 * on link up processing. For a LOGO / PLOGI initiated by the 2973 * Initiator, we are assuming the NPortID is not going to change. 2974 */ 2975 2976 if (wake_up_waiter && ndlp->logo_waitq) 2977 wake_up(ndlp->logo_waitq); 2978 /* 2979 * If the node is a target, the handling attempts to recover the port. 2980 * For any other port type, the rpi is unregistered as an implicit 2981 * LOGO. 
2982 */ 2983 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 2984 skip_recovery == 0) { 2985 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2986 spin_lock_irqsave(&ndlp->lock, flags); 2987 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2988 spin_unlock_irqrestore(&ndlp->lock, flags); 2989 2990 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2991 "3187 LOGO completes to NPort x%x: Start " 2992 "Recovery Data: x%x x%x x%x x%x\n", 2993 ndlp->nlp_DID, irsp->ulpStatus, 2994 irsp->un.ulpWord[4], irsp->ulpTimeout, 2995 vport->num_disc_nodes); 2996 lpfc_disc_start(vport); 2997 return; 2998 } 2999 3000 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 3001 * driver sends a LOGO to the rport to clean up. For fabric and 3002 * initiator ports, clean up the node as long as the node is not 3003 * registered with the transport. 3004 */ 3005 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 3006 spin_lock_irq(&ndlp->lock); 3007 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3008 spin_unlock_irq(&ndlp->lock); 3009 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 3010 NLP_EVT_DEVICE_RM); 3011 } 3012 } 3013 3014 /** 3015 * lpfc_issue_els_logo - Issue a logo to a node on a vport 3016 * @vport: pointer to a virtual N_Port data structure. 3017 * @ndlp: pointer to a node-list data structure. 3018 * @retry: number of retries to the command IOCB. 3019 * 3020 * This routine constructs and issues an ELS Logout (LOGO) iocb command 3021 * to a remote node, referred to by an @ndlp on a @vport. It constructs the 3022 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 3023 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 3024 * 3025 * Note that the ndlp reference count will be incremented by 1 for holding the 3026 * ndlp and the reference to ndlp will be stored into the context1 field of 3027 * the IOCB for the completion callback function to the LOGO ELS command.
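 *
 * For example, a discovery-path caller looks roughly like this (a
 * sketch; the retry count and the error handling are up to the caller):
 *
 *   lpfc_unreg_rpi(vport, ndlp);
 *   if (lpfc_issue_els_logo(vport, ndlp, 0))
 *           return;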
3028 * 3029 * Callers of this routine are expected to unregister the RPI first 3030 * 3031 * Return code 3032 * 0 - successfully issued logo 3033 * 1 - failed to issue logo 3034 **/ 3035 int 3036 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3037 uint8_t retry) 3038 { 3039 struct lpfc_hba *phba = vport->phba; 3040 struct lpfc_iocbq *elsiocb; 3041 uint8_t *pcmd; 3042 uint16_t cmdsize; 3043 int rc; 3044 3045 spin_lock_irq(&ndlp->lock); 3046 if (ndlp->nlp_flag & NLP_LOGO_SND) { 3047 spin_unlock_irq(&ndlp->lock); 3048 return 0; 3049 } 3050 spin_unlock_irq(&ndlp->lock); 3051 3052 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 3053 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3054 ndlp->nlp_DID, ELS_CMD_LOGO); 3055 if (!elsiocb) 3056 return 1; 3057 3058 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3059 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3060 pcmd += sizeof(uint32_t); 3061 3062 /* Fill in LOGO payload */ 3063 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3064 pcmd += sizeof(uint32_t); 3065 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3066 3067 phba->fc_stat.elsXmitLOGO++; 3068 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 3069 spin_lock_irq(&ndlp->lock); 3070 ndlp->nlp_flag |= NLP_LOGO_SND; 3071 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3072 spin_unlock_irq(&ndlp->lock); 3073 elsiocb->context1 = lpfc_nlp_get(ndlp); 3074 if (!elsiocb->context1) { 3075 lpfc_els_free_iocb(phba, elsiocb); 3076 goto err; 3077 } 3078 3079 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3080 "Issue LOGO: did:x%x refcnt %d", 3081 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3082 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3083 if (rc == IOCB_ERROR) { 3084 lpfc_els_free_iocb(phba, elsiocb); 3085 lpfc_nlp_put(ndlp); 3086 goto err; 3087 } 3088 3089 spin_lock_irq(&ndlp->lock); 3090 ndlp->nlp_prev_state = ndlp->nlp_state; 3091 spin_unlock_irq(&ndlp->lock); 3092 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3093 return 0; 3094 3095 err: 3096 spin_lock_irq(&ndlp->lock); 3097 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3098 spin_unlock_irq(&ndlp->lock); 3099 return 1; 3100 } 3101 3102 /** 3103 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3104 * @phba: pointer to lpfc hba data structure. 3105 * @cmdiocb: pointer to lpfc command iocb data structure. 3106 * @rspiocb: pointer to lpfc response iocb data structure. 3107 * 3108 * This routine is a generic completion callback function for ELS commands. 3109 * Specifically, it is the callback function which does not need to perform 3110 * any command specific operations. It is currently used by the ELS command 3111 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3112 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3113 * Other than certain debug loggings, this callback function simply invokes the 3114 * lpfc_els_chk_latt() routine to check whether link went down during the 3115 * discovery process. 
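 *
 * Issuing routines that need no command-specific cleanup just wire this
 * callback up before sending, as the RSCN and FARPR paths below do:
 *
 *   elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
 *   elsiocb->context1 = lpfc_nlp_get(ndlp);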
3116 **/ 3117 static void 3118 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3119 struct lpfc_iocbq *rspiocb) 3120 { 3121 struct lpfc_vport *vport = cmdiocb->vport; 3122 struct lpfc_nodelist *free_ndlp; 3123 IOCB_t *irsp; 3124 3125 irsp = &rspiocb->iocb; 3126 3127 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3128 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3129 irsp->ulpStatus, irsp->un.ulpWord[4], 3130 irsp->un.elsreq64.remoteID); 3131 3132 /* ELS cmd tag <ulpIoTag> completes */ 3133 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3134 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3135 irsp->ulpIoTag, irsp->ulpStatus, 3136 irsp->un.ulpWord[4], irsp->ulpTimeout); 3137 3138 /* Check to see if link went down during discovery */ 3139 lpfc_els_chk_latt(vport); 3140 3141 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 3142 3143 lpfc_els_free_iocb(phba, cmdiocb); 3144 lpfc_nlp_put(free_ndlp); 3145 } 3146 3147 /** 3148 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3149 * @vport: pointer to lpfc_vport data structure. 3150 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3151 * 3152 * This routine registers the rpi assigned to the fabric controller 3153 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3154 * state triggering a registration with the SCSI transport. 3155 * 3156 * This routine is single out because the fabric controller node 3157 * does not receive a PLOGI. This routine is consumed by the 3158 * SCR and RDF ELS commands. Callers are expected to qualify 3159 * with SLI4 first. 3160 **/ 3161 static int 3162 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3163 { 3164 int rc = 0; 3165 struct lpfc_hba *phba = vport->phba; 3166 struct lpfc_nodelist *ns_ndlp; 3167 LPFC_MBOXQ_t *mbox; 3168 struct lpfc_dmabuf *mp; 3169 3170 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3171 return rc; 3172 3173 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3174 if (!ns_ndlp) 3175 return -ENODEV; 3176 3177 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3178 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3179 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3180 ns_ndlp->nlp_state); 3181 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3182 return -ENODEV; 3183 3184 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3185 if (!mbox) { 3186 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3187 "0936 %s: no memory for reg_login " 3188 "Data: x%x x%x x%x x%x\n", __func__, 3189 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3190 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3191 return -ENOMEM; 3192 } 3193 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3194 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3195 if (rc) { 3196 rc = -EACCES; 3197 goto out; 3198 } 3199 3200 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3201 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3202 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3203 if (!mbox->ctx_ndlp) { 3204 rc = -ENOMEM; 3205 goto out_mem; 3206 } 3207 3208 mbox->vport = vport; 3209 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3210 if (rc == MBX_NOT_FINISHED) { 3211 rc = -ENODEV; 3212 lpfc_nlp_put(fc_ndlp); 3213 goto out_mem; 3214 } 3215 /* Success path. Exit. 
*/ 3216 lpfc_nlp_set_state(vport, fc_ndlp, 3217 NLP_STE_REG_LOGIN_ISSUE); 3218 return 0; 3219 3220 out_mem: 3221 fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3222 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 3223 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3224 kfree(mp); 3225 3226 out: 3227 mempool_free(mbox, phba->mbox_mem_pool); 3228 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3229 "0938 %s: failed to format reg_login " 3230 "Data: x%x x%x x%x x%x\n", __func__, 3231 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3232 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3233 return rc; 3234 } 3235 3236 /** 3237 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3238 * @phba: pointer to lpfc hba data structure. 3239 * @cmdiocb: pointer to lpfc command iocb data structure. 3240 * @rspiocb: pointer to lpfc response iocb data structure. 3241 * 3242 * This routine is a generic completion callback function for Discovery ELS cmd. 3243 * Currently used by the ELS command issuing routines for the ELS State Change 3244 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3245 * These commands will be retried once only for ELS timeout errors. 3246 **/ 3247 static void 3248 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3249 struct lpfc_iocbq *rspiocb) 3250 { 3251 struct lpfc_vport *vport = cmdiocb->vport; 3252 IOCB_t *irsp; 3253 struct lpfc_els_rdf_rsp *prdf; 3254 struct lpfc_dmabuf *pcmd, *prsp; 3255 u32 *pdata; 3256 u32 cmd; 3257 struct lpfc_nodelist *ndlp = cmdiocb->context1; 3258 3259 irsp = &rspiocb->iocb; 3260 3261 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3262 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3263 irsp->ulpStatus, irsp->un.ulpWord[4], 3264 irsp->un.elsreq64.remoteID); 3265 /* ELS cmd tag <ulpIoTag> completes */ 3266 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3267 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x " 3268 "x%x\n", 3269 irsp->ulpIoTag, irsp->ulpStatus, 3270 irsp->un.ulpWord[4], irsp->ulpTimeout, 3271 cmdiocb->retry); 3272 3273 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3274 if (!pcmd) 3275 goto out; 3276 3277 pdata = (u32 *)pcmd->virt; 3278 if (!pdata) 3279 goto out; 3280 cmd = *pdata; 3281 3282 /* Only 1 retry for ELS Timeout only */ 3283 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 3284 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3285 IOERR_SEQUENCE_TIMEOUT)) { 3286 cmdiocb->retry++; 3287 if (cmdiocb->retry <= 1) { 3288 switch (cmd) { 3289 case ELS_CMD_SCR: 3290 lpfc_issue_els_scr(vport, cmdiocb->retry); 3291 break; 3292 case ELS_CMD_EDC: 3293 lpfc_issue_els_edc(vport, cmdiocb->retry); 3294 break; 3295 case ELS_CMD_RDF: 3296 cmdiocb->context1 = NULL; /* save ndlp refcnt */ 3297 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3298 break; 3299 } 3300 goto out; 3301 } 3302 phba->fc_stat.elsRetryExceeded++; 3303 } 3304 if (cmd == ELS_CMD_EDC) { 3305 /* must be called before checking uplStatus and returning */ 3306 lpfc_cmpl_els_edc(phba, cmdiocb, rspiocb); 3307 return; 3308 } 3309 if (irsp->ulpStatus) { 3310 /* ELS discovery cmd completes with error */ 3311 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 3312 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3313 irsp->ulpStatus, irsp->un.ulpWord[4]); 3314 goto out; 3315 } 3316 3317 /* The RDF response doesn't have any impact on the running driver 3318 * but the notification descriptors are dumped here for support. 
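 *
 * Each granted tag echoes one of the FPIN descriptor tags that were
 * requested when the RDF was issued (for example link-integrity or
 * congestion notifications), so the log lines below record which event
 * classes the fabric has agreed to deliver.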
3319 */ 3320 if (cmd == ELS_CMD_RDF) { 3321 int i; 3322 3323 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3324 if (!prsp) 3325 goto out; 3326 3327 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3328 if (!prdf) 3329 goto out; 3330 3331 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3332 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3333 lpfc_printf_vlog(vport, KERN_INFO, 3334 LOG_ELS | LOG_CGN_MGMT, 3335 "4677 Fabric RDF Notification Grant " 3336 "Data: 0x%08x Reg: %x %x\n", 3337 be32_to_cpu( 3338 prdf->reg_d1.desc_tags[i]), 3339 phba->cgn_reg_signal, 3340 phba->cgn_reg_fpin); 3341 } 3342 3343 out: 3344 /* Check to see if link went down during discovery */ 3345 lpfc_els_chk_latt(vport); 3346 lpfc_els_free_iocb(phba, cmdiocb); 3347 lpfc_nlp_put(ndlp); 3348 return; 3349 } 3350 3351 /** 3352 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3353 * @vport: pointer to a host virtual N_Port data structure. 3354 * @retry: retry counter for the command IOCB. 3355 * 3356 * This routine issues a State Change Request (SCR) to a fabric node 3357 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3358 * first search the @vport node list to find the matching ndlp. If no such 3359 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3360 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3361 * routine is invoked to send the SCR IOCB. 3362 * 3363 * Note that the ndlp reference count will be incremented by 1 for holding the 3364 * ndlp and the reference to ndlp will be stored into the context1 field of 3365 * the IOCB for the completion callback function to the SCR ELS command. 3366 * 3367 * Return code 3368 * 0 - Successfully issued scr command 3369 * 1 - Failed to issue scr command 3370 **/ 3371 int 3372 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3373 { 3374 int rc = 0; 3375 struct lpfc_hba *phba = vport->phba; 3376 struct lpfc_iocbq *elsiocb; 3377 uint8_t *pcmd; 3378 uint16_t cmdsize; 3379 struct lpfc_nodelist *ndlp; 3380 3381 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3382 3383 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3384 if (!ndlp) { 3385 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3386 if (!ndlp) 3387 return 1; 3388 lpfc_enqueue_node(vport, ndlp); 3389 } 3390 3391 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3392 ndlp->nlp_DID, ELS_CMD_SCR); 3393 if (!elsiocb) 3394 return 1; 3395 3396 if (phba->sli_rev == LPFC_SLI_REV4) { 3397 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3398 if (rc) { 3399 lpfc_els_free_iocb(phba, elsiocb); 3400 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3401 "0937 %s: Failed to reg fc node, rc %d\n", 3402 __func__, rc); 3403 return 1; 3404 } 3405 } 3406 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3407 3408 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3409 pcmd += sizeof(uint32_t); 3410 3411 /* For SCR, remainder of payload is SCR parameter page */ 3412 memset(pcmd, 0, sizeof(SCR)); 3413 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3414 3415 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3416 "Issue SCR: did:x%x", 3417 ndlp->nlp_DID, 0, 0); 3418 3419 phba->fc_stat.elsXmitSCR++; 3420 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3421 elsiocb->context1 = lpfc_nlp_get(ndlp); 3422 if (!elsiocb->context1) { 3423 lpfc_els_free_iocb(phba, elsiocb); 3424 return 1; 3425 } 3426 3427 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3428 "Issue SCR: did:x%x refcnt %d", 3429 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3430 3431 rc = lpfc_sli_issue_iocb(phba, 
LPFC_ELS_RING, elsiocb, 0); 3432 if (rc == IOCB_ERROR) { 3433 lpfc_els_free_iocb(phba, elsiocb); 3434 lpfc_nlp_put(ndlp); 3435 return 1; 3436 } 3437 3438 return 0; 3439 } 3440 3441 /** 3442 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3443 * or the other nport (pt2pt). 3444 * @vport: pointer to a host virtual N_Port data structure. 3445 * @retry: number of retries to the command IOCB. 3446 * 3447 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3448 * when connected to a fabric, or to the remote port when connected 3449 * in point-to-point mode. When sent to the Fabric Controller, it will 3450 * replay the RSCN to registered recipients. 3451 * 3452 * Note that the ndlp reference count will be incremented by 1 for holding the 3453 * ndlp and the reference to ndlp will be stored into the context1 field of 3454 * the IOCB for the completion callback function to the RSCN ELS command. 3455 * 3456 * Return code 3457 * 0 - Successfully issued RSCN command 3458 * 1 - Failed to issue RSCN command 3459 **/ 3460 int 3461 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3462 { 3463 int rc = 0; 3464 struct lpfc_hba *phba = vport->phba; 3465 struct lpfc_iocbq *elsiocb; 3466 struct lpfc_nodelist *ndlp; 3467 struct { 3468 struct fc_els_rscn rscn; 3469 struct fc_els_rscn_page portid; 3470 } *event; 3471 uint32_t nportid; 3472 uint16_t cmdsize = sizeof(*event); 3473 3474 /* Not supported for private loop */ 3475 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3476 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3477 return 1; 3478 3479 if (vport->fc_flag & FC_PT2PT) { 3480 /* find any mapped nport - that would be the other nport */ 3481 ndlp = lpfc_findnode_mapped(vport); 3482 if (!ndlp) 3483 return 1; 3484 } else { 3485 nportid = FC_FID_FCTRL; 3486 /* find the fabric controller node */ 3487 ndlp = lpfc_findnode_did(vport, nportid); 3488 if (!ndlp) { 3489 /* if one didn't exist, make one */ 3490 ndlp = lpfc_nlp_init(vport, nportid); 3491 if (!ndlp) 3492 return 1; 3493 lpfc_enqueue_node(vport, ndlp); 3494 } 3495 } 3496 3497 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3498 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3499 3500 if (!elsiocb) 3501 return 1; 3502 3503 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 3504 3505 event->rscn.rscn_cmd = ELS_RSCN; 3506 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3507 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3508 3509 nportid = vport->fc_myDID; 3510 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3511 event->portid.rscn_page_flags = 0; 3512 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3513 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3514 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3515 3516 phba->fc_stat.elsXmitRSCN++; 3517 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3518 elsiocb->context1 = lpfc_nlp_get(ndlp); 3519 if (!elsiocb->context1) { 3520 lpfc_els_free_iocb(phba, elsiocb); 3521 return 1; 3522 } 3523 3524 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3525 "Issue RSCN: did:x%x", 3526 ndlp->nlp_DID, 0, 0); 3527 3528 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3529 if (rc == IOCB_ERROR) { 3530 lpfc_els_free_iocb(phba, elsiocb); 3531 lpfc_nlp_put(ndlp); 3532 return 1; 3533 } 3534 3535 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3536 * trigger the release of node. 
3537 */ 3538 if (!(vport->fc_flag & FC_PT2PT)) 3539 lpfc_nlp_put(ndlp); 3540 return 0; 3541 } 3542 3543 /** 3544 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3545 * @vport: pointer to a host virtual N_Port data structure. 3546 * @nportid: N_Port identifier to the remote node. 3547 * @retry: number of retries to the command IOCB. 3548 * 3549 * This routine issues a Fibre Channel Address Resolution Response 3550 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3551 * is passed into the function. It first search the @vport node list to find 3552 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3553 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3554 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3555 * 3556 * Note that the ndlp reference count will be incremented by 1 for holding the 3557 * ndlp and the reference to ndlp will be stored into the context1 field of 3558 * the IOCB for the completion callback function to the FARPR ELS command. 3559 * 3560 * Return code 3561 * 0 - Successfully issued farpr command 3562 * 1 - Failed to issue farpr command 3563 **/ 3564 static int 3565 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3566 { 3567 int rc = 0; 3568 struct lpfc_hba *phba = vport->phba; 3569 struct lpfc_iocbq *elsiocb; 3570 FARP *fp; 3571 uint8_t *pcmd; 3572 uint32_t *lp; 3573 uint16_t cmdsize; 3574 struct lpfc_nodelist *ondlp; 3575 struct lpfc_nodelist *ndlp; 3576 3577 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3578 3579 ndlp = lpfc_findnode_did(vport, nportid); 3580 if (!ndlp) { 3581 ndlp = lpfc_nlp_init(vport, nportid); 3582 if (!ndlp) 3583 return 1; 3584 lpfc_enqueue_node(vport, ndlp); 3585 } 3586 3587 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3588 ndlp->nlp_DID, ELS_CMD_RNID); 3589 if (!elsiocb) 3590 return 1; 3591 3592 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3593 3594 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3595 pcmd += sizeof(uint32_t); 3596 3597 /* Fill in FARPR payload */ 3598 fp = (FARP *) (pcmd); 3599 memset(fp, 0, sizeof(FARP)); 3600 lp = (uint32_t *) pcmd; 3601 *lp++ = be32_to_cpu(nportid); 3602 *lp++ = be32_to_cpu(vport->fc_myDID); 3603 fp->Rflags = 0; 3604 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3605 3606 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3607 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3608 ondlp = lpfc_findnode_did(vport, nportid); 3609 if (ondlp) { 3610 memcpy(&fp->OportName, &ondlp->nlp_portname, 3611 sizeof(struct lpfc_name)); 3612 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3613 sizeof(struct lpfc_name)); 3614 } 3615 3616 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3617 "Issue FARPR: did:x%x", 3618 ndlp->nlp_DID, 0, 0); 3619 3620 phba->fc_stat.elsXmitFARPR++; 3621 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3622 elsiocb->context1 = lpfc_nlp_get(ndlp); 3623 if (!elsiocb->context1) { 3624 lpfc_els_free_iocb(phba, elsiocb); 3625 return 1; 3626 } 3627 3628 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3629 if (rc == IOCB_ERROR) { 3630 /* The additional lpfc_nlp_put will cause the following 3631 * lpfc_els_free_iocb routine to trigger the release of 3632 * the node. 3633 */ 3634 lpfc_els_free_iocb(phba, elsiocb); 3635 lpfc_nlp_put(ndlp); 3636 return 1; 3637 } 3638 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3639 * trigger the release of the node. 
3640 */ 3641 /* Don't release reference count as RDF is likely outstanding */ 3642 return 0; 3643 } 3644 3645 /** 3646 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3647 * @vport: pointer to a host virtual N_Port data structure. 3648 * @retry: retry counter for the command IOCB. 3649 * 3650 * This routine issues an ELS RDF to the Fabric Controller to register 3651 * for diagnostic functions. 3652 * 3653 * Note that the ndlp reference count will be incremented by 1 for holding the 3654 * ndlp and the reference to ndlp will be stored into the context1 field of 3655 * the IOCB for the completion callback function to the RDF ELS command. 3656 * 3657 * Return code 3658 * 0 - Successfully issued rdf command 3659 * 1 - Failed to issue rdf command 3660 **/ 3661 int 3662 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3663 { 3664 struct lpfc_hba *phba = vport->phba; 3665 struct lpfc_iocbq *elsiocb; 3666 struct lpfc_els_rdf_req *prdf; 3667 struct lpfc_nodelist *ndlp; 3668 uint16_t cmdsize; 3669 int rc; 3670 3671 cmdsize = sizeof(*prdf); 3672 3673 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3674 if (!ndlp) { 3675 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3676 if (!ndlp) 3677 return -ENODEV; 3678 lpfc_enqueue_node(vport, ndlp); 3679 } 3680 3681 /* RDF ELS is not required on an NPIV VN_Port. */ 3682 if (vport->port_type == LPFC_NPIV_PORT) 3683 return -EACCES; 3684 3685 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3686 ndlp->nlp_DID, ELS_CMD_RDF); 3687 if (!elsiocb) 3688 return -ENOMEM; 3689 3690 /* Configure the payload for the supported FPIN events. */ 3691 prdf = (struct lpfc_els_rdf_req *) 3692 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 3693 memset(prdf, 0, cmdsize); 3694 prdf->rdf.fpin_cmd = ELS_RDF; 3695 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3696 sizeof(struct fc_els_rdf)); 3697 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3698 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3699 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3700 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3701 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3702 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3703 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3704 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3705 3706 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3707 "6444 Xmit RDF to remote NPORT x%x Reg: %x %x\n", 3708 ndlp->nlp_DID, phba->cgn_reg_signal, 3709 phba->cgn_reg_fpin); 3710 3711 phba->cgn_fpin_frequency = LPFC_FPIN_INIT_FREQ; 3712 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3713 elsiocb->context1 = lpfc_nlp_get(ndlp); 3714 if (!elsiocb->context1) { 3715 lpfc_els_free_iocb(phba, elsiocb); 3716 return -EIO; 3717 } 3718 3719 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3720 "Issue RDF: did:x%x refcnt %d", 3721 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3722 3723 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3724 if (rc == IOCB_ERROR) { 3725 lpfc_els_free_iocb(phba, elsiocb); 3726 lpfc_nlp_put(ndlp); 3727 return -EIO; 3728 } 3729 return 0; 3730 } 3731 3732 /** 3733 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3734 * @vport: pointer to a host virtual N_Port data structure. 3735 * @cmdiocb: pointer to lpfc command iocb data structure. 3736 * @ndlp: pointer to a node-list data structure. 
 *
 * A received RDF implies a possible change to fabric supported diagnostic
 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new
 * RDF request to reregister for the supported diagnostic functions.
 *
 * Return code
 *   0 - Success
 *   -EIO - Failed to process received RDF
 **/
static int
lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	/* Send LS_ACC */
	if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
				 "1623 Failed to RDF_ACC from x%x for x%x\n",
				 ndlp->nlp_DID, vport->fc_myDID);
		return -EIO;
	}

	/* Issue a new RDF to reregister */
	if (lpfc_issue_els_rdf(vport, 0)) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT,
				 "2623 Failed to re register RDF for x%x\n",
				 vport->fc_myDID);
		return -EIO;
	}

	return 0;
}

/**
 * lpfc_least_capable_settings - helper function for EDC rsp processing
 * @phba: pointer to lpfc hba data structure.
 * @pcgd: pointer to congestion detection descriptor in EDC rsp.
 *
 * This helper routine determines the least capable setting for
 * congestion signals and signal frequency, including scale, from the
 * congestion detection descriptor in the EDC rsp. The routine
 * sets @phba values in preparation for a set_features mailbox.
 **/
static void
lpfc_least_capable_settings(struct lpfc_hba *phba,
			    struct fc_diag_cg_sig_desc *pcgd)
{
	u32 rsp_sig_cap = 0, drv_sig_cap = 0;
	u32 rsp_sig_freq_cyc = 0, rsp_sig_freq_scale = 0;
	struct lpfc_cgn_info *cp;
	u32 crc;
	u16 sig_freq;

	/* Get rsp signal and frequency capabilities. */
	rsp_sig_cap = be32_to_cpu(pcgd->xmt_signal_capability);
	rsp_sig_freq_cyc = be16_to_cpu(pcgd->xmt_signal_frequency.count);
	rsp_sig_freq_scale = be16_to_cpu(pcgd->xmt_signal_frequency.units);

	/* If the Fport does not support signals, set FPIN only */
	if (rsp_sig_cap == EDC_CG_SIG_NOTSUPPORTED)
		goto out_no_support;

	/* Apply the xmt scale to the xmt cycle to get the correct frequency.
	 * Adapter default is 100 milliseconds. Convert all xmt cycle values
	 * to milliseconds.
	 */
	switch (rsp_sig_freq_scale) {
	case EDC_CG_SIGFREQ_SEC:
		rsp_sig_freq_cyc *= MSEC_PER_SEC;
		break;
	case EDC_CG_SIGFREQ_MSEC:
		rsp_sig_freq_cyc = 1;
		break;
	default:
		goto out_no_support;
	}

	/* Convenient shorthand. */
	drv_sig_cap = phba->cgn_reg_signal;

	/* Choose the least capable frequency. */
	if (rsp_sig_freq_cyc > phba->cgn_sig_freq)
		phba->cgn_sig_freq = rsp_sig_freq_cyc;

	/* There should be some common signal support. Settle on the least
	 * capable signal and adjust the FPIN values. Initialize defaults
	 * to ease the decision.
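	 *
	 * For example (per the assignments that follow), if the Fport grants
	 * warning signals only while the driver registered for both warning
	 * and alarm signals, the driver settles on warning-only signals and
	 * keeps FPIN alarms enabled.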
3823 */ 3824 phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM; 3825 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3826 if (rsp_sig_cap == EDC_CG_SIG_WARN_ONLY && 3827 (drv_sig_cap == EDC_CG_SIG_WARN_ONLY || 3828 drv_sig_cap == EDC_CG_SIG_WARN_ALARM)) { 3829 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3830 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3831 } 3832 if (rsp_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3833 if (drv_sig_cap == EDC_CG_SIG_WARN_ALARM) { 3834 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ALARM; 3835 phba->cgn_reg_fpin = LPFC_CGN_FPIN_NONE; 3836 } 3837 if (drv_sig_cap == EDC_CG_SIG_WARN_ONLY) { 3838 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY; 3839 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN; 3840 } 3841 } 3842 3843 if (!phba->cgn_i) 3844 return; 3845 3846 /* Update signal frequency in congestion info buffer */ 3847 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; 3848 3849 /* Frequency (in ms) Signal Warning/Signal Congestion Notifications 3850 * are received by the HBA 3851 */ 3852 sig_freq = phba->cgn_sig_freq; 3853 3854 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY) 3855 cp->cgn_warn_freq = cpu_to_le16(sig_freq); 3856 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) { 3857 cp->cgn_alarm_freq = cpu_to_le16(sig_freq); 3858 cp->cgn_warn_freq = cpu_to_le16(sig_freq); 3859 } 3860 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 3861 cp->cgn_info_crc = cpu_to_le32(crc); 3862 return; 3863 3864 out_no_support: 3865 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 3866 phba->cgn_sig_freq = 0; 3867 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 3868 } 3869 3870 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 3871 FC_LS_TLV_DTAG_INIT); 3872 3873 /** 3874 * lpfc_cmpl_els_edc - Completion callback function for EDC 3875 * @phba: pointer to lpfc hba data structure. 3876 * @cmdiocb: pointer to lpfc command iocb data structure. 3877 * @rspiocb: pointer to lpfc response iocb data structure. 3878 * 3879 * This routine is the completion callback function for issuing the Exchange 3880 * Diagnostic Capabilities (EDC) command. The driver issues an EDC to 3881 * notify the FPort of its Congestion and Link Fault capabilities. This 3882 * routine parses the FPort's response and decides on the least common 3883 * values applicable to both FPort and NPort for Warnings and Alarms that 3884 * are communicated via hardware signals. 
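 * If no Congestion Signaling capability descriptor is present in the
 * response, the driver falls back to FPIN-only registration (see the
 * rcv_cap_desc handling at the end of this routine).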
3885 **/ 3886 static void 3887 lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3888 struct lpfc_iocbq *rspiocb) 3889 { 3890 IOCB_t *irsp; 3891 struct fc_els_edc_resp *edc_rsp; 3892 struct fc_tlv_desc *tlv; 3893 struct fc_diag_cg_sig_desc *pcgd; 3894 struct fc_diag_lnkflt_desc *plnkflt; 3895 struct lpfc_dmabuf *pcmd, *prsp; 3896 const char *dtag_nm; 3897 u32 *pdata, dtag; 3898 int desc_cnt = 0, bytes_remain; 3899 bool rcv_cap_desc = false; 3900 struct lpfc_nodelist *ndlp; 3901 3902 irsp = &rspiocb->iocb; 3903 ndlp = cmdiocb->context1; 3904 3905 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 3906 "EDC cmpl: status:x%x/x%x did:x%x", 3907 irsp->ulpStatus, irsp->un.ulpWord[4], 3908 irsp->un.elsreq64.remoteID); 3909 3910 /* ELS cmd tag <ulpIoTag> completes */ 3911 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3912 "4201 EDC cmd tag x%x completes Data: x%x x%x x%x\n", 3913 irsp->ulpIoTag, irsp->ulpStatus, 3914 irsp->un.ulpWord[4], irsp->ulpTimeout); 3915 3916 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3917 if (!pcmd) 3918 goto out; 3919 3920 pdata = (u32 *)pcmd->virt; 3921 if (!pdata) 3922 goto out; 3923 3924 /* Need to clear signal values, send features MB and RDF with FPIN. */ 3925 if (irsp->ulpStatus) 3926 goto out; 3927 3928 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3929 if (!prsp) 3930 goto out; 3931 3932 edc_rsp = prsp->virt; 3933 if (!edc_rsp) 3934 goto out; 3935 3936 /* ELS cmd tag <ulpIoTag> completes */ 3937 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3938 "4676 Fabric EDC Rsp: " 3939 "0x%02x, 0x%08x\n", 3940 edc_rsp->acc_hdr.la_cmd, 3941 be32_to_cpu(edc_rsp->desc_list_len)); 3942 3943 /* 3944 * Payload length in bytes is the response descriptor list 3945 * length minus the 12 bytes of Link Service Request 3946 * Information descriptor in the reply. 
3947 */ 3948 bytes_remain = be32_to_cpu(edc_rsp->desc_list_len) - 3949 sizeof(struct fc_els_lsri_desc); 3950 if (bytes_remain <= 0) 3951 goto out; 3952 3953 tlv = edc_rsp->desc; 3954 3955 /* 3956 * cycle through EDC diagnostic descriptors to find the 3957 * congestion signaling capability descriptor 3958 */ 3959 while (bytes_remain) { 3960 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 3961 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 3962 "6461 Truncated TLV hdr on " 3963 "Diagnostic descriptor[%d]\n", 3964 desc_cnt); 3965 goto out; 3966 } 3967 3968 dtag = be32_to_cpu(tlv->desc_tag); 3969 switch (dtag) { 3970 case ELS_DTAG_LNK_FAULT_CAP: 3971 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 3972 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 3973 sizeof(struct fc_diag_lnkflt_desc)) { 3974 lpfc_printf_log( 3975 phba, KERN_WARNING, LOG_CGN_MGMT, 3976 "6462 Truncated Link Fault Diagnostic " 3977 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 3978 desc_cnt, bytes_remain, 3979 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 3980 sizeof(struct fc_diag_cg_sig_desc)); 3981 goto out; 3982 } 3983 plnkflt = (struct fc_diag_lnkflt_desc *)tlv; 3984 lpfc_printf_log( 3985 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 3986 "4617 Link Fault Desc Data: 0x%08x 0x%08x " 3987 "0x%08x 0x%08x 0x%08x\n", 3988 be32_to_cpu(plnkflt->desc_tag), 3989 be32_to_cpu(plnkflt->desc_len), 3990 be32_to_cpu( 3991 plnkflt->degrade_activate_threshold), 3992 be32_to_cpu( 3993 plnkflt->degrade_deactivate_threshold), 3994 be32_to_cpu(plnkflt->fec_degrade_interval)); 3995 break; 3996 case ELS_DTAG_CG_SIGNAL_CAP: 3997 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 3998 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 3999 sizeof(struct fc_diag_cg_sig_desc)) { 4000 lpfc_printf_log( 4001 phba, KERN_WARNING, LOG_CGN_MGMT, 4002 "6463 Truncated Cgn Signal Diagnostic " 4003 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 4004 desc_cnt, bytes_remain, 4005 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 4006 sizeof(struct fc_diag_cg_sig_desc)); 4007 goto out; 4008 } 4009 4010 pcgd = (struct fc_diag_cg_sig_desc *)tlv; 4011 lpfc_printf_log( 4012 phba, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 4013 "4616 CGN Desc Data: 0x%08x 0x%08x " 4014 "0x%08x 0x%04x 0x%04x 0x%08x 0x%04x 0x%04x\n", 4015 be32_to_cpu(pcgd->desc_tag), 4016 be32_to_cpu(pcgd->desc_len), 4017 be32_to_cpu(pcgd->xmt_signal_capability), 4018 be32_to_cpu(pcgd->xmt_signal_frequency.count), 4019 be32_to_cpu(pcgd->xmt_signal_frequency.units), 4020 be32_to_cpu(pcgd->rcv_signal_capability), 4021 be32_to_cpu(pcgd->rcv_signal_frequency.count), 4022 be32_to_cpu(pcgd->rcv_signal_frequency.units)); 4023 4024 /* Compare driver and Fport capabilities and choose 4025 * least common. 
4026 */ 4027 lpfc_least_capable_settings(phba, pcgd); 4028 rcv_cap_desc = true; 4029 break; 4030 default: 4031 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 4032 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 4033 "4919 unknown Diagnostic " 4034 "Descriptor[%d]: tag x%x (%s)\n", 4035 desc_cnt, dtag, dtag_nm); 4036 } 4037 4038 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 4039 tlv = fc_tlv_next_desc(tlv); 4040 desc_cnt++; 4041 } 4042 4043 out: 4044 if (!rcv_cap_desc) { 4045 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 4046 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 4047 phba->cgn_sig_freq = 0; 4048 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_CGN_MGMT, 4049 "4202 EDC rsp error - sending RDF " 4050 "for FPIN only.\n"); 4051 } 4052 4053 lpfc_config_cgn_signal(phba); 4054 4055 /* Check to see if link went down during discovery */ 4056 lpfc_els_chk_latt(phba->pport); 4057 lpfc_debugfs_disc_trc(phba->pport, LPFC_DISC_TRC_ELS_CMD, 4058 "EDC Cmpl: did:x%x refcnt %d", 4059 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 4060 lpfc_els_free_iocb(phba, cmdiocb); 4061 lpfc_nlp_put(ndlp); 4062 } 4063 4064 static void 4065 lpfc_format_edc_cgn_desc(struct lpfc_hba *phba, struct fc_diag_cg_sig_desc *cgd) 4066 { 4067 /* We are assuming cgd was zero'ed before calling this routine */ 4068 4069 /* Configure the congestion detection capability */ 4070 cgd->desc_tag = cpu_to_be32(ELS_DTAG_CG_SIGNAL_CAP); 4071 4072 /* Descriptor len doesn't include the tag or len fields. */ 4073 cgd->desc_len = cpu_to_be32( 4074 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_diag_cg_sig_desc)); 4075 4076 /* xmt_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4077 * xmt_signal_frequency.count already set to 0. 4078 * xmt_signal_frequency.units already set to 0. 4079 */ 4080 4081 if (phba->cmf_active_mode == LPFC_CFG_OFF) { 4082 /* rcv_signal_capability already set to EDC_CG_SIG_NOTSUPPORTED. 4083 * rcv_signal_frequency.count already set to 0. 4084 * rcv_signal_frequency.units already set to 0. 4085 */ 4086 phba->cgn_sig_freq = 0; 4087 return; 4088 } 4089 switch (phba->cgn_reg_signal) { 4090 case EDC_CG_SIG_WARN_ONLY: 4091 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ONLY); 4092 break; 4093 case EDC_CG_SIG_WARN_ALARM: 4094 cgd->rcv_signal_capability = cpu_to_be32(EDC_CG_SIG_WARN_ALARM); 4095 break; 4096 default: 4097 /* rcv_signal_capability left 0 thus no support */ 4098 break; 4099 } 4100 4101 /* We start negotiation with lpfc_fabric_cgn_frequency, after 4102 * the completion we settle on the higher frequency. 4103 */ 4104 cgd->rcv_signal_frequency.count = 4105 cpu_to_be16(lpfc_fabric_cgn_frequency); 4106 cgd->rcv_signal_frequency.units = 4107 cpu_to_be16(EDC_CG_SIGFREQ_MSEC); 4108 } 4109 4110 /** 4111 * lpfc_issue_els_edc - Exchange Diagnostic Capabilities with the fabric. 4112 * @vport: pointer to a host virtual N_Port data structure. 4113 * @retry: retry counter for the command iocb. 4114 * 4115 * This routine issues an ELS EDC to the F-Port Controller to communicate 4116 * this N_Port's support of hardware signals in its Congestion 4117 * Capabilities Descriptor. 4118 * 4119 * Note: This routine does not check if one or more signals are 4120 * set in the cgn_reg_signal parameter. The caller makes the 4121 * decision to enforce cgn_reg_signal as nonzero or zero depending 4122 * on the conditions. During Fabric requests, the driver 4123 * requires cgn_reg_signals to be nonzero. 
But a dynamic request
 * to set the congestion mode to OFF from Monitor or Manage
 * would correctly issue an EDC with no signals enabled to
 * turn off switch functionality and then update the FW.
 *
 * Return code
 *   0 - Successfully issued edc command
 *   1 - Failed to issue edc command
 **/
int
lpfc_issue_els_edc(struct lpfc_vport *vport, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_els_edc_req *edc_req;
	struct fc_diag_cg_sig_desc *cgn_desc;
	u16 cmdsize;
	struct lpfc_nodelist *ndlp;
	u8 *pcmd = NULL;
	u32 edc_req_size, cgn_desc_size;
	int rc;

	if (vport->port_type == LPFC_NPIV_PORT)
		return -EACCES;

	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return -ENODEV;

	/* If HBA doesn't support signals, drop into RDF */
	if (!phba->cgn_init_reg_signal)
		goto try_rdf;

	edc_req_size = sizeof(struct fc_els_edc);
	cgn_desc_size = sizeof(struct fc_diag_cg_sig_desc);
	cmdsize = edc_req_size + cgn_desc_size;
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_EDC);
	if (!elsiocb)
		goto try_rdf;

	/* Configure the payload for the supported Diagnostics capabilities. */
	pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
	memset(pcmd, 0, cmdsize);
	edc_req = (struct lpfc_els_edc_req *)pcmd;
	edc_req->edc.desc_len = cpu_to_be32(cgn_desc_size);
	edc_req->edc.edc_cmd = ELS_EDC;

	cgn_desc = &edc_req->cgn_desc;

	lpfc_format_edc_cgn_desc(phba, cgn_desc);

	phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_CGN_MGMT,
			 "4623 Xmit EDC to remote "
			 "NPORT x%x reg_sig x%x reg_fpin:x%x\n",
			 ndlp->nlp_DID, phba->cgn_reg_signal,
			 phba->cgn_reg_fpin);

	elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return -EIO;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue EDC: did:x%x refcnt %d",
			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		/* The additional lpfc_nlp_put will cause the following
		 * lpfc_els_free_iocb routine to trigger the release of
		 * the node.
		 */
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		goto try_rdf;
	}
	return 0;
try_rdf:
	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
	rc = lpfc_issue_els_rdf(vport, 0);
	return rc;
}

/**
 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
 * @vport: pointer to a host virtual N_Port data structure.
 * @nlp: pointer to a node-list data structure.
 *
 * This routine cancels the timer with a delayed IOCB-command retry for
 * a @vport's @nlp. It stops the timer for the delayed function retry and
 * removes the ELS retry event if one is present. In addition, if the
 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
 * commands are sent for the @vport's nodes that require issuing discovery
 * ADISC.
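 * When no discovery nodes remain, the routine may also clear
 * FC_NDISC_ACTIVE, cancel the discovery timer, and end any RSCN
 * processing for the @vport.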
4222 **/ 4223 void 4224 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 4225 { 4226 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4227 struct lpfc_work_evt *evtp; 4228 4229 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 4230 return; 4231 spin_lock_irq(&nlp->lock); 4232 nlp->nlp_flag &= ~NLP_DELAY_TMO; 4233 spin_unlock_irq(&nlp->lock); 4234 del_timer_sync(&nlp->nlp_delayfunc); 4235 nlp->nlp_last_elscmd = 0; 4236 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 4237 list_del_init(&nlp->els_retry_evt.evt_listp); 4238 /* Decrement nlp reference count held for the delayed retry */ 4239 evtp = &nlp->els_retry_evt; 4240 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 4241 } 4242 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 4243 spin_lock_irq(&nlp->lock); 4244 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4245 spin_unlock_irq(&nlp->lock); 4246 if (vport->num_disc_nodes) { 4247 if (vport->port_state < LPFC_VPORT_READY) { 4248 /* Check if there are more ADISCs to be sent */ 4249 lpfc_more_adisc(vport); 4250 } else { 4251 /* Check if there are more PLOGIs to be sent */ 4252 lpfc_more_plogi(vport); 4253 if (vport->num_disc_nodes == 0) { 4254 spin_lock_irq(shost->host_lock); 4255 vport->fc_flag &= ~FC_NDISC_ACTIVE; 4256 spin_unlock_irq(shost->host_lock); 4257 lpfc_can_disctmo(vport); 4258 lpfc_end_rscn(vport); 4259 } 4260 } 4261 } 4262 } 4263 return; 4264 } 4265 4266 /** 4267 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 4268 * @t: pointer to the timer function associated data (ndlp). 4269 * 4270 * This routine is invoked by the ndlp delayed-function timer to check 4271 * whether there is any pending ELS retry event(s) with the node. If not, it 4272 * simply returns. Otherwise, if there is at least one ELS delayed event, it 4273 * adds the delayed events to the HBA work list and invokes the 4274 * lpfc_worker_wake_up() routine to wake up worker thread to process the 4275 * event. Note that lpfc_nlp_get() is called before posting the event to 4276 * the work list to hold reference count of ndlp so that it guarantees the 4277 * reference to ndlp will still be available when the worker thread gets 4278 * to the event associated with the ndlp. 4279 **/ 4280 void 4281 lpfc_els_retry_delay(struct timer_list *t) 4282 { 4283 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 4284 struct lpfc_vport *vport = ndlp->vport; 4285 struct lpfc_hba *phba = vport->phba; 4286 unsigned long flags; 4287 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 4288 4289 spin_lock_irqsave(&phba->hbalock, flags); 4290 if (!list_empty(&evtp->evt_listp)) { 4291 spin_unlock_irqrestore(&phba->hbalock, flags); 4292 return; 4293 } 4294 4295 /* We need to hold the node by incrementing the reference 4296 * count until the queued work is done 4297 */ 4298 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 4299 if (evtp->evt_arg1) { 4300 evtp->evt = LPFC_EVT_ELS_RETRY; 4301 list_add_tail(&evtp->evt_listp, &phba->work_list); 4302 lpfc_worker_wake_up(phba); 4303 } 4304 spin_unlock_irqrestore(&phba->hbalock, flags); 4305 return; 4306 } 4307 4308 /** 4309 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 4310 * @ndlp: pointer to a node-list data structure. 4311 * 4312 * This routine is the worker-thread handler for processing the @ndlp delayed 4313 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 4314 * the last ELS command from the associated ndlp and invokes the proper ELS 4315 * function according to the delayed ELS command to retry the command. 4316 **/ 4317 void 4318 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 4319 { 4320 struct lpfc_vport *vport = ndlp->vport; 4321 uint32_t cmd, retry; 4322 4323 spin_lock_irq(&ndlp->lock); 4324 cmd = ndlp->nlp_last_elscmd; 4325 ndlp->nlp_last_elscmd = 0; 4326 4327 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 4328 spin_unlock_irq(&ndlp->lock); 4329 return; 4330 } 4331 4332 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 4333 spin_unlock_irq(&ndlp->lock); 4334 /* 4335 * If a discovery event readded nlp_delayfunc after timer 4336 * firing and before processing the timer, cancel the 4337 * nlp_delayfunc. 4338 */ 4339 del_timer_sync(&ndlp->nlp_delayfunc); 4340 retry = ndlp->nlp_retry; 4341 ndlp->nlp_retry = 0; 4342 4343 switch (cmd) { 4344 case ELS_CMD_FLOGI: 4345 lpfc_issue_els_flogi(vport, ndlp, retry); 4346 break; 4347 case ELS_CMD_PLOGI: 4348 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 4349 ndlp->nlp_prev_state = ndlp->nlp_state; 4350 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 4351 } 4352 break; 4353 case ELS_CMD_ADISC: 4354 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 4355 ndlp->nlp_prev_state = ndlp->nlp_state; 4356 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4357 } 4358 break; 4359 case ELS_CMD_PRLI: 4360 case ELS_CMD_NVMEPRLI: 4361 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 4362 ndlp->nlp_prev_state = ndlp->nlp_state; 4363 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4364 } 4365 break; 4366 case ELS_CMD_LOGO: 4367 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 4368 ndlp->nlp_prev_state = ndlp->nlp_state; 4369 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4370 } 4371 break; 4372 case ELS_CMD_FDISC: 4373 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 4374 lpfc_issue_els_fdisc(vport, ndlp, retry); 4375 break; 4376 } 4377 return; 4378 } 4379 4380 /** 4381 * lpfc_link_reset - Issue link reset 4382 * @vport: pointer to a virtual N_Port data structure. 4383 * 4384 * This routine performs link reset by sending INIT_LINK mailbox command. 4385 * For SLI-3 adapter, link attention interrupt is enabled before issuing 4386 * INIT_LINK mailbox command. 
 *
 * Return code
 *   0 - Link reset initiated successfully
 *   1 - Failed to initiate link reset
 **/
int
lpfc_link_reset(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	uint32_t control;
	int rc;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			 "2851 Attempt link reset\n");
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2852 Failed to allocate mbox memory");
		return 1;
	}

	/* Enable Link attention interrupts */
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_PROCESS_LA;
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_init_link(phba, mbox, phba->cfg_topology,
		       phba->cfg_link_speed);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2853 Failed to issue INIT_LINK "
				"mbox command, rc:x%x\n", rc);
		mempool_free(mbox, phba->mbox_mem_pool);
		return 1;
	}

	return 0;
}

/**
 * lpfc_els_retry - Make retry decision on an els command iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine makes a retry decision on an ELS command IOCB, which has
 * failed. The following ELS IOCBs use this function for retrying the command
 * when a previously issued command responded with an error status: FLOGI,
 * PLOGI, PRLI, ADISC and FDISC. Based on the ELS command type and the
 * returned error status, it makes the decision whether a retry shall be
 * issued for the command, and whether a retry shall be made immediately or
 * delayed. In the former case, the corresponding ELS command issuing-function
 * is called to retry the command. In the latter case, the ELS command shall
 * be posted to the ndlp delayed event and the delayed function timer set on
 * the ndlp for the delayed command issuing.
 *
 * Return code
 *   0 - No retry of els command is made
 *   1 - Immediate or delayed retry of els command is made
 **/
static int
lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *elscmd;
	struct ls_rjt stat;
	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
	int logerr = 0;
	uint32_t cmd = 0;
	uint32_t did;
	int link_reset = 0, rc;

	/* Note: context2 may be 0 for internal driver abort
	 * of a delayed ELS command.
4476 */ 4477 4478 if (pcmd && pcmd->virt) { 4479 elscmd = (uint32_t *) (pcmd->virt); 4480 cmd = *elscmd++; 4481 } 4482 4483 if (ndlp) 4484 did = ndlp->nlp_DID; 4485 else { 4486 /* We should only hit this case for retrying PLOGI */ 4487 did = irsp->un.elsreq64.remoteID; 4488 ndlp = lpfc_findnode_did(vport, did); 4489 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 4490 return 0; 4491 } 4492 4493 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4494 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 4495 *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did); 4496 4497 switch (irsp->ulpStatus) { 4498 case IOSTAT_FCP_RSP_ERROR: 4499 break; 4500 case IOSTAT_REMOTE_STOP: 4501 if (phba->sli_rev == LPFC_SLI_REV4) { 4502 /* This IO was aborted by the target, we don't 4503 * know the rxid and because we did not send the 4504 * ABTS we cannot generate and RRQ. 4505 */ 4506 lpfc_set_rrq_active(phba, ndlp, 4507 cmdiocb->sli4_lxritag, 0, 0); 4508 } 4509 break; 4510 case IOSTAT_LOCAL_REJECT: 4511 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 4512 case IOERR_LOOP_OPEN_FAILURE: 4513 if (cmd == ELS_CMD_FLOGI) { 4514 if (PCI_DEVICE_ID_HORNET == 4515 phba->pcidev->device) { 4516 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 4517 phba->pport->fc_myDID = 0; 4518 phba->alpa_map[0] = 0; 4519 phba->alpa_map[1] = 0; 4520 } 4521 } 4522 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4523 delay = 1000; 4524 retry = 1; 4525 break; 4526 4527 case IOERR_ILLEGAL_COMMAND: 4528 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4529 "0124 Retry illegal cmd x%x " 4530 "retry:x%x delay:x%x\n", 4531 cmd, cmdiocb->retry, delay); 4532 retry = 1; 4533 /* All command's retry policy */ 4534 maxretry = 8; 4535 if (cmdiocb->retry > 2) 4536 delay = 1000; 4537 break; 4538 4539 case IOERR_NO_RESOURCES: 4540 logerr = 1; /* HBA out of resources */ 4541 retry = 1; 4542 if (cmdiocb->retry > 100) 4543 delay = 100; 4544 maxretry = 250; 4545 break; 4546 4547 case IOERR_ILLEGAL_FRAME: 4548 delay = 100; 4549 retry = 1; 4550 break; 4551 4552 case IOERR_INVALID_RPI: 4553 if (cmd == ELS_CMD_PLOGI && 4554 did == NameServer_DID) { 4555 /* Continue forever if plogi to */ 4556 /* the nameserver fails */ 4557 maxretry = 0; 4558 delay = 100; 4559 } 4560 retry = 1; 4561 break; 4562 4563 case IOERR_SEQUENCE_TIMEOUT: 4564 if (cmd == ELS_CMD_PLOGI && 4565 did == NameServer_DID && 4566 (cmdiocb->retry + 1) == maxretry) { 4567 /* Reset the Link */ 4568 link_reset = 1; 4569 break; 4570 } 4571 retry = 1; 4572 delay = 100; 4573 break; 4574 } 4575 break; 4576 4577 case IOSTAT_NPORT_RJT: 4578 case IOSTAT_FABRIC_RJT: 4579 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 4580 retry = 1; 4581 break; 4582 } 4583 break; 4584 4585 case IOSTAT_NPORT_BSY: 4586 case IOSTAT_FABRIC_BSY: 4587 logerr = 1; /* Fabric / Remote NPort out of resources */ 4588 retry = 1; 4589 break; 4590 4591 case IOSTAT_LS_RJT: 4592 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 4593 /* Added for Vendor specifc support 4594 * Just keep retrying for these Rsn / Exp codes 4595 */ 4596 switch (stat.un.b.lsRjtRsnCode) { 4597 case LSRJT_UNABLE_TPC: 4598 /* The driver has a VALID PLOGI but the rport has 4599 * rejected the PRLI - can't do it now. Delay 4600 * for 1 second and try again. 4601 * 4602 * However, if explanation is REQ_UNSUPPORTED there's 4603 * no point to retry PRLI. 
4604 */ 4605 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 4606 stat.un.b.lsRjtRsnCodeExp != 4607 LSEXP_REQ_UNSUPPORTED) { 4608 delay = 1000; 4609 maxretry = lpfc_max_els_tries + 1; 4610 retry = 1; 4611 break; 4612 } 4613 4614 /* Legacy bug fix code for targets with PLOGI delays. */ 4615 if (stat.un.b.lsRjtRsnCodeExp == 4616 LSEXP_CMD_IN_PROGRESS) { 4617 if (cmd == ELS_CMD_PLOGI) { 4618 delay = 1000; 4619 maxretry = 48; 4620 } 4621 retry = 1; 4622 break; 4623 } 4624 if (stat.un.b.lsRjtRsnCodeExp == 4625 LSEXP_CANT_GIVE_DATA) { 4626 if (cmd == ELS_CMD_PLOGI) { 4627 delay = 1000; 4628 maxretry = 48; 4629 } 4630 retry = 1; 4631 break; 4632 } 4633 if (cmd == ELS_CMD_PLOGI) { 4634 delay = 1000; 4635 maxretry = lpfc_max_els_tries + 1; 4636 retry = 1; 4637 break; 4638 } 4639 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4640 (cmd == ELS_CMD_FDISC) && 4641 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4642 lpfc_printf_vlog(vport, KERN_ERR, 4643 LOG_TRACE_EVENT, 4644 "0125 FDISC Failed (x%x). " 4645 "Fabric out of resources\n", 4646 stat.un.lsRjtError); 4647 lpfc_vport_set_state(vport, 4648 FC_VPORT_NO_FABRIC_RSCS); 4649 } 4650 break; 4651 4652 case LSRJT_LOGICAL_BSY: 4653 if ((cmd == ELS_CMD_PLOGI) || 4654 (cmd == ELS_CMD_PRLI) || 4655 (cmd == ELS_CMD_NVMEPRLI)) { 4656 delay = 1000; 4657 maxretry = 48; 4658 } else if (cmd == ELS_CMD_FDISC) { 4659 /* FDISC retry policy */ 4660 maxretry = 48; 4661 if (cmdiocb->retry >= 32) 4662 delay = 1000; 4663 } 4664 retry = 1; 4665 break; 4666 4667 case LSRJT_LOGICAL_ERR: 4668 /* There are some cases where switches return this 4669 * error when they are not ready and should be returning 4670 * Logical Busy. We should delay every time. 4671 */ 4672 if (cmd == ELS_CMD_FDISC && 4673 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4674 maxretry = 3; 4675 delay = 1000; 4676 retry = 1; 4677 } else if (cmd == ELS_CMD_FLOGI && 4678 stat.un.b.lsRjtRsnCodeExp == 4679 LSEXP_NOTHING_MORE) { 4680 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4681 retry = 1; 4682 lpfc_printf_vlog(vport, KERN_ERR, 4683 LOG_TRACE_EVENT, 4684 "0820 FLOGI Failed (x%x). " 4685 "BBCredit Not Supported\n", 4686 stat.un.lsRjtError); 4687 } 4688 break; 4689 4690 case LSRJT_PROTOCOL_ERR: 4691 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4692 (cmd == ELS_CMD_FDISC) && 4693 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4694 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4695 ) { 4696 lpfc_printf_vlog(vport, KERN_ERR, 4697 LOG_TRACE_EVENT, 4698 "0122 FDISC Failed (x%x). " 4699 "Fabric Detected Bad WWN\n", 4700 stat.un.lsRjtError); 4701 lpfc_vport_set_state(vport, 4702 FC_VPORT_FABRIC_REJ_WWN); 4703 } 4704 break; 4705 case LSRJT_VENDOR_UNIQUE: 4706 if ((stat.un.b.vendorUnique == 0x45) && 4707 (cmd == ELS_CMD_FLOGI)) { 4708 goto out_retry; 4709 } 4710 break; 4711 case LSRJT_CMD_UNSUPPORTED: 4712 /* lpfc nvmet returns this type of LS_RJT when it 4713 * receives an FCP PRLI because lpfc nvmet only 4714 * support NVME. ELS request is terminated for FCP4 4715 * on this rport. 
4716 */ 4717 if (stat.un.b.lsRjtRsnCodeExp == 4718 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 4719 spin_lock_irq(&ndlp->lock); 4720 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4721 spin_unlock_irq(&ndlp->lock); 4722 retry = 0; 4723 goto out_retry; 4724 } 4725 break; 4726 } 4727 break; 4728 4729 case IOSTAT_INTERMED_RSP: 4730 case IOSTAT_BA_RJT: 4731 break; 4732 4733 default: 4734 break; 4735 } 4736 4737 if (link_reset) { 4738 rc = lpfc_link_reset(vport); 4739 if (rc) { 4740 /* Do not give up. Retry PLOGI one more time and attempt 4741 * link reset if PLOGI fails again. 4742 */ 4743 retry = 1; 4744 delay = 100; 4745 goto out_retry; 4746 } 4747 return 1; 4748 } 4749 4750 if (did == FDMI_DID) 4751 retry = 1; 4752 4753 if ((cmd == ELS_CMD_FLOGI) && 4754 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4755 !lpfc_error_lost_link(irsp)) { 4756 /* FLOGI retry policy */ 4757 retry = 1; 4758 /* retry FLOGI forever */ 4759 if (phba->link_flag != LS_LOOPBACK_MODE) 4760 maxretry = 0; 4761 else 4762 maxretry = 2; 4763 4764 if (cmdiocb->retry >= 100) 4765 delay = 5000; 4766 else if (cmdiocb->retry >= 32) 4767 delay = 1000; 4768 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 4769 /* retry FDISCs every second up to devloss */ 4770 retry = 1; 4771 maxretry = vport->cfg_devloss_tmo; 4772 delay = 1000; 4773 } 4774 4775 cmdiocb->retry++; 4776 if (maxretry && (cmdiocb->retry >= maxretry)) { 4777 phba->fc_stat.elsRetryExceeded++; 4778 retry = 0; 4779 } 4780 4781 if ((vport->load_flag & FC_UNLOADING) != 0) 4782 retry = 0; 4783 4784 out_retry: 4785 if (retry) { 4786 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4787 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4788 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4789 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4790 "2849 Stop retry ELS command " 4791 "x%x to remote NPORT x%x, " 4792 "Data: x%x x%x\n", cmd, did, 4793 cmdiocb->retry, delay); 4794 return 0; 4795 } 4796 } 4797 4798 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4799 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4800 "0107 Retry ELS command x%x to remote " 4801 "NPORT x%x Data: x%x x%x\n", 4802 cmd, did, cmdiocb->retry, delay); 4803 4804 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4805 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 4806 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 4807 IOERR_NO_RESOURCES))) { 4808 /* Don't reset timer for no resources */ 4809 4810 /* If discovery / RSCN timer is running, reset it */ 4811 if (timer_pending(&vport->fc_disctmo) || 4812 (vport->fc_flag & FC_RSCN_MODE)) 4813 lpfc_set_disctmo(vport); 4814 } 4815 4816 phba->fc_stat.elsXmitRetry++; 4817 if (ndlp && delay) { 4818 phba->fc_stat.elsDelayRetry++; 4819 ndlp->nlp_retry = cmdiocb->retry; 4820 4821 /* delay is specified in milliseconds */ 4822 mod_timer(&ndlp->nlp_delayfunc, 4823 jiffies + msecs_to_jiffies(delay)); 4824 spin_lock_irq(&ndlp->lock); 4825 ndlp->nlp_flag |= NLP_DELAY_TMO; 4826 spin_unlock_irq(&ndlp->lock); 4827 4828 ndlp->nlp_prev_state = ndlp->nlp_state; 4829 if ((cmd == ELS_CMD_PRLI) || 4830 (cmd == ELS_CMD_NVMEPRLI)) 4831 lpfc_nlp_set_state(vport, ndlp, 4832 NLP_STE_PRLI_ISSUE); 4833 else if (cmd != ELS_CMD_ADISC) 4834 lpfc_nlp_set_state(vport, ndlp, 4835 NLP_STE_NPR_NODE); 4836 ndlp->nlp_last_elscmd = cmd; 4837 4838 return 1; 4839 } 4840 switch (cmd) { 4841 case ELS_CMD_FLOGI: 4842 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4843 return 1; 4844 case ELS_CMD_FDISC: 4845 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4846 return 1; 4847 case 
ELS_CMD_PLOGI: 4848 if (ndlp) { 4849 ndlp->nlp_prev_state = ndlp->nlp_state; 4850 lpfc_nlp_set_state(vport, ndlp, 4851 NLP_STE_PLOGI_ISSUE); 4852 } 4853 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4854 return 1; 4855 case ELS_CMD_ADISC: 4856 ndlp->nlp_prev_state = ndlp->nlp_state; 4857 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4858 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4859 return 1; 4860 case ELS_CMD_PRLI: 4861 case ELS_CMD_NVMEPRLI: 4862 ndlp->nlp_prev_state = ndlp->nlp_state; 4863 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4864 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4865 return 1; 4866 case ELS_CMD_LOGO: 4867 ndlp->nlp_prev_state = ndlp->nlp_state; 4868 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4869 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4870 return 1; 4871 } 4872 } 4873 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4874 if (logerr) { 4875 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4876 "0137 No retry ELS command x%x to remote " 4877 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4878 cmd, did, irsp->ulpStatus, 4879 irsp->un.ulpWord[4]); 4880 } 4881 else { 4882 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4883 "0108 No retry ELS command x%x to remote " 4884 "NPORT x%x Retried:%d Error:x%x/%x\n", 4885 cmd, did, cmdiocb->retry, irsp->ulpStatus, 4886 irsp->un.ulpWord[4]); 4887 } 4888 return 0; 4889 } 4890 4891 /** 4892 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4893 * @phba: pointer to lpfc hba data structure. 4894 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4895 * 4896 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4897 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4898 * checks to see whether there is a lpfc DMA buffer associated with the 4899 * response of the command IOCB. If so, it will be released before releasing 4900 * the lpfc DMA buffer associated with the IOCB itself. 4901 * 4902 * Return code 4903 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4904 **/ 4905 static int 4906 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 4907 { 4908 struct lpfc_dmabuf *buf_ptr; 4909 4910 /* Free the response before processing the command. */ 4911 if (!list_empty(&buf_ptr1->list)) { 4912 list_remove_head(&buf_ptr1->list, buf_ptr, 4913 struct lpfc_dmabuf, 4914 list); 4915 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4916 kfree(buf_ptr); 4917 } 4918 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 4919 kfree(buf_ptr1); 4920 return 0; 4921 } 4922 4923 /** 4924 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 4925 * @phba: pointer to lpfc hba data structure. 4926 * @buf_ptr: pointer to the lpfc dma buffer data structure. 4927 * 4928 * This routine releases the lpfc Direct Memory Access (DMA) buffer 4929 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 4930 * pool. 4931 * 4932 * Return code 4933 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4934 **/ 4935 static int 4936 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 4937 { 4938 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4939 kfree(buf_ptr); 4940 return 0; 4941 } 4942 4943 /** 4944 * lpfc_els_free_iocb - Free a command iocb and its associated resources 4945 * @phba: pointer to lpfc hba data structure. 4946 * @elsiocb: pointer to lpfc els command iocb data structure. 
4947 * 4948 * This routine frees a command IOCB and its associated resources. The 4949 * command IOCB data structure contains the reference to various associated 4950 * resources, these fields must be set to NULL if the associated reference 4951 * not present: 4952 * context1 - reference to ndlp 4953 * context2 - reference to cmd 4954 * context2->next - reference to rsp 4955 * context3 - reference to bpl 4956 * 4957 * It first properly decrements the reference count held on ndlp for the 4958 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not 4959 * set, it invokes the lpfc_els_free_data() routine to release the Direct 4960 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 4961 * adds the DMA buffer the @phba data structure for the delayed release. 4962 * If reference to the Buffer Pointer List (BPL) is present, the 4963 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 4964 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 4965 * invoked to release the IOCB data structure back to @phba IOCBQ list. 4966 * 4967 * Return code 4968 * 0 - Success (currently, always return 0) 4969 **/ 4970 int 4971 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 4972 { 4973 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 4974 4975 /* The I/O iocb is complete. Clear the context1 data. */ 4976 elsiocb->context1 = NULL; 4977 4978 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 4979 if (elsiocb->context2) { 4980 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 4981 /* Firmware could still be in progress of DMAing 4982 * payload, so don't free data buffer till after 4983 * a hbeat. 4984 */ 4985 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 4986 buf_ptr = elsiocb->context2; 4987 elsiocb->context2 = NULL; 4988 if (buf_ptr) { 4989 buf_ptr1 = NULL; 4990 spin_lock_irq(&phba->hbalock); 4991 if (!list_empty(&buf_ptr->list)) { 4992 list_remove_head(&buf_ptr->list, 4993 buf_ptr1, struct lpfc_dmabuf, 4994 list); 4995 INIT_LIST_HEAD(&buf_ptr1->list); 4996 list_add_tail(&buf_ptr1->list, 4997 &phba->elsbuf); 4998 phba->elsbuf_cnt++; 4999 } 5000 INIT_LIST_HEAD(&buf_ptr->list); 5001 list_add_tail(&buf_ptr->list, &phba->elsbuf); 5002 phba->elsbuf_cnt++; 5003 spin_unlock_irq(&phba->hbalock); 5004 } 5005 } else { 5006 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 5007 lpfc_els_free_data(phba, buf_ptr1); 5008 elsiocb->context2 = NULL; 5009 } 5010 } 5011 5012 if (elsiocb->context3) { 5013 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 5014 lpfc_els_free_bpl(phba, buf_ptr); 5015 elsiocb->context3 = NULL; 5016 } 5017 lpfc_sli_release_iocbq(phba, elsiocb); 5018 return 0; 5019 } 5020 5021 /** 5022 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 5023 * @phba: pointer to lpfc hba data structure. 5024 * @cmdiocb: pointer to lpfc command iocb data structure. 5025 * @rspiocb: pointer to lpfc response iocb data structure. 5026 * 5027 * This routine is the completion callback function to the Logout (LOGO) 5028 * Accept (ACC) Response ELS command. This routine is invoked to indicate 5029 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 5030 * release the ndlp if it has the last reference remaining (reference count 5031 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 5032 * field to NULL to inform the following lpfc_els_free_iocb() routine no 5033 * ndlp reference count needs to be decremented. 
Otherwise, the ndlp 5034 * reference use-count shall be decremented by the lpfc_els_free_iocb() 5035 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 5036 * IOCB data structure. 5037 **/ 5038 static void 5039 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 5040 struct lpfc_iocbq *rspiocb) 5041 { 5042 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 5043 struct lpfc_vport *vport = cmdiocb->vport; 5044 IOCB_t *irsp; 5045 5046 irsp = &rspiocb->iocb; 5047 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5048 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 5049 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 5050 /* ACC to LOGO completes to NPort <nlp_DID> */ 5051 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5052 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 5053 "Data: x%x x%x x%x\n", 5054 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 5055 ndlp->nlp_state, ndlp->nlp_rpi); 5056 5057 /* This clause allows the LOGO ACC to complete and free resources 5058 * for the Fabric Domain Controller. It does deliberately skip 5059 * the unreg_rpi and release rpi because some fabrics send RDP 5060 * requests after logging out from the initiator. 5061 */ 5062 if (ndlp->nlp_type & NLP_FABRIC && 5063 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 5064 goto out; 5065 5066 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 5067 5068 /* If PLOGI is being retried, PLOGI completion will cleanup the 5069 * node. The NLP_NPR_2B_DISC flag needs to be retained to make 5070 * progress on nodes discovered from last RSCN. 5071 */ 5072 if ((ndlp->nlp_flag & NLP_DELAY_TMO) && 5073 (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) 5074 goto out; 5075 5076 /* NPort Recovery mode or node is just allocated */ 5077 if (!lpfc_nlp_not_used(ndlp)) { 5078 /* A LOGO is completing and the node is in NPR state. 5079 * If this a fabric node that cleared its transport 5080 * registration, release the rpi. 5081 */ 5082 spin_lock_irq(&ndlp->lock); 5083 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 5084 if (phba->sli_rev == LPFC_SLI_REV4) 5085 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5086 spin_unlock_irq(&ndlp->lock); 5087 lpfc_unreg_rpi(vport, ndlp); 5088 } else { 5089 /* Indicate the node has already released, should 5090 * not reference to it from within lpfc_els_free_iocb. 5091 */ 5092 cmdiocb->context1 = NULL; 5093 } 5094 } 5095 out: 5096 /* 5097 * The driver received a LOGO from the rport and has ACK'd it. 5098 * At this point, the driver is done so release the IOCB 5099 */ 5100 lpfc_els_free_iocb(phba, cmdiocb); 5101 lpfc_nlp_put(ndlp); 5102 } 5103 5104 /** 5105 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 5106 * @phba: pointer to lpfc hba data structure. 5107 * @pmb: pointer to the driver internal queue element for mailbox command. 5108 * 5109 * This routine is the completion callback function for unregister default 5110 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 5111 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 5112 * decrements the ndlp reference count held for this completion callback 5113 * function. After that, it invokes the lpfc_nlp_not_used() to check 5114 * whether there is only one reference left on the ndlp. If so, it will 5115 * perform one more decrement and trigger the release of the ndlp. 
 **/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
	u32 mbx_flag = pmb->mbox_flag;
	u32 mbx_cmd = pmb->u.mb.mbxCommand;

	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	if (ndlp) {
		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
				 "0006 rpi x%x DID:%x flg:%x %d x%px "
				 "mbx_cmd x%x mbx_flag x%x x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 kref_read(&ndlp->kref), ndlp, mbx_cmd,
				 mbx_flag, pmb);

		/* This ends the default/temporary RPI cleanup logic for this
		 * ndlp, and the node and rpi need to be released. Free the rpi
		 * first on an UNREG_LOGIN and then release the final
		 * references.
		 */
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		if (mbx_cmd == MBX_UNREG_LOGIN)
			ndlp->nlp_flag &= ~NLP_UNREG_INP;
		spin_unlock_irq(&ndlp->lock);
		lpfc_nlp_put(ndlp);
		lpfc_drop_node(ndlp->vport, ndlp);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for an ELS Response IOCB
 * command. In the normal case, this callback function just properly sets the
 * nlp_flag bitmap in the ndlp data structure. If the mbox command reference
 * field in the command IOCB is not NULL, the referred mailbox command will
 * be sent out, and this routine then invokes the lpfc_els_free_iocb() routine
 * to release the IOCB.
 **/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
	struct Scsi_Host *shost = vport ?
lpfc_shost_from_vport(vport) : NULL; 5176 IOCB_t *irsp; 5177 LPFC_MBOXQ_t *mbox = NULL; 5178 struct lpfc_dmabuf *mp = NULL; 5179 5180 irsp = &rspiocb->iocb; 5181 5182 if (!vport) { 5183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5184 "3177 ELS response failed\n"); 5185 goto out; 5186 } 5187 if (cmdiocb->context_un.mbox) 5188 mbox = cmdiocb->context_un.mbox; 5189 5190 /* Check to see if link went down during discovery */ 5191 if (!ndlp || lpfc_els_chk_latt(vport)) { 5192 if (mbox) { 5193 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 5194 if (mp) { 5195 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5196 kfree(mp); 5197 } 5198 mempool_free(mbox, phba->mbox_mem_pool); 5199 } 5200 goto out; 5201 } 5202 5203 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5204 "ELS rsp cmpl: status:x%x/x%x did:x%x", 5205 irsp->ulpStatus, irsp->un.ulpWord[4], 5206 cmdiocb->iocb.un.elsreq64.remoteID); 5207 /* ELS response tag <ulpIoTag> completes */ 5208 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5209 "0110 ELS response tag x%x completes " 5210 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n", 5211 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 5212 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 5213 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5214 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox); 5215 if (mbox) { 5216 if ((rspiocb->iocb.ulpStatus == 0) && 5217 (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 5218 if (!lpfc_unreg_rpi(vport, ndlp) && 5219 (!(vport->fc_flag & FC_PT2PT))) { 5220 if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 5221 lpfc_printf_vlog(vport, KERN_INFO, 5222 LOG_DISCOVERY, 5223 "0314 PLOGI recov " 5224 "DID x%x " 5225 "Data: x%x x%x x%x\n", 5226 ndlp->nlp_DID, 5227 ndlp->nlp_state, 5228 ndlp->nlp_rpi, 5229 ndlp->nlp_flag); 5230 mp = mbox->ctx_buf; 5231 if (mp) { 5232 lpfc_mbuf_free(phba, mp->virt, 5233 mp->phys); 5234 kfree(mp); 5235 } 5236 mempool_free(mbox, phba->mbox_mem_pool); 5237 goto out; 5238 } 5239 } 5240 5241 /* Increment reference count to ndlp to hold the 5242 * reference to ndlp for the callback function. 5243 */ 5244 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5245 if (!mbox->ctx_ndlp) 5246 goto out; 5247 5248 mbox->vport = vport; 5249 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 5250 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 5251 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 5252 } 5253 else { 5254 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 5255 ndlp->nlp_prev_state = ndlp->nlp_state; 5256 lpfc_nlp_set_state(vport, ndlp, 5257 NLP_STE_REG_LOGIN_ISSUE); 5258 } 5259 5260 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 5261 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 5262 != MBX_NOT_FINISHED) 5263 goto out; 5264 5265 /* Decrement the ndlp reference count we 5266 * set for this failed mailbox command. 
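 * Since the mailbox could not be issued, the NLP_REG_LOGIN_SEND flag
 * set above is cleared as well, the failure is logged, and the mailbox
 * resources are freed below.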
5267 */ 5268 lpfc_nlp_put(ndlp); 5269 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 5270 5271 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 5272 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5273 "0138 ELS rsp: Cannot issue reg_login for x%x " 5274 "Data: x%x x%x x%x\n", 5275 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5276 ndlp->nlp_rpi); 5277 } 5278 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 5279 if (mp) { 5280 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5281 kfree(mp); 5282 } 5283 mempool_free(mbox, phba->mbox_mem_pool); 5284 } 5285 out: 5286 if (ndlp && shost) { 5287 spin_lock_irq(&ndlp->lock); 5288 if (mbox) 5289 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 5290 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 5291 spin_unlock_irq(&ndlp->lock); 5292 } 5293 5294 /* An SLI4 NPIV instance wants to drop the node at this point under 5295 * these conditions and release the RPI. 5296 */ 5297 if (phba->sli_rev == LPFC_SLI_REV4 && 5298 (vport && vport->port_type == LPFC_NPIV_PORT) && 5299 ndlp->nlp_flag & NLP_RELEASE_RPI) { 5300 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5301 spin_lock_irq(&ndlp->lock); 5302 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5303 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5304 spin_unlock_irq(&ndlp->lock); 5305 lpfc_drop_node(vport, ndlp); 5306 } 5307 5308 /* Release the originating I/O reference. */ 5309 lpfc_els_free_iocb(phba, cmdiocb); 5310 lpfc_nlp_put(ndlp); 5311 return; 5312 } 5313 5314 /** 5315 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 5316 * @vport: pointer to a host virtual N_Port data structure. 5317 * @flag: the els command code to be accepted. 5318 * @oldiocb: pointer to the original lpfc command iocb data structure. 5319 * @ndlp: pointer to a node-list data structure. 5320 * @mbox: pointer to the driver internal queue element for mailbox command. 5321 * 5322 * This routine prepares and issues an Accept (ACC) response IOCB 5323 * command. It uses the @flag to properly set up the IOCB field for the 5324 * specific ACC response command to be issued and invokes the 5325 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 5326 * @mbox pointer is passed in, it will be put into the context_un.mbox 5327 * field of the IOCB for the completion callback function to issue the 5328 * mailbox command to the HBA later when callback is invoked. 5329 * 5330 * Note that the ndlp reference count will be incremented by 1 for holding the 5331 * ndlp and the reference to ndlp will be stored into the context1 field of 5332 * the IOCB for the completion callback function to the corresponding 5333 * response ELS IOCB command. 
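 * The @flag argument selects the ACC payload that is built below: a
 * bare ACC, an ACC carrying service parameters for FLOGI or PLOGI, a
 * PRLO ACC, or an RDF response with an LS Request Information
 * descriptor.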
5334 * 5335 * Return code 5336 * 0 - Successfully issued acc response 5337 * 1 - Failed to issue acc response 5338 **/ 5339 int 5340 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 5341 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5342 LPFC_MBOXQ_t *mbox) 5343 { 5344 struct lpfc_hba *phba = vport->phba; 5345 IOCB_t *icmd; 5346 IOCB_t *oldcmd; 5347 struct lpfc_iocbq *elsiocb; 5348 uint8_t *pcmd; 5349 struct serv_parm *sp; 5350 uint16_t cmdsize; 5351 int rc; 5352 ELS_PKT *els_pkt_ptr; 5353 struct fc_els_rdf_resp *rdf_resp; 5354 5355 oldcmd = &oldiocb->iocb; 5356 5357 switch (flag) { 5358 case ELS_CMD_ACC: 5359 cmdsize = sizeof(uint32_t); 5360 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5361 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5362 if (!elsiocb) { 5363 spin_lock_irq(&ndlp->lock); 5364 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5365 spin_unlock_irq(&ndlp->lock); 5366 return 1; 5367 } 5368 5369 icmd = &elsiocb->iocb; 5370 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5371 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5372 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5373 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5374 pcmd += sizeof(uint32_t); 5375 5376 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5377 "Issue ACC: did:x%x flg:x%x", 5378 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5379 break; 5380 case ELS_CMD_FLOGI: 5381 case ELS_CMD_PLOGI: 5382 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 5383 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5384 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5385 if (!elsiocb) 5386 return 1; 5387 5388 icmd = &elsiocb->iocb; 5389 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5390 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5391 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5392 5393 if (mbox) 5394 elsiocb->context_un.mbox = mbox; 5395 5396 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5397 pcmd += sizeof(uint32_t); 5398 sp = (struct serv_parm *)pcmd; 5399 5400 if (flag == ELS_CMD_FLOGI) { 5401 /* Copy the received service parameters back */ 5402 memcpy(sp, &phba->fc_fabparam, 5403 sizeof(struct serv_parm)); 5404 5405 /* Clear the F_Port bit */ 5406 sp->cmn.fPort = 0; 5407 5408 /* Mark all class service parameters as invalid */ 5409 sp->cls1.classValid = 0; 5410 sp->cls2.classValid = 0; 5411 sp->cls3.classValid = 0; 5412 sp->cls4.classValid = 0; 5413 5414 /* Copy our worldwide names */ 5415 memcpy(&sp->portName, &vport->fc_sparam.portName, 5416 sizeof(struct lpfc_name)); 5417 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 5418 sizeof(struct lpfc_name)); 5419 } else { 5420 memcpy(pcmd, &vport->fc_sparam, 5421 sizeof(struct serv_parm)); 5422 5423 sp->cmn.valid_vendor_ver_level = 0; 5424 memset(sp->un.vendorVersion, 0, 5425 sizeof(sp->un.vendorVersion)); 5426 sp->cmn.bbRcvSizeMsb &= 0xF; 5427 5428 /* If our firmware supports this feature, convey that 5429 * info to the target using the vendor specific field. 
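 * The vendor version descriptor is marked valid, stamped with the
 * Emulex vendor ID (LPFC_VV_EMLX_ID), and flagged with
 * LPFC_VV_SUPPRESS_RSP when the SLI layer reports response
 * suppression support.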
5430 */ 5431 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 5432 sp->cmn.valid_vendor_ver_level = 1; 5433 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 5434 sp->un.vv.flags = 5435 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 5436 } 5437 } 5438 5439 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5440 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 5441 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5442 break; 5443 case ELS_CMD_PRLO: 5444 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 5445 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5446 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 5447 if (!elsiocb) 5448 return 1; 5449 5450 icmd = &elsiocb->iocb; 5451 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5452 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5453 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5454 5455 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 5456 sizeof(uint32_t) + sizeof(PRLO)); 5457 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 5458 els_pkt_ptr = (ELS_PKT *) pcmd; 5459 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 5460 5461 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5462 "Issue ACC PRLO: did:x%x flg:x%x", 5463 ndlp->nlp_DID, ndlp->nlp_flag, 0); 5464 break; 5465 case ELS_CMD_RDF: 5466 cmdsize = sizeof(*rdf_resp); 5467 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 5468 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5469 if (!elsiocb) 5470 return 1; 5471 5472 icmd = &elsiocb->iocb; 5473 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5474 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5475 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5476 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 5477 memset(rdf_resp, 0, sizeof(*rdf_resp)); 5478 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 5479 5480 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 5481 rdf_resp->desc_list_len = cpu_to_be32(12); 5482 5483 /* FC-LS-5 specifies LS REQ Information descriptor */ 5484 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 5485 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 5486 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 5487 break; 5488 default: 5489 return 1; 5490 } 5491 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 5492 spin_lock_irq(&ndlp->lock); 5493 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 5494 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 5495 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5496 spin_unlock_irq(&ndlp->lock); 5497 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 5498 } else { 5499 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5500 } 5501 5502 phba->fc_stat.elsXmitACC++; 5503 elsiocb->context1 = lpfc_nlp_get(ndlp); 5504 if (!elsiocb->context1) { 5505 lpfc_els_free_iocb(phba, elsiocb); 5506 return 1; 5507 } 5508 5509 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5510 if (rc == IOCB_ERROR) { 5511 lpfc_els_free_iocb(phba, elsiocb); 5512 lpfc_nlp_put(ndlp); 5513 return 1; 5514 } 5515 5516 /* Xmit ELS ACC response tag <ulpIoTag> */ 5517 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5518 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5519 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5520 "RPI: x%x, fc_flag x%x refcnt %d\n", 5521 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5522 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5523 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5524 return 0; 5525 } 5526 5527 /** 5528 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5529 * @vport: pointer to a virtual N_Port data structure. 
5530 * @rejectError: reject response to issue 5531 * @oldiocb: pointer to the original lpfc command iocb data structure. 5532 * @ndlp: pointer to a node-list data structure. 5533 * @mbox: pointer to the driver internal queue element for mailbox command. 5534 * 5535 * This routine prepares and issue an Reject (RJT) response IOCB 5536 * command. If a @mbox pointer is passed in, it will be put into the 5537 * context_un.mbox field of the IOCB for the completion callback function 5538 * to issue to the HBA later. 5539 * 5540 * Note that the ndlp reference count will be incremented by 1 for holding the 5541 * ndlp and the reference to ndlp will be stored into the context1 field of 5542 * the IOCB for the completion callback function to the reject response 5543 * ELS IOCB command. 5544 * 5545 * Return code 5546 * 0 - Successfully issued reject response 5547 * 1 - Failed to issue reject response 5548 **/ 5549 int 5550 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5551 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5552 LPFC_MBOXQ_t *mbox) 5553 { 5554 int rc; 5555 struct lpfc_hba *phba = vport->phba; 5556 IOCB_t *icmd; 5557 IOCB_t *oldcmd; 5558 struct lpfc_iocbq *elsiocb; 5559 uint8_t *pcmd; 5560 uint16_t cmdsize; 5561 5562 cmdsize = 2 * sizeof(uint32_t); 5563 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5564 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5565 if (!elsiocb) 5566 return 1; 5567 5568 icmd = &elsiocb->iocb; 5569 oldcmd = &oldiocb->iocb; 5570 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5571 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5572 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5573 5574 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5575 pcmd += sizeof(uint32_t); 5576 *((uint32_t *) (pcmd)) = rejectError; 5577 5578 if (mbox) 5579 elsiocb->context_un.mbox = mbox; 5580 5581 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5582 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5583 "0129 Xmit ELS RJT x%x response tag x%x " 5584 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5585 "rpi x%x\n", 5586 rejectError, elsiocb->iotag, 5587 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 5588 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5589 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5590 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5591 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5592 5593 phba->fc_stat.elsXmitLSRJT++; 5594 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5595 elsiocb->context1 = lpfc_nlp_get(ndlp); 5596 if (!elsiocb->context1) { 5597 lpfc_els_free_iocb(phba, elsiocb); 5598 return 1; 5599 } 5600 5601 /* The NPIV instance is rejecting this unsolicited ELS. Make sure the 5602 * node's assigned RPI needs to be released as this node will get 5603 * freed. 5604 */ 5605 if (phba->sli_rev == LPFC_SLI_REV4 && 5606 vport->port_type == LPFC_NPIV_PORT) { 5607 spin_lock_irq(&ndlp->lock); 5608 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5609 spin_unlock_irq(&ndlp->lock); 5610 } 5611 5612 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5613 if (rc == IOCB_ERROR) { 5614 lpfc_els_free_iocb(phba, elsiocb); 5615 lpfc_nlp_put(ndlp); 5616 return 1; 5617 } 5618 5619 return 0; 5620 } 5621 5622 /** 5623 * lpfc_issue_els_edc_rsp - Exchange Diagnostic Capabilities with the fabric. 5624 * @vport: pointer to a host virtual N_Port data structure. 5625 * @cmdiocb: pointer to the original lpfc command iocb data structure. 
5626 * @ndlp: NPort to where rsp is directed 5627 * 5628 * This routine issues an EDC ACC RSP to the F-Port Controller to communicate 5629 * this N_Port's support of hardware signals in its Congestion 5630 * Capabilities Descriptor. 5631 * 5632 * Return code 5633 * 0 - Successfully issued edc rsp command 5634 * 1 - Failed to issue edc rsp command 5635 **/ 5636 static int 5637 lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5638 struct lpfc_nodelist *ndlp) 5639 { 5640 struct lpfc_hba *phba = vport->phba; 5641 struct lpfc_els_edc_rsp *edc_rsp; 5642 struct lpfc_iocbq *elsiocb; 5643 IOCB_t *icmd, *cmd; 5644 uint8_t *pcmd; 5645 int cmdsize, rc; 5646 5647 cmdsize = sizeof(struct lpfc_els_edc_rsp); 5648 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, cmdiocb->retry, 5649 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 5650 if (!elsiocb) 5651 return 1; 5652 5653 icmd = &elsiocb->iocb; 5654 cmd = &cmdiocb->iocb; 5655 icmd->ulpContext = cmd->ulpContext; /* Xri / rx_id */ 5656 icmd->unsli3.rcvsli3.ox_id = cmd->unsli3.rcvsli3.ox_id; 5657 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5658 memset(pcmd, 0, cmdsize); 5659 5660 edc_rsp = (struct lpfc_els_edc_rsp *)pcmd; 5661 edc_rsp->edc_rsp.acc_hdr.la_cmd = ELS_LS_ACC; 5662 edc_rsp->edc_rsp.desc_list_len = cpu_to_be32( 5663 FC_TLV_DESC_LENGTH_FROM_SZ(struct lpfc_els_edc_rsp)); 5664 edc_rsp->edc_rsp.lsri.desc_tag = cpu_to_be32(ELS_DTAG_LS_REQ_INFO); 5665 edc_rsp->edc_rsp.lsri.desc_len = cpu_to_be32( 5666 FC_TLV_DESC_LENGTH_FROM_SZ(struct fc_els_lsri_desc)); 5667 edc_rsp->edc_rsp.lsri.rqst_w0.cmd = ELS_EDC; 5668 lpfc_format_edc_cgn_desc(phba, &edc_rsp->cgn_desc); 5669 5670 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5671 "Issue EDC ACC: did:x%x flg:x%x refcnt %d", 5672 ndlp->nlp_DID, ndlp->nlp_flag, 5673 kref_read(&ndlp->kref)); 5674 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5675 5676 phba->fc_stat.elsXmitACC++; 5677 elsiocb->context1 = lpfc_nlp_get(ndlp); 5678 if (!elsiocb->context1) { 5679 lpfc_els_free_iocb(phba, elsiocb); 5680 return 1; 5681 } 5682 5683 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5684 if (rc == IOCB_ERROR) { 5685 lpfc_els_free_iocb(phba, elsiocb); 5686 lpfc_nlp_put(ndlp); 5687 return 1; 5688 } 5689 5690 /* Xmit ELS ACC response tag <ulpIoTag> */ 5691 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5692 "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " 5693 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5694 "RPI: x%x, fc_flag x%x\n", 5695 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5696 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5697 ndlp->nlp_rpi, vport->fc_flag); 5698 5699 return 0; 5700 } 5701 5702 /** 5703 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5704 * @vport: pointer to a virtual N_Port data structure. 5705 * @oldiocb: pointer to the original lpfc command iocb data structure. 5706 * @ndlp: pointer to a node-list data structure. 5707 * 5708 * This routine prepares and issues an Accept (ACC) response to Address 5709 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5710 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5711 * 5712 * Note that the ndlp reference count will be incremented by 1 for holding the 5713 * ndlp and the reference to ndlp will be stored into the context1 field of 5714 * the IOCB for the completion callback function to the ADISC Accept response 5715 * ELS IOCB command. 
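 * The ADISC ACC payload returns this port's hard AL_PA, port name,
 * node name, and the N_Port ID (DID) assigned to the @vport.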
5716 * 5717 * Return code 5718 * 0 - Successfully issued acc adisc response 5719 * 1 - Failed to issue adisc acc response 5720 **/ 5721 int 5722 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5723 struct lpfc_nodelist *ndlp) 5724 { 5725 struct lpfc_hba *phba = vport->phba; 5726 ADISC *ap; 5727 IOCB_t *icmd, *oldcmd; 5728 struct lpfc_iocbq *elsiocb; 5729 uint8_t *pcmd; 5730 uint16_t cmdsize; 5731 int rc; 5732 5733 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5734 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5735 ndlp->nlp_DID, ELS_CMD_ACC); 5736 if (!elsiocb) 5737 return 1; 5738 5739 icmd = &elsiocb->iocb; 5740 oldcmd = &oldiocb->iocb; 5741 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5742 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5743 5744 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5745 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5746 "0130 Xmit ADISC ACC response iotag x%x xri: " 5747 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5748 elsiocb->iotag, elsiocb->iocb.ulpContext, 5749 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5750 ndlp->nlp_rpi); 5751 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5752 5753 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5754 pcmd += sizeof(uint32_t); 5755 5756 ap = (ADISC *) (pcmd); 5757 ap->hardAL_PA = phba->fc_pref_ALPA; 5758 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5759 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5760 ap->DID = be32_to_cpu(vport->fc_myDID); 5761 5762 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5763 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5764 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5765 5766 phba->fc_stat.elsXmitACC++; 5767 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5768 elsiocb->context1 = lpfc_nlp_get(ndlp); 5769 if (!elsiocb->context1) { 5770 lpfc_els_free_iocb(phba, elsiocb); 5771 return 1; 5772 } 5773 5774 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5775 if (rc == IOCB_ERROR) { 5776 lpfc_els_free_iocb(phba, elsiocb); 5777 lpfc_nlp_put(ndlp); 5778 return 1; 5779 } 5780 5781 /* Xmit ELS ACC response tag <ulpIoTag> */ 5782 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5783 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5784 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5785 "RPI: x%x, fc_flag x%x\n", 5786 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5787 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5788 ndlp->nlp_rpi, vport->fc_flag); 5789 return 0; 5790 } 5791 5792 /** 5793 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 5794 * @vport: pointer to a virtual N_Port data structure. 5795 * @oldiocb: pointer to the original lpfc command iocb data structure. 5796 * @ndlp: pointer to a node-list data structure. 5797 * 5798 * This routine prepares and issues an Accept (ACC) response to Process 5799 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 5800 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5801 * 5802 * Note that the ndlp reference count will be incremented by 1 for holding the 5803 * ndlp and the reference to ndlp will be stored into the context1 field of 5804 * the IOCB for the completion callback function to the PRLI Accept response 5805 * ELS IOCB command. 
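 * The FC4 type in the received PRLI payload determines whether an FCP
 * or an NVMe PRLI accept page is returned; an unrecognized type causes
 * the routine to return 1 without sending a response.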
5806 * 5807 * Return code 5808 * 0 - Successfully issued acc prli response 5809 * 1 - Failed to issue acc prli response 5810 **/ 5811 int 5812 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5813 struct lpfc_nodelist *ndlp) 5814 { 5815 struct lpfc_hba *phba = vport->phba; 5816 PRLI *npr; 5817 struct lpfc_nvme_prli *npr_nvme; 5818 lpfc_vpd_t *vpd; 5819 IOCB_t *icmd; 5820 IOCB_t *oldcmd; 5821 struct lpfc_iocbq *elsiocb; 5822 uint8_t *pcmd; 5823 uint16_t cmdsize; 5824 uint32_t prli_fc4_req, *req_payload; 5825 struct lpfc_dmabuf *req_buf; 5826 int rc; 5827 u32 elsrspcmd; 5828 5829 /* Need the incoming PRLI payload to determine if the ACC is for an 5830 * FC4 or NVME PRLI type. The PRLI type is at word 1. 5831 */ 5832 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 5833 req_payload = (((uint32_t *)req_buf->virt) + 1); 5834 5835 /* PRLI type payload is at byte 3 for FCP or NVME. */ 5836 prli_fc4_req = be32_to_cpu(*req_payload); 5837 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 5838 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5839 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 5840 prli_fc4_req, *((uint32_t *)req_payload)); 5841 5842 if (prli_fc4_req == PRLI_FCP_TYPE) { 5843 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 5844 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 5845 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5846 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 5847 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 5848 } else { 5849 return 1; 5850 } 5851 5852 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5853 ndlp->nlp_DID, elsrspcmd); 5854 if (!elsiocb) 5855 return 1; 5856 5857 icmd = &elsiocb->iocb; 5858 oldcmd = &oldiocb->iocb; 5859 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5860 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5861 5862 /* Xmit PRLI ACC response tag <ulpIoTag> */ 5863 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5864 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 5865 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5866 elsiocb->iotag, elsiocb->iocb.ulpContext, 5867 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5868 ndlp->nlp_rpi); 5869 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5870 memset(pcmd, 0, cmdsize); 5871 5872 *((uint32_t *)(pcmd)) = elsrspcmd; 5873 pcmd += sizeof(uint32_t); 5874 5875 /* For PRLI, remainder of payload is PRLI parameter page */ 5876 vpd = &phba->vpd; 5877 5878 if (prli_fc4_req == PRLI_FCP_TYPE) { 5879 /* 5880 * If the remote port is a target and our firmware version 5881 * is 3.20 or later, set the following bits for FC-TAPE 5882 * support. 
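 * FC-TAPE support is conveyed by setting the confirmed completion
 * allowed, retry, and task retry identification request bits in the
 * PRLI accept page below.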
5883 */ 5884 npr = (PRLI *) pcmd; 5885 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5886 (vpd->rev.feaLevelHigh >= 0x02)) { 5887 npr->ConfmComplAllowed = 1; 5888 npr->Retry = 1; 5889 npr->TaskRetryIdReq = 1; 5890 } 5891 npr->acceptRspCode = PRLI_REQ_EXECUTED; 5892 npr->estabImagePair = 1; 5893 npr->readXferRdyDis = 1; 5894 npr->ConfmComplAllowed = 1; 5895 npr->prliType = PRLI_FCP_TYPE; 5896 npr->initiatorFunc = 1; 5897 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5898 /* Respond with an NVME PRLI Type */ 5899 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 5900 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 5901 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 5902 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 5903 if (phba->nvmet_support) { 5904 bf_set(prli_tgt, npr_nvme, 1); 5905 bf_set(prli_disc, npr_nvme, 1); 5906 if (phba->cfg_nvme_enable_fb) { 5907 bf_set(prli_fba, npr_nvme, 1); 5908 5909 /* TBD. Target mode needs to post buffers 5910 * that support the configured first burst 5911 * byte size. 5912 */ 5913 bf_set(prli_fb_sz, npr_nvme, 5914 phba->cfg_nvmet_fb_size); 5915 } 5916 } else { 5917 bf_set(prli_init, npr_nvme, 1); 5918 } 5919 5920 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 5921 "6015 NVME issue PRLI ACC word1 x%08x " 5922 "word4 x%08x word5 x%08x flag x%x, " 5923 "fcp_info x%x nlp_type x%x\n", 5924 npr_nvme->word1, npr_nvme->word4, 5925 npr_nvme->word5, ndlp->nlp_flag, 5926 ndlp->nlp_fcp_info, ndlp->nlp_type); 5927 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 5928 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 5929 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 5930 } else 5931 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5932 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 5933 prli_fc4_req, ndlp->nlp_fc4_type, 5934 ndlp->nlp_DID); 5935 5936 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5937 "Issue ACC PRLI: did:x%x flg:x%x", 5938 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5939 5940 phba->fc_stat.elsXmitACC++; 5941 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5942 elsiocb->context1 = lpfc_nlp_get(ndlp); 5943 if (!elsiocb->context1) { 5944 lpfc_els_free_iocb(phba, elsiocb); 5945 return 1; 5946 } 5947 5948 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5949 if (rc == IOCB_ERROR) { 5950 lpfc_els_free_iocb(phba, elsiocb); 5951 lpfc_nlp_put(ndlp); 5952 return 1; 5953 } 5954 5955 return 0; 5956 } 5957 5958 /** 5959 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 5960 * @vport: pointer to a virtual N_Port data structure. 5961 * @format: rnid command format. 5962 * @oldiocb: pointer to the original lpfc command iocb data structure. 5963 * @ndlp: pointer to a node-list data structure. 5964 * 5965 * This routine issues a Request Node Identification Data (RNID) Accept 5966 * (ACC) response. It constructs the RNID ACC response command according to 5967 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 5968 * issue the response. 5969 * 5970 * Note that the ndlp reference count will be incremented by 1 for holding the 5971 * ndlp and the reference to ndlp will be stored into the context1 field of 5972 * the IOCB for the completion callback function. 
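 * A @format of zero returns only the common identification data, while
 * the RNID_TOPOLOGY_DISC format appends a topology discovery block
 * containing this port's name and unit type; any other format returns
 * empty common and specific data.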
5973 * 5974 * Return code 5975 * 0 - Successfully issued acc rnid response 5976 * 1 - Failed to issue acc rnid response 5977 **/ 5978 static int 5979 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 5980 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5981 { 5982 struct lpfc_hba *phba = vport->phba; 5983 RNID *rn; 5984 IOCB_t *icmd, *oldcmd; 5985 struct lpfc_iocbq *elsiocb; 5986 uint8_t *pcmd; 5987 uint16_t cmdsize; 5988 int rc; 5989 5990 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 5991 + (2 * sizeof(struct lpfc_name)); 5992 if (format) 5993 cmdsize += sizeof(RNID_TOP_DISC); 5994 5995 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5996 ndlp->nlp_DID, ELS_CMD_ACC); 5997 if (!elsiocb) 5998 return 1; 5999 6000 icmd = &elsiocb->iocb; 6001 oldcmd = &oldiocb->iocb; 6002 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 6003 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 6004 6005 /* Xmit RNID ACC response tag <ulpIoTag> */ 6006 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6007 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 6008 elsiocb->iotag, elsiocb->iocb.ulpContext); 6009 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6010 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6011 pcmd += sizeof(uint32_t); 6012 6013 memset(pcmd, 0, sizeof(RNID)); 6014 rn = (RNID *) (pcmd); 6015 rn->Format = format; 6016 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 6017 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 6018 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 6019 switch (format) { 6020 case 0: 6021 rn->SpecificLen = 0; 6022 break; 6023 case RNID_TOPOLOGY_DISC: 6024 rn->SpecificLen = sizeof(RNID_TOP_DISC); 6025 memcpy(&rn->un.topologyDisc.portName, 6026 &vport->fc_portname, sizeof(struct lpfc_name)); 6027 rn->un.topologyDisc.unitType = RNID_HBA; 6028 rn->un.topologyDisc.physPort = 0; 6029 rn->un.topologyDisc.attachedNodes = 0; 6030 break; 6031 default: 6032 rn->CommonLen = 0; 6033 rn->SpecificLen = 0; 6034 break; 6035 } 6036 6037 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6038 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 6039 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6040 6041 phba->fc_stat.elsXmitACC++; 6042 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6043 elsiocb->context1 = lpfc_nlp_get(ndlp); 6044 if (!elsiocb->context1) { 6045 lpfc_els_free_iocb(phba, elsiocb); 6046 return 1; 6047 } 6048 6049 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6050 if (rc == IOCB_ERROR) { 6051 lpfc_els_free_iocb(phba, elsiocb); 6052 lpfc_nlp_put(ndlp); 6053 return 1; 6054 } 6055 6056 return 0; 6057 } 6058 6059 /** 6060 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 6061 * @vport: pointer to a virtual N_Port data structure. 6062 * @iocb: pointer to the lpfc command iocb data structure. 6063 * @ndlp: pointer to a node-list data structure. 
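 * This routine parses the RRQ payload carried by @iocb to locate the
 * exchange being recovered. When the originator S_ID in the payload
 * matches this port's DID the OX_ID is used for the lookup, otherwise
 * the RX_ID is used. Any matching active RRQ returned by
 * lpfc_get_active_rrq() is then cleared with lpfc_clr_rrq_active().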
6064 * 6065 * Return 6066 **/ 6067 static void 6068 lpfc_els_clear_rrq(struct lpfc_vport *vport, 6069 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 6070 { 6071 struct lpfc_hba *phba = vport->phba; 6072 uint8_t *pcmd; 6073 struct RRQ *rrq; 6074 uint16_t rxid; 6075 uint16_t xri; 6076 struct lpfc_node_rrq *prrq; 6077 6078 6079 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 6080 pcmd += sizeof(uint32_t); 6081 rrq = (struct RRQ *)pcmd; 6082 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 6083 rxid = bf_get(rrq_rxid, rrq); 6084 6085 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6086 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 6087 " x%x x%x\n", 6088 be32_to_cpu(bf_get(rrq_did, rrq)), 6089 bf_get(rrq_oxid, rrq), 6090 rxid, 6091 iocb->iotag, iocb->iocb.ulpContext); 6092 6093 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6094 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 6095 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 6096 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 6097 xri = bf_get(rrq_oxid, rrq); 6098 else 6099 xri = rxid; 6100 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 6101 if (prrq) 6102 lpfc_clr_rrq_active(phba, xri, prrq); 6103 return; 6104 } 6105 6106 /** 6107 * lpfc_els_rsp_echo_acc - Issue echo acc response 6108 * @vport: pointer to a virtual N_Port data structure. 6109 * @data: pointer to echo data to return in the accept. 6110 * @oldiocb: pointer to the original lpfc command iocb data structure. 6111 * @ndlp: pointer to a node-list data structure. 6112 * 6113 * Return code 6114 * 0 - Successfully issued acc echo response 6115 * 1 - Failed to issue acc echo response 6116 **/ 6117 static int 6118 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 6119 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 6120 { 6121 struct lpfc_hba *phba = vport->phba; 6122 struct lpfc_iocbq *elsiocb; 6123 uint8_t *pcmd; 6124 uint16_t cmdsize; 6125 int rc; 6126 6127 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 6128 6129 /* The accumulated length can exceed the BPL_SIZE. 
For 6130 * now, use this as the limit 6131 */ 6132 if (cmdsize > LPFC_BPL_SIZE) 6133 cmdsize = LPFC_BPL_SIZE; 6134 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 6135 ndlp->nlp_DID, ELS_CMD_ACC); 6136 if (!elsiocb) 6137 return 1; 6138 6139 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 6140 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 6141 6142 /* Xmit ECHO ACC response tag <ulpIoTag> */ 6143 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6144 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 6145 elsiocb->iotag, elsiocb->iocb.ulpContext); 6146 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6147 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6148 pcmd += sizeof(uint32_t); 6149 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 6150 6151 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 6152 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 6153 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 6154 6155 phba->fc_stat.elsXmitACC++; 6156 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6157 elsiocb->context1 = lpfc_nlp_get(ndlp); 6158 if (!elsiocb->context1) { 6159 lpfc_els_free_iocb(phba, elsiocb); 6160 return 1; 6161 } 6162 6163 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6164 if (rc == IOCB_ERROR) { 6165 lpfc_els_free_iocb(phba, elsiocb); 6166 lpfc_nlp_put(ndlp); 6167 return 1; 6168 } 6169 6170 return 0; 6171 } 6172 6173 /** 6174 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 6175 * @vport: pointer to a host virtual N_Port data structure. 6176 * 6177 * This routine issues Address Discover (ADISC) ELS commands to those 6178 * N_Ports which are in node port recovery state and ADISC has not been issued 6179 * for the @vport. Each time an ELS ADISC IOCB is issued by invoking the 6180 * lpfc_issue_els_adisc() routine, the per @vport number of discover count 6181 * (num_disc_nodes) shall be incremented. If the num_disc_nodes reaches a 6182 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 6183 * be marked with FC_NLP_MORE bit and the process of issuing remaining ADISC 6184 * IOCBs quit for later pick up. On the other hand, after walking through 6185 * all the ndlps with the @vport and there is none ADISC IOCB issued, the 6186 * @vport fc_flag shall be cleared with FC_NLP_MORE bit indicating there is 6187 * no more ADISC need to be sent. 6188 * 6189 * Return code 6190 * The number of N_Ports with adisc issued. 6191 **/ 6192 int 6193 lpfc_els_disc_adisc(struct lpfc_vport *vport) 6194 { 6195 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6196 struct lpfc_nodelist *ndlp, *next_ndlp; 6197 int sentadisc = 0; 6198 6199 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 6200 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6201 6202 if (ndlp->nlp_state != NLP_STE_NPR_NODE || 6203 !(ndlp->nlp_flag & NLP_NPR_ADISC)) 6204 continue; 6205 6206 spin_lock_irq(&ndlp->lock); 6207 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 6208 spin_unlock_irq(&ndlp->lock); 6209 6210 if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { 6211 /* This node was marked for ADISC but was not picked 6212 * for discovery. This is possible if the node was 6213 * missing in gidft response. 
6214 * 6215 * At the time of marking the node for ADISC, we skipped the unreg 6216 * from the backend 6217 */ 6218 lpfc_nlp_unreg_node(vport, ndlp); 6219 continue; 6220 } 6221 6222 ndlp->nlp_prev_state = ndlp->nlp_state; 6223 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 6224 lpfc_issue_els_adisc(vport, ndlp, 0); 6225 sentadisc++; 6226 vport->num_disc_nodes++; 6227 if (vport->num_disc_nodes >= 6228 vport->cfg_discovery_threads) { 6229 spin_lock_irq(shost->host_lock); 6230 vport->fc_flag |= FC_NLP_MORE; 6231 spin_unlock_irq(shost->host_lock); 6232 break; 6233 } 6234 6235 } 6236 if (sentadisc == 0) { 6237 spin_lock_irq(shost->host_lock); 6238 vport->fc_flag &= ~FC_NLP_MORE; 6239 spin_unlock_irq(shost->host_lock); 6240 } 6241 return sentadisc; 6242 } 6243 6244 /** 6245 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 6246 * @vport: pointer to a host virtual N_Port data structure. 6247 * 6248 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 6249 * which are in node port recovery state on a @vport. Each time an ELS 6250 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 6251 * the per @vport discovery count (num_disc_nodes) shall be 6252 * incremented. If num_disc_nodes reaches the pre-configured threshold 6253 * (cfg_discovery_threads), the @vport fc_flag will be marked with the 6254 * FC_NLP_MORE bit and issuing of the remaining PLOGI IOCBs quits, to be 6255 * picked up later. On the other hand, if the walk through all the ndlps of 6256 * the @vport issues no PLOGI IOCB, the FC_NLP_MORE bit shall be cleared 6257 * from the @vport fc_flag, indicating there are no more 6258 * PLOGIs to be sent. 6259 * 6260 * Return code 6261 * The number of N_Ports with plogi issued.
6262 **/ 6263 int 6264 lpfc_els_disc_plogi(struct lpfc_vport *vport) 6265 { 6266 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6267 struct lpfc_nodelist *ndlp, *next_ndlp; 6268 int sentplogi = 0; 6269 6270 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 6271 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 6272 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 6273 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 6274 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 6275 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 6276 ndlp->nlp_prev_state = ndlp->nlp_state; 6277 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6278 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 6279 sentplogi++; 6280 vport->num_disc_nodes++; 6281 if (vport->num_disc_nodes >= 6282 vport->cfg_discovery_threads) { 6283 spin_lock_irq(shost->host_lock); 6284 vport->fc_flag |= FC_NLP_MORE; 6285 spin_unlock_irq(shost->host_lock); 6286 break; 6287 } 6288 } 6289 } 6290 6291 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6292 "6452 Discover PLOGI %d flag x%x\n", 6293 sentplogi, vport->fc_flag); 6294 6295 if (sentplogi) { 6296 lpfc_set_disctmo(vport); 6297 } 6298 else { 6299 spin_lock_irq(shost->host_lock); 6300 vport->fc_flag &= ~FC_NLP_MORE; 6301 spin_unlock_irq(shost->host_lock); 6302 } 6303 return sentplogi; 6304 } 6305 6306 static uint32_t 6307 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 6308 uint32_t word0) 6309 { 6310 6311 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 6312 desc->payload.els_req = word0; 6313 desc->length = cpu_to_be32(sizeof(desc->payload)); 6314 6315 return sizeof(struct fc_rdp_link_service_desc); 6316 } 6317 6318 static uint32_t 6319 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 6320 uint8_t *page_a0, uint8_t *page_a2) 6321 { 6322 uint16_t wavelength; 6323 uint16_t temperature; 6324 uint16_t rx_power; 6325 uint16_t tx_bias; 6326 uint16_t tx_power; 6327 uint16_t vcc; 6328 uint16_t flag = 0; 6329 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 6330 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 6331 6332 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 6333 6334 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 6335 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 6336 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 6337 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 6338 6339 if ((trasn_code_byte4->fc_sw_laser) || 6340 (trasn_code_byte5->fc_sw_laser_sl) || 6341 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 6342 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 6343 } else if (trasn_code_byte4->fc_lw_laser) { 6344 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 6345 page_a0[SSF_WAVELENGTH_B0]; 6346 if (wavelength == SFP_WAVELENGTH_LC1310) 6347 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 6348 if (wavelength == SFP_WAVELENGTH_LL1550) 6349 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 6350 } 6351 /* check if its SFP+ */ 6352 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 6353 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 6354 << SFP_FLAG_CT_SHIFT; 6355 6356 /* check if its OPTICAL */ 6357 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
6358 SFP_FLAG_IS_OPTICAL_PORT : 0) 6359 << SFP_FLAG_IS_OPTICAL_SHIFT; 6360 6361 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 6362 page_a2[SFF_TEMPERATURE_B0]); 6363 vcc = (page_a2[SFF_VCC_B1] << 8 | 6364 page_a2[SFF_VCC_B0]); 6365 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 6366 page_a2[SFF_TXPOWER_B0]); 6367 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 6368 page_a2[SFF_TX_BIAS_CURRENT_B0]); 6369 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 6370 page_a2[SFF_RXPOWER_B0]); 6371 desc->sfp_info.temperature = cpu_to_be16(temperature); 6372 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 6373 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 6374 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 6375 desc->sfp_info.vcc = cpu_to_be16(vcc); 6376 6377 desc->sfp_info.flags = cpu_to_be16(flag); 6378 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 6379 6380 return sizeof(struct fc_rdp_sfp_desc); 6381 } 6382 6383 static uint32_t 6384 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 6385 READ_LNK_VAR *stat) 6386 { 6387 uint32_t type; 6388 6389 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 6390 6391 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 6392 6393 desc->info.port_type = cpu_to_be32(type); 6394 6395 desc->info.link_status.link_failure_cnt = 6396 cpu_to_be32(stat->linkFailureCnt); 6397 desc->info.link_status.loss_of_synch_cnt = 6398 cpu_to_be32(stat->lossSyncCnt); 6399 desc->info.link_status.loss_of_signal_cnt = 6400 cpu_to_be32(stat->lossSignalCnt); 6401 desc->info.link_status.primitive_seq_proto_err = 6402 cpu_to_be32(stat->primSeqErrCnt); 6403 desc->info.link_status.invalid_trans_word = 6404 cpu_to_be32(stat->invalidXmitWord); 6405 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 6406 6407 desc->length = cpu_to_be32(sizeof(desc->info)); 6408 6409 return sizeof(struct fc_rdp_link_error_status_desc); 6410 } 6411 6412 static uint32_t 6413 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 6414 struct lpfc_vport *vport) 6415 { 6416 uint32_t bbCredit; 6417 6418 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 6419 6420 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 6421 (vport->fc_sparam.cmn.bbCreditMsb << 8); 6422 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 6423 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 6424 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 6425 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 6426 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 6427 } else { 6428 desc->bbc_info.attached_port_bbc = 0; 6429 } 6430 6431 desc->bbc_info.rtt = 0; 6432 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 6433 6434 return sizeof(struct fc_rdp_bbc_desc); 6435 } 6436 6437 static uint32_t 6438 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 6439 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 6440 { 6441 uint32_t flags = 0; 6442 6443 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6444 6445 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 6446 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 6447 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 6448 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 6449 6450 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6451 flags |= RDP_OET_HIGH_ALARM; 6452 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6453 flags |= RDP_OET_LOW_ALARM; 6454 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 6455 flags |= RDP_OET_HIGH_WARNING; 6456 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 6457 
flags |= RDP_OET_LOW_WARNING; 6458 6459 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 6460 desc->oed_info.function_flags = cpu_to_be32(flags); 6461 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6462 return sizeof(struct fc_rdp_oed_sfp_desc); 6463 } 6464 6465 static uint32_t 6466 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 6467 struct fc_rdp_oed_sfp_desc *desc, 6468 uint8_t *page_a2) 6469 { 6470 uint32_t flags = 0; 6471 6472 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6473 6474 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 6475 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 6476 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 6477 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 6478 6479 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6480 flags |= RDP_OET_HIGH_ALARM; 6481 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6482 flags |= RDP_OET_LOW_ALARM; 6483 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 6484 flags |= RDP_OET_HIGH_WARNING; 6485 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 6486 flags |= RDP_OET_LOW_WARNING; 6487 6488 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 6489 desc->oed_info.function_flags = cpu_to_be32(flags); 6490 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6491 return sizeof(struct fc_rdp_oed_sfp_desc); 6492 } 6493 6494 static uint32_t 6495 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 6496 struct fc_rdp_oed_sfp_desc *desc, 6497 uint8_t *page_a2) 6498 { 6499 uint32_t flags = 0; 6500 6501 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6502 6503 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 6504 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 6505 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 6506 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 6507 6508 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6509 flags |= RDP_OET_HIGH_ALARM; 6510 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 6511 flags |= RDP_OET_LOW_ALARM; 6512 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 6513 flags |= RDP_OET_HIGH_WARNING; 6514 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 6515 flags |= RDP_OET_LOW_WARNING; 6516 6517 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 6518 desc->oed_info.function_flags = cpu_to_be32(flags); 6519 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6520 return sizeof(struct fc_rdp_oed_sfp_desc); 6521 } 6522 6523 static uint32_t 6524 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 6525 struct fc_rdp_oed_sfp_desc *desc, 6526 uint8_t *page_a2) 6527 { 6528 uint32_t flags = 0; 6529 6530 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6531 6532 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 6533 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 6534 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 6535 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 6536 6537 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6538 flags |= RDP_OET_HIGH_ALARM; 6539 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 6540 flags |= RDP_OET_LOW_ALARM; 6541 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 6542 flags |= RDP_OET_HIGH_WARNING; 6543 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 6544 flags |= RDP_OET_LOW_WARNING; 6545 6546 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 6547 desc->oed_info.function_flags = cpu_to_be32(flags); 6548 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 6549 return sizeof(struct fc_rdp_oed_sfp_desc); 6550 } 6551 6552 6553 static uint32_t 6554 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 6555 struct fc_rdp_oed_sfp_desc *desc, 6556 uint8_t *page_a2) 6557 { 6558 uint32_t flags = 0; 6559 6560 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 6561 6562 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 6563 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 6564 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 6565 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 6566 6567 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6568 flags |= RDP_OET_HIGH_ALARM; 6569 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 6570 flags |= RDP_OET_LOW_ALARM; 6571 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 6572 flags |= RDP_OET_HIGH_WARNING; 6573 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 6574 flags |= RDP_OET_LOW_WARNING; 6575 6576 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 6577 desc->oed_info.function_flags = cpu_to_be32(flags); 6578 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 6579 return sizeof(struct fc_rdp_oed_sfp_desc); 6580 } 6581 6582 static uint32_t 6583 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 6584 uint8_t *page_a0, struct lpfc_vport *vport) 6585 { 6586 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 6587 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 6588 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 6589 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 6590 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 6591 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 6592 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 6593 return sizeof(struct fc_rdp_opd_sfp_desc); 6594 } 6595 6596 static uint32_t 6597 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 6598 { 6599 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 6600 return 0; 6601 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 6602 6603 desc->info.CorrectedBlocks = 6604 cpu_to_be32(stat->fecCorrBlkCount); 6605 desc->info.UncorrectableBlocks = 6606 cpu_to_be32(stat->fecUncorrBlkCount); 6607 6608 desc->length = cpu_to_be32(sizeof(desc->info)); 6609 6610 return sizeof(struct fc_fec_rdp_desc); 6611 } 6612 6613 static uint32_t 6614 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6615 { 6616 uint16_t rdp_cap = 0; 6617 uint16_t rdp_speed; 6618 6619 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6620 6621 switch (phba->fc_linkspeed) { 6622 case LPFC_LINK_SPEED_1GHZ: 6623 rdp_speed = RDP_PS_1GB; 6624 break; 6625 case LPFC_LINK_SPEED_2GHZ: 6626 rdp_speed = RDP_PS_2GB; 6627 break; 6628 case LPFC_LINK_SPEED_4GHZ: 6629 rdp_speed = RDP_PS_4GB; 6630 break; 6631 case LPFC_LINK_SPEED_8GHZ: 6632 rdp_speed = RDP_PS_8GB; 6633 break; 6634 case LPFC_LINK_SPEED_10GHZ: 6635 rdp_speed = RDP_PS_10GB; 6636 break; 6637 case LPFC_LINK_SPEED_16GHZ: 6638 rdp_speed = RDP_PS_16GB; 6639 break; 6640 case LPFC_LINK_SPEED_32GHZ: 6641 rdp_speed = RDP_PS_32GB; 6642 break; 6643 case LPFC_LINK_SPEED_64GHZ: 6644 rdp_speed = RDP_PS_64GB; 6645 break; 6646 case LPFC_LINK_SPEED_128GHZ: 6647 rdp_speed = RDP_PS_128GB; 6648 break; 6649 case LPFC_LINK_SPEED_256GHZ: 6650 rdp_speed = RDP_PS_256GB; 6651 break; 6652 default: 6653 rdp_speed = RDP_PS_UNKNOWN; 6654 break; 6655 } 6656 6657 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6658 6659 if (phba->lmt & LMT_256Gb) 6660 
rdp_cap |= RDP_PS_256GB; 6661 if (phba->lmt & LMT_128Gb) 6662 rdp_cap |= RDP_PS_128GB; 6663 if (phba->lmt & LMT_64Gb) 6664 rdp_cap |= RDP_PS_64GB; 6665 if (phba->lmt & LMT_32Gb) 6666 rdp_cap |= RDP_PS_32GB; 6667 if (phba->lmt & LMT_16Gb) 6668 rdp_cap |= RDP_PS_16GB; 6669 if (phba->lmt & LMT_10Gb) 6670 rdp_cap |= RDP_PS_10GB; 6671 if (phba->lmt & LMT_8Gb) 6672 rdp_cap |= RDP_PS_8GB; 6673 if (phba->lmt & LMT_4Gb) 6674 rdp_cap |= RDP_PS_4GB; 6675 if (phba->lmt & LMT_2Gb) 6676 rdp_cap |= RDP_PS_2GB; 6677 if (phba->lmt & LMT_1Gb) 6678 rdp_cap |= RDP_PS_1GB; 6679 6680 if (rdp_cap == 0) 6681 rdp_cap = RDP_CAP_UNKNOWN; 6682 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6683 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6684 6685 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6686 desc->length = cpu_to_be32(sizeof(desc->info)); 6687 return sizeof(struct fc_rdp_port_speed_desc); 6688 } 6689 6690 static uint32_t 6691 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6692 struct lpfc_vport *vport) 6693 { 6694 6695 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6696 6697 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6698 sizeof(desc->port_names.wwnn)); 6699 6700 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6701 sizeof(desc->port_names.wwpn)); 6702 6703 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6704 return sizeof(struct fc_rdp_port_name_desc); 6705 } 6706 6707 static uint32_t 6708 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6709 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6710 { 6711 6712 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6713 if (vport->fc_flag & FC_FABRIC) { 6714 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6715 sizeof(desc->port_names.wwnn)); 6716 6717 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6718 sizeof(desc->port_names.wwpn)); 6719 } else { /* Point to Point */ 6720 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6721 sizeof(desc->port_names.wwnn)); 6722 6723 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6724 sizeof(desc->port_names.wwpn)); 6725 } 6726 6727 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6728 return sizeof(struct fc_rdp_port_name_desc); 6729 } 6730 6731 static void 6732 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 6733 int status) 6734 { 6735 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 6736 struct lpfc_vport *vport = ndlp->vport; 6737 struct lpfc_iocbq *elsiocb; 6738 struct ulp_bde64 *bpl; 6739 IOCB_t *icmd; 6740 uint8_t *pcmd; 6741 struct ls_rjt *stat; 6742 struct fc_rdp_res_frame *rdp_res; 6743 uint32_t cmdsize, len; 6744 uint16_t *flag_ptr; 6745 int rc; 6746 6747 if (status != SUCCESS) 6748 goto error; 6749 6750 /* This will change once we know the true size of the RDP payload */ 6751 cmdsize = sizeof(struct fc_rdp_res_frame); 6752 6753 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 6754 lpfc_max_els_tries, rdp_context->ndlp, 6755 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 6756 if (!elsiocb) 6757 goto free_rdp_context; 6758 6759 icmd = &elsiocb->iocb; 6760 icmd->ulpContext = rdp_context->rx_id; 6761 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6762 6763 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6764 "2171 Xmit RDP response tag x%x xri x%x, " 6765 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 6766 elsiocb->iotag, elsiocb->iocb.ulpContext, 6767 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6768 ndlp->nlp_rpi); 6769 rdp_res = (struct fc_rdp_res_frame *) 6770 (((struct lpfc_dmabuf *) 
elsiocb->context2)->virt); 6771 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6772 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 6773 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6774 6775 /* Update Alarm and Warning */ 6776 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 6777 phba->sfp_alarm |= *flag_ptr; 6778 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 6779 phba->sfp_warning |= *flag_ptr; 6780 6781 /* For RDP payload */ 6782 len = 8; 6783 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 6784 (len + pcmd), ELS_CMD_RDP); 6785 6786 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 6787 rdp_context->page_a0, rdp_context->page_a2); 6788 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 6789 phba); 6790 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 6791 (len + pcmd), &rdp_context->link_stat); 6792 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 6793 (len + pcmd), vport); 6794 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 6795 (len + pcmd), vport, ndlp); 6796 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 6797 &rdp_context->link_stat); 6798 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 6799 &rdp_context->link_stat, vport); 6800 len += lpfc_rdp_res_oed_temp_desc(phba, 6801 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6802 rdp_context->page_a2); 6803 len += lpfc_rdp_res_oed_voltage_desc(phba, 6804 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6805 rdp_context->page_a2); 6806 len += lpfc_rdp_res_oed_txbias_desc(phba, 6807 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6808 rdp_context->page_a2); 6809 len += lpfc_rdp_res_oed_txpower_desc(phba, 6810 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6811 rdp_context->page_a2); 6812 len += lpfc_rdp_res_oed_rxpower_desc(phba, 6813 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6814 rdp_context->page_a2); 6815 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 6816 rdp_context->page_a0, vport); 6817 6818 rdp_res->length = cpu_to_be32(len - 8); 6819 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6820 6821 /* Now that we know the true size of the payload, update the BPL */ 6822 bpl = (struct ulp_bde64 *) 6823 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 6824 bpl->tus.f.bdeSize = len; 6825 bpl->tus.f.bdeFlags = 0; 6826 bpl->tus.w = le32_to_cpu(bpl->tus.w); 6827 6828 phba->fc_stat.elsXmitACC++; 6829 elsiocb->context1 = lpfc_nlp_get(ndlp); 6830 if (!elsiocb->context1) { 6831 lpfc_els_free_iocb(phba, elsiocb); 6832 goto free_rdp_context; 6833 } 6834 6835 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6836 if (rc == IOCB_ERROR) { 6837 lpfc_els_free_iocb(phba, elsiocb); 6838 lpfc_nlp_put(ndlp); 6839 } 6840 6841 goto free_rdp_context; 6842 6843 error: 6844 cmdsize = 2 * sizeof(uint32_t); 6845 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 6846 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 6847 if (!elsiocb) 6848 goto free_rdp_context; 6849 6850 icmd = &elsiocb->iocb; 6851 icmd->ulpContext = rdp_context->rx_id; 6852 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6853 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6854 6855 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 6856 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6857 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6858 6859 phba->fc_stat.elsXmitLSRJT++; 6860 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6861 
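	/* Hold a node reference for the LS_RJT completion; if the
	 * reference cannot be taken the iocb is freed and no reject
	 * is sent.
	 */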
elsiocb->context1 = lpfc_nlp_get(ndlp); 6862 if (!elsiocb->context1) { 6863 lpfc_els_free_iocb(phba, elsiocb); 6864 goto free_rdp_context; 6865 } 6866 6867 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6868 if (rc == IOCB_ERROR) { 6869 lpfc_els_free_iocb(phba, elsiocb); 6870 lpfc_nlp_put(ndlp); 6871 } 6872 6873 free_rdp_context: 6874 /* This reference put is for the original unsolicited RDP. If the 6875 * iocb prep failed, there is no reference to remove. 6876 */ 6877 lpfc_nlp_put(ndlp); 6878 kfree(rdp_context); 6879 } 6880 6881 static int 6882 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 6883 { 6884 LPFC_MBOXQ_t *mbox = NULL; 6885 int rc; 6886 6887 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6888 if (!mbox) { 6889 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 6890 "7105 failed to allocate mailbox memory"); 6891 return 1; 6892 } 6893 6894 if (lpfc_sli4_dump_page_a0(phba, mbox)) 6895 goto prep_mbox_fail; 6896 mbox->vport = rdp_context->ndlp->vport; 6897 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 6898 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 6899 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6900 if (rc == MBX_NOT_FINISHED) 6901 goto issue_mbox_fail; 6902 6903 return 0; 6904 6905 prep_mbox_fail: 6906 issue_mbox_fail: 6907 mempool_free(mbox, phba->mbox_mem_pool); 6908 return 1; 6909 } 6910 6911 /* 6912 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 6913 * @vport: pointer to a host virtual N_Port data structure. 6914 * @cmdiocb: pointer to lpfc command iocb data structure. 6915 * @ndlp: pointer to a node-list data structure. 6916 * 6917 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 6918 * IOCB. First, the payload of the unsolicited RDP is checked. 6919 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 6920 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 6921 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 6922 * gather all data and send RDP response. 6923 * 6924 * Return code 6925 * 0 - Sent the acc response 6926 * 1 - Sent the reject response. 
6927 */ 6928 static int 6929 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6930 struct lpfc_nodelist *ndlp) 6931 { 6932 struct lpfc_hba *phba = vport->phba; 6933 struct lpfc_dmabuf *pcmd; 6934 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 6935 struct fc_rdp_req_frame *rdp_req; 6936 struct lpfc_rdp_context *rdp_context; 6937 IOCB_t *cmd = NULL; 6938 struct ls_rjt stat; 6939 6940 if (phba->sli_rev < LPFC_SLI_REV4 || 6941 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6942 LPFC_SLI_INTF_IF_TYPE_2) { 6943 rjt_err = LSRJT_UNABLE_TPC; 6944 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6945 goto error; 6946 } 6947 6948 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 6949 rjt_err = LSRJT_UNABLE_TPC; 6950 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6951 goto error; 6952 } 6953 6954 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6955 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 6956 6957 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6958 "2422 ELS RDP Request " 6959 "dec len %d tag x%x port_id %d len %d\n", 6960 be32_to_cpu(rdp_req->rdp_des_length), 6961 be32_to_cpu(rdp_req->nport_id_desc.tag), 6962 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 6963 be32_to_cpu(rdp_req->nport_id_desc.length)); 6964 6965 if (sizeof(struct fc_rdp_nport_desc) != 6966 be32_to_cpu(rdp_req->rdp_des_length)) 6967 goto rjt_logerr; 6968 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 6969 goto rjt_logerr; 6970 if (RDP_NPORT_ID_SIZE != 6971 be32_to_cpu(rdp_req->nport_id_desc.length)) 6972 goto rjt_logerr; 6973 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 6974 if (!rdp_context) { 6975 rjt_err = LSRJT_UNABLE_TPC; 6976 goto error; 6977 } 6978 6979 cmd = &cmdiocb->iocb; 6980 rdp_context->ndlp = lpfc_nlp_get(ndlp); 6981 if (!rdp_context->ndlp) { 6982 kfree(rdp_context); 6983 rjt_err = LSRJT_UNABLE_TPC; 6984 goto error; 6985 } 6986 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 6987 rdp_context->rx_id = cmd->ulpContext; 6988 rdp_context->cmpl = lpfc_els_rdp_cmpl; 6989 if (lpfc_get_rdp_info(phba, rdp_context)) { 6990 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 6991 "2423 Unable to send mailbox"); 6992 kfree(rdp_context); 6993 rjt_err = LSRJT_UNABLE_TPC; 6994 lpfc_nlp_put(ndlp); 6995 goto error; 6996 } 6997 6998 return 0; 6999 7000 rjt_logerr: 7001 rjt_err = LSRJT_LOGICAL_ERR; 7002 7003 error: 7004 memset(&stat, 0, sizeof(stat)); 7005 stat.un.b.lsRjtRsnCode = rjt_err; 7006 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 7007 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7008 return 1; 7009 } 7010 7011 7012 static void 7013 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7014 { 7015 MAILBOX_t *mb; 7016 IOCB_t *icmd; 7017 uint8_t *pcmd; 7018 struct lpfc_iocbq *elsiocb; 7019 struct lpfc_nodelist *ndlp; 7020 struct ls_rjt *stat; 7021 union lpfc_sli4_cfg_shdr *shdr; 7022 struct lpfc_lcb_context *lcb_context; 7023 struct fc_lcb_res_frame *lcb_res; 7024 uint32_t cmdsize, shdr_status, shdr_add_status; 7025 int rc; 7026 7027 mb = &pmb->u.mb; 7028 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 7029 ndlp = lcb_context->ndlp; 7030 pmb->ctx_ndlp = NULL; 7031 pmb->ctx_buf = NULL; 7032 7033 shdr = (union lpfc_sli4_cfg_shdr *) 7034 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 7035 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 7036 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 7037 7038 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 7039 "0194 SET_BEACON_CONFIG mailbox " 7040 "completed 
with status x%x add_status x%x," 7041 " mbx status x%x\n", 7042 shdr_status, shdr_add_status, mb->mbxStatus); 7043 7044 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 7045 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 7046 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 7047 mempool_free(pmb, phba->mbox_mem_pool); 7048 goto error; 7049 } 7050 7051 mempool_free(pmb, phba->mbox_mem_pool); 7052 cmdsize = sizeof(struct fc_lcb_res_frame); 7053 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7054 lpfc_max_els_tries, ndlp, 7055 ndlp->nlp_DID, ELS_CMD_ACC); 7056 7057 /* Decrement the ndlp reference count from previous mbox command */ 7058 lpfc_nlp_put(ndlp); 7059 7060 if (!elsiocb) 7061 goto free_lcb_context; 7062 7063 lcb_res = (struct fc_lcb_res_frame *) 7064 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7065 7066 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 7067 icmd = &elsiocb->iocb; 7068 icmd->ulpContext = lcb_context->rx_id; 7069 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7070 7071 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7072 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 7073 lcb_res->lcb_sub_command = lcb_context->sub_command; 7074 lcb_res->lcb_type = lcb_context->type; 7075 lcb_res->capability = lcb_context->capability; 7076 lcb_res->lcb_frequency = lcb_context->frequency; 7077 lcb_res->lcb_duration = lcb_context->duration; 7078 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7079 phba->fc_stat.elsXmitACC++; 7080 7081 elsiocb->context1 = lpfc_nlp_get(ndlp); 7082 if (!elsiocb->context1) { 7083 lpfc_els_free_iocb(phba, elsiocb); 7084 goto out; 7085 } 7086 7087 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7088 if (rc == IOCB_ERROR) { 7089 lpfc_els_free_iocb(phba, elsiocb); 7090 lpfc_nlp_put(ndlp); 7091 } 7092 out: 7093 kfree(lcb_context); 7094 return; 7095 7096 error: 7097 cmdsize = sizeof(struct fc_lcb_res_frame); 7098 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7099 lpfc_max_els_tries, ndlp, 7100 ndlp->nlp_DID, ELS_CMD_LS_RJT); 7101 lpfc_nlp_put(ndlp); 7102 if (!elsiocb) 7103 goto free_lcb_context; 7104 7105 icmd = &elsiocb->iocb; 7106 icmd->ulpContext = lcb_context->rx_id; 7107 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 7108 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 7109 7110 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 7111 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 7112 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7113 7114 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 7115 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 7116 7117 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7118 phba->fc_stat.elsXmitLSRJT++; 7119 elsiocb->context1 = lpfc_nlp_get(ndlp); 7120 if (!elsiocb->context1) { 7121 lpfc_els_free_iocb(phba, elsiocb); 7122 goto free_lcb_context; 7123 } 7124 7125 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7126 if (rc == IOCB_ERROR) { 7127 lpfc_els_free_iocb(phba, elsiocb); 7128 lpfc_nlp_put(ndlp); 7129 } 7130 free_lcb_context: 7131 kfree(lcb_context); 7132 } 7133 7134 static int 7135 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 7136 struct lpfc_lcb_context *lcb_context, 7137 uint32_t beacon_state) 7138 { 7139 struct lpfc_hba *phba = vport->phba; 7140 union lpfc_sli4_cfg_shdr *cfg_shdr; 7141 LPFC_MBOXQ_t *mbox = NULL; 7142 uint32_t len; 7143 int rc; 7144 7145 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7146 if (!mbox) 7147 return 1; 7148 7149 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 7150 len = sizeof(struct 
lpfc_mbx_set_beacon_config) - 7151 sizeof(struct lpfc_sli4_cfg_mhdr); 7152 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7153 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 7154 LPFC_SLI4_MBX_EMBED); 7155 mbox->ctx_ndlp = (void *)lcb_context; 7156 mbox->vport = phba->pport; 7157 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 7158 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 7159 phba->sli4_hba.physical_port); 7160 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 7161 beacon_state); 7162 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 7163 7164 /* 7165 * Check bv1s bit before issuing the mailbox 7166 * if bv1s == 1, LCB V1 supported 7167 * else, LCB V0 supported 7168 */ 7169 7170 if (phba->sli4_hba.pc_sli4_params.bv1s) { 7171 /* COMMON_SET_BEACON_CONFIG_V1 */ 7172 cfg_shdr->request.word9 = BEACON_VERSION_V1; 7173 lcb_context->capability |= LCB_CAPABILITY_DURATION; 7174 bf_set(lpfc_mbx_set_beacon_port_type, 7175 &mbox->u.mqe.un.beacon_config, 0); 7176 bf_set(lpfc_mbx_set_beacon_duration_v1, 7177 &mbox->u.mqe.un.beacon_config, 7178 be16_to_cpu(lcb_context->duration)); 7179 } else { 7180 /* COMMON_SET_BEACON_CONFIG_V0 */ 7181 if (be16_to_cpu(lcb_context->duration) != 0) { 7182 mempool_free(mbox, phba->mbox_mem_pool); 7183 return 1; 7184 } 7185 cfg_shdr->request.word9 = BEACON_VERSION_V0; 7186 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 7187 bf_set(lpfc_mbx_set_beacon_state, 7188 &mbox->u.mqe.un.beacon_config, beacon_state); 7189 bf_set(lpfc_mbx_set_beacon_port_type, 7190 &mbox->u.mqe.un.beacon_config, 1); 7191 bf_set(lpfc_mbx_set_beacon_duration, 7192 &mbox->u.mqe.un.beacon_config, 7193 be16_to_cpu(lcb_context->duration)); 7194 } 7195 7196 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 7197 if (rc == MBX_NOT_FINISHED) { 7198 mempool_free(mbox, phba->mbox_mem_pool); 7199 return 1; 7200 } 7201 7202 return 0; 7203 } 7204 7205 7206 /** 7207 * lpfc_els_rcv_lcb - Process an unsolicited LCB 7208 * @vport: pointer to a host virtual N_Port data structure. 7209 * @cmdiocb: pointer to lpfc command iocb data structure. 7210 * @ndlp: pointer to a node-list data structure. 7211 * 7212 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 7213 * First, the payload of the unsolicited LCB is checked. 7214 * Then based on Subcommand beacon will either turn on or off. 7215 * 7216 * Return code 7217 * 0 - Sent the acc response 7218 * 1 - Sent the reject response. 
7219 **/ 7220 static int 7221 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7222 struct lpfc_nodelist *ndlp) 7223 { 7224 struct lpfc_hba *phba = vport->phba; 7225 struct lpfc_dmabuf *pcmd; 7226 uint8_t *lp; 7227 struct fc_lcb_request_frame *beacon; 7228 struct lpfc_lcb_context *lcb_context; 7229 u8 state, rjt_err = 0; 7230 struct ls_rjt stat; 7231 7232 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 7233 lp = (uint8_t *)pcmd->virt; 7234 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 7235 7236 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7237 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 7238 "type x%x frequency %x duration x%x\n", 7239 lp[0], lp[1], lp[2], 7240 beacon->lcb_command, 7241 beacon->lcb_sub_command, 7242 beacon->lcb_type, 7243 beacon->lcb_frequency, 7244 be16_to_cpu(beacon->lcb_duration)); 7245 7246 if (beacon->lcb_sub_command != LPFC_LCB_ON && 7247 beacon->lcb_sub_command != LPFC_LCB_OFF) { 7248 rjt_err = LSRJT_CMD_UNSUPPORTED; 7249 goto rjt; 7250 } 7251 7252 if (phba->sli_rev < LPFC_SLI_REV4 || 7253 phba->hba_flag & HBA_FCOE_MODE || 7254 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 7255 LPFC_SLI_INTF_IF_TYPE_2)) { 7256 rjt_err = LSRJT_CMD_UNSUPPORTED; 7257 goto rjt; 7258 } 7259 7260 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 7261 if (!lcb_context) { 7262 rjt_err = LSRJT_UNABLE_TPC; 7263 goto rjt; 7264 } 7265 7266 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 7267 lcb_context->sub_command = beacon->lcb_sub_command; 7268 lcb_context->capability = 0; 7269 lcb_context->type = beacon->lcb_type; 7270 lcb_context->frequency = beacon->lcb_frequency; 7271 lcb_context->duration = beacon->lcb_duration; 7272 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7273 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 7274 lcb_context->ndlp = lpfc_nlp_get(ndlp); 7275 if (!lcb_context->ndlp) { 7276 rjt_err = LSRJT_UNABLE_TPC; 7277 goto rjt_free; 7278 } 7279 7280 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 7281 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 7282 "0193 failed to send mail box"); 7283 lpfc_nlp_put(ndlp); 7284 rjt_err = LSRJT_UNABLE_TPC; 7285 goto rjt_free; 7286 } 7287 return 0; 7288 7289 rjt_free: 7290 kfree(lcb_context); 7291 rjt: 7292 memset(&stat, 0, sizeof(stat)); 7293 stat.un.b.lsRjtRsnCode = rjt_err; 7294 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7295 return 1; 7296 } 7297 7298 7299 /** 7300 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 7301 * @vport: pointer to a host virtual N_Port data structure. 7302 * 7303 * This routine cleans up any Registration State Change Notification 7304 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 7305 * @vport together with the host_lock is used to prevent multiple thread 7306 * trying to access the RSCN array on a same @vport at the same time. 
7307 **/ 7308 void 7309 lpfc_els_flush_rscn(struct lpfc_vport *vport) 7310 { 7311 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7312 struct lpfc_hba *phba = vport->phba; 7313 int i; 7314 7315 spin_lock_irq(shost->host_lock); 7316 if (vport->fc_rscn_flush) { 7317 /* Another thread is walking fc_rscn_id_list on this vport */ 7318 spin_unlock_irq(shost->host_lock); 7319 return; 7320 } 7321 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 7322 vport->fc_rscn_flush = 1; 7323 spin_unlock_irq(shost->host_lock); 7324 7325 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7326 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 7327 vport->fc_rscn_id_list[i] = NULL; 7328 } 7329 spin_lock_irq(shost->host_lock); 7330 vport->fc_rscn_id_cnt = 0; 7331 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 7332 spin_unlock_irq(shost->host_lock); 7333 lpfc_can_disctmo(vport); 7334 /* Indicate we are done walking this fc_rscn_id_list */ 7335 vport->fc_rscn_flush = 0; 7336 } 7337 7338 /** 7339 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 7340 * @vport: pointer to a host virtual N_Port data structure. 7341 * @did: remote destination port identifier. 7342 * 7343 * This routine checks whether there is any pending Registration State 7344 * Configuration Notification (RSCN) to a @did on @vport. 7345 * 7346 * Return code 7347 * None zero - The @did matched with a pending rscn 7348 * 0 - not able to match @did with a pending rscn 7349 **/ 7350 int 7351 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 7352 { 7353 D_ID ns_did; 7354 D_ID rscn_did; 7355 uint32_t *lp; 7356 uint32_t payload_len, i; 7357 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7358 7359 ns_did.un.word = did; 7360 7361 /* Never match fabric nodes for RSCNs */ 7362 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 7363 return 0; 7364 7365 /* If we are doing a FULL RSCN rediscovery, match everything */ 7366 if (vport->fc_flag & FC_RSCN_DISCOVERY) 7367 return did; 7368 7369 spin_lock_irq(shost->host_lock); 7370 if (vport->fc_rscn_flush) { 7371 /* Another thread is walking fc_rscn_id_list on this vport */ 7372 spin_unlock_irq(shost->host_lock); 7373 return 0; 7374 } 7375 /* Indicate we are walking fc_rscn_id_list on this vport */ 7376 vport->fc_rscn_flush = 1; 7377 spin_unlock_irq(shost->host_lock); 7378 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 7379 lp = vport->fc_rscn_id_list[i]->virt; 7380 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7381 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7382 while (payload_len) { 7383 rscn_did.un.word = be32_to_cpu(*lp++); 7384 payload_len -= sizeof(uint32_t); 7385 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 7386 case RSCN_ADDRESS_FORMAT_PORT: 7387 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7388 && (ns_did.un.b.area == rscn_did.un.b.area) 7389 && (ns_did.un.b.id == rscn_did.un.b.id)) 7390 goto return_did_out; 7391 break; 7392 case RSCN_ADDRESS_FORMAT_AREA: 7393 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 7394 && (ns_did.un.b.area == rscn_did.un.b.area)) 7395 goto return_did_out; 7396 break; 7397 case RSCN_ADDRESS_FORMAT_DOMAIN: 7398 if (ns_did.un.b.domain == rscn_did.un.b.domain) 7399 goto return_did_out; 7400 break; 7401 case RSCN_ADDRESS_FORMAT_FABRIC: 7402 goto return_did_out; 7403 } 7404 } 7405 } 7406 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 7407 vport->fc_rscn_flush = 0; 7408 return 0; 7409 return_did_out: 7410 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 7411 vport->fc_rscn_flush = 0; 7412 return did; 7413 } 7414 7415 /** 7416 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 7417 * @vport: pointer to a host virtual N_Port data structure. 7418 * 7419 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 7420 * state machine for a @vport's nodes that are with pending RSCN (Registration 7421 * State Change Notification). 7422 * 7423 * Return code 7424 * 0 - Successful (currently alway return 0) 7425 **/ 7426 static int 7427 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 7428 { 7429 struct lpfc_nodelist *ndlp = NULL; 7430 7431 /* Move all affected nodes by pending RSCNs to NPR state. */ 7432 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 7433 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 7434 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 7435 continue; 7436 7437 /* NVME Target mode does not do RSCN Recovery. */ 7438 if (vport->phba->nvmet_support) 7439 continue; 7440 7441 /* If we are in the process of doing discovery on this 7442 * NPort, let it continue on its own. 7443 */ 7444 switch (ndlp->nlp_state) { 7445 case NLP_STE_PLOGI_ISSUE: 7446 case NLP_STE_ADISC_ISSUE: 7447 case NLP_STE_REG_LOGIN_ISSUE: 7448 case NLP_STE_PRLI_ISSUE: 7449 case NLP_STE_LOGO_ISSUE: 7450 continue; 7451 } 7452 7453 lpfc_disc_state_machine(vport, ndlp, NULL, 7454 NLP_EVT_DEVICE_RECOVERY); 7455 lpfc_cancel_retry_delay_tmo(vport, ndlp); 7456 } 7457 return 0; 7458 } 7459 7460 /** 7461 * lpfc_send_rscn_event - Send an RSCN event to management application 7462 * @vport: pointer to a host virtual N_Port data structure. 7463 * @cmdiocb: pointer to lpfc command iocb data structure. 7464 * 7465 * lpfc_send_rscn_event sends an RSCN netlink event to management 7466 * applications. 7467 */ 7468 static void 7469 lpfc_send_rscn_event(struct lpfc_vport *vport, 7470 struct lpfc_iocbq *cmdiocb) 7471 { 7472 struct lpfc_dmabuf *pcmd; 7473 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7474 uint32_t *payload_ptr; 7475 uint32_t payload_len; 7476 struct lpfc_rscn_event_header *rscn_event_data; 7477 7478 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7479 payload_ptr = (uint32_t *) pcmd->virt; 7480 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 7481 7482 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 7483 payload_len, GFP_KERNEL); 7484 if (!rscn_event_data) { 7485 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7486 "0147 Failed to allocate memory for RSCN event\n"); 7487 return; 7488 } 7489 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 7490 rscn_event_data->payload_length = payload_len; 7491 memcpy(rscn_event_data->rscn_payload, payload_ptr, 7492 payload_len); 7493 7494 fc_host_post_vendor_event(shost, 7495 fc_get_event_number(), 7496 sizeof(struct lpfc_rscn_event_header) + payload_len, 7497 (char *)rscn_event_data, 7498 LPFC_NL_VENDOR_ID); 7499 7500 kfree(rscn_event_data); 7501 } 7502 7503 /** 7504 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 7505 * @vport: pointer to a host virtual N_Port data structure. 7506 * @cmdiocb: pointer to lpfc command iocb data structure. 7507 * @ndlp: pointer to a node-list data structure. 7508 * 7509 * This routine processes an unsolicited RSCN (Registration State Change 7510 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 7511 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 7512 * discover state machine is about to begin discovery, it just accepts the 7513 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 7514 * contains N_Port IDs for other vports on this HBA, it just accepts the 7515 * RSCN and ignore processing it. If the state machine is in the recovery 7516 * state, the fc_rscn_id_list of this @vport is walked and the 7517 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 7518 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 7519 * routine is invoked to handle the RSCN event. 7520 * 7521 * Return code 7522 * 0 - Just sent the acc response 7523 * 1 - Sent the acc response and waited for name server completion 7524 **/ 7525 static int 7526 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7527 struct lpfc_nodelist *ndlp) 7528 { 7529 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7530 struct lpfc_hba *phba = vport->phba; 7531 struct lpfc_dmabuf *pcmd; 7532 uint32_t *lp, *datap; 7533 uint32_t payload_len, length, nportid, *cmd; 7534 int rscn_cnt; 7535 int rscn_id = 0, hba_id = 0; 7536 int i, tmo; 7537 7538 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7539 lp = (uint32_t *) pcmd->virt; 7540 7541 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 7542 payload_len -= sizeof(uint32_t); /* take off word 0 */ 7543 /* RSCN received */ 7544 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7545 "0214 RSCN received Data: x%x x%x x%x x%x\n", 7546 vport->fc_flag, payload_len, *lp, 7547 vport->fc_rscn_id_cnt); 7548 7549 /* Send an RSCN event to the management application */ 7550 lpfc_send_rscn_event(vport, cmdiocb); 7551 7552 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 7553 fc_host_post_event(shost, fc_get_event_number(), 7554 FCH_EVT_RSCN, lp[i]); 7555 7556 /* Check if RSCN is coming from a direct-connected remote NPort */ 7557 if (vport->fc_flag & FC_PT2PT) { 7558 /* If so, just ACC it, no other action needed for now */ 7559 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7560 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 7561 *lp, vport->fc_flag, payload_len); 7562 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7563 7564 /* Check to see if we need to NVME rescan this target 7565 * remoteport. 7566 */ 7567 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 7568 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 7569 lpfc_nvme_rescan_port(vport, ndlp); 7570 return 0; 7571 } 7572 7573 /* If we are about to begin discovery, just ACC the RSCN. 7574 * Discovery processing will satisfy it. 7575 */ 7576 if (vport->port_state <= LPFC_NS_QRY) { 7577 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7578 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 7579 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7580 7581 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7582 return 0; 7583 } 7584 7585 /* If this RSCN just contains NPortIDs for other vports on this HBA, 7586 * just ACC and ignore it. 
7587 */ 7588 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 7589 !(vport->cfg_peer_port_login)) { 7590 i = payload_len; 7591 datap = lp; 7592 while (i > 0) { 7593 nportid = *datap++; 7594 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 7595 i -= sizeof(uint32_t); 7596 rscn_id++; 7597 if (lpfc_find_vport_by_did(phba, nportid)) 7598 hba_id++; 7599 } 7600 if (rscn_id == hba_id) { 7601 /* ALL NPortIDs in RSCN are on HBA */ 7602 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7603 "0219 Ignore RSCN " 7604 "Data: x%x x%x x%x x%x\n", 7605 vport->fc_flag, payload_len, 7606 *lp, vport->fc_rscn_id_cnt); 7607 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7608 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 7609 ndlp->nlp_DID, vport->port_state, 7610 ndlp->nlp_flag); 7611 7612 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 7613 ndlp, NULL); 7614 return 0; 7615 } 7616 } 7617 7618 spin_lock_irq(shost->host_lock); 7619 if (vport->fc_rscn_flush) { 7620 /* Another thread is walking fc_rscn_id_list on this vport */ 7621 vport->fc_flag |= FC_RSCN_DISCOVERY; 7622 spin_unlock_irq(shost->host_lock); 7623 /* Send back ACC */ 7624 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7625 return 0; 7626 } 7627 /* Indicate we are walking fc_rscn_id_list on this vport */ 7628 vport->fc_rscn_flush = 1; 7629 spin_unlock_irq(shost->host_lock); 7630 /* Get the array count after successfully have the token */ 7631 rscn_cnt = vport->fc_rscn_id_cnt; 7632 /* If we are already processing an RSCN, save the received 7633 * RSCN payload buffer, cmdiocb->context2 to process later. 7634 */ 7635 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 7636 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7637 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 7638 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7639 7640 spin_lock_irq(shost->host_lock); 7641 vport->fc_flag |= FC_RSCN_DEFERRED; 7642 7643 /* Restart disctmo if its already running */ 7644 if (vport->fc_flag & FC_DISC_TMO) { 7645 tmo = ((phba->fc_ratov * 3) + 3); 7646 mod_timer(&vport->fc_disctmo, 7647 jiffies + msecs_to_jiffies(1000 * tmo)); 7648 } 7649 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 7650 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 7651 vport->fc_flag |= FC_RSCN_MODE; 7652 spin_unlock_irq(shost->host_lock); 7653 if (rscn_cnt) { 7654 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 7655 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 7656 } 7657 if ((rscn_cnt) && 7658 (payload_len + length <= LPFC_BPL_SIZE)) { 7659 *cmd &= ELS_CMD_MASK; 7660 *cmd |= cpu_to_be32(payload_len + length); 7661 memcpy(((uint8_t *)cmd) + length, lp, 7662 payload_len); 7663 } else { 7664 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 7665 vport->fc_rscn_id_cnt++; 7666 /* If we zero, cmdiocb->context2, the calling 7667 * routine will not try to free it. 
7668 */ 7669 cmdiocb->context2 = NULL; 7670 } 7671 /* Deferred RSCN */ 7672 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7673 "0235 Deferred RSCN " 7674 "Data: x%x x%x x%x\n", 7675 vport->fc_rscn_id_cnt, vport->fc_flag, 7676 vport->port_state); 7677 } else { 7678 vport->fc_flag |= FC_RSCN_DISCOVERY; 7679 spin_unlock_irq(shost->host_lock); 7680 /* ReDiscovery RSCN */ 7681 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7682 "0234 ReDiscovery RSCN " 7683 "Data: x%x x%x x%x\n", 7684 vport->fc_rscn_id_cnt, vport->fc_flag, 7685 vport->port_state); 7686 } 7687 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7688 vport->fc_rscn_flush = 0; 7689 /* Send back ACC */ 7690 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7691 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7692 lpfc_rscn_recovery_check(vport); 7693 return 0; 7694 } 7695 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7696 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 7697 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7698 7699 spin_lock_irq(shost->host_lock); 7700 vport->fc_flag |= FC_RSCN_MODE; 7701 spin_unlock_irq(shost->host_lock); 7702 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 7703 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7704 vport->fc_rscn_flush = 0; 7705 /* 7706 * If we zero, cmdiocb->context2, the calling routine will 7707 * not try to free it. 7708 */ 7709 cmdiocb->context2 = NULL; 7710 lpfc_set_disctmo(vport); 7711 /* Send back ACC */ 7712 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7713 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7714 lpfc_rscn_recovery_check(vport); 7715 return lpfc_els_handle_rscn(vport); 7716 } 7717 7718 /** 7719 * lpfc_els_handle_rscn - Handle rscn for a vport 7720 * @vport: pointer to a host virtual N_Port data structure. 7721 * 7722 * This routine handles the Registration State Configuration Notification 7723 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 7724 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 7725 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 7726 * NameServer shall be issued. If CT command to the NameServer fails to be 7727 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 7728 * RSCN activities with the @vport. 7729 * 7730 * Return code 7731 * 0 - Cleaned up rscn on the @vport 7732 * 1 - Wait for plogi to name server before proceed 7733 **/ 7734 int 7735 lpfc_els_handle_rscn(struct lpfc_vport *vport) 7736 { 7737 struct lpfc_nodelist *ndlp; 7738 struct lpfc_hba *phba = vport->phba; 7739 7740 /* Ignore RSCN if the port is being torn down. */ 7741 if (vport->load_flag & FC_UNLOADING) { 7742 lpfc_els_flush_rscn(vport); 7743 return 0; 7744 } 7745 7746 /* Start timer for RSCN processing */ 7747 lpfc_set_disctmo(vport); 7748 7749 /* RSCN processed */ 7750 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7751 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 7752 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 7753 vport->port_state, vport->num_disc_nodes, 7754 vport->gidft_inp); 7755 7756 /* To process RSCN, first compare RSCN data with NameServer */ 7757 vport->fc_ns_retry = 0; 7758 vport->num_disc_nodes = 0; 7759 7760 ndlp = lpfc_findnode_did(vport, NameServer_DID); 7761 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 7762 /* Good ndlp, issue CT Request to NameServer. Need to 7763 * know how many gidfts were issued. 
If none, then just
		 * flush the RSCN.  Otherwise, the outstanding requests
		 * need to complete.
		 */
		if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
			if (lpfc_issue_gidft(vport) > 0)
				return 1;
		} else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
			if (lpfc_issue_gidpt(vport) > 0)
				return 1;
		} else {
			return 1;
		}
	} else {
		/* Nameserver login in question.  Revalidate. */
		if (ndlp) {
			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		} else {
			ndlp = lpfc_nlp_init(vport, NameServer_DID);
			if (!ndlp) {
				lpfc_els_flush_rscn(vport);
				return 0;
			}
			ndlp->nlp_prev_state = ndlp->nlp_state;
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
		}
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
		/* Wait for NameServer login cmpl before we can
		 * continue
		 */
		return 1;
	}

	lpfc_els_flush_rscn(vport);
	return 0;
}

/**
 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
 * point topology. As an unsolicited FLOGI should not be received in loop
 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
 * lpfc_check_sparm() routine is invoked to check the parameters in the
 * unsolicited FLOGI. If the parameter validation fails, the routine
 * lpfc_els_rsp_reject() shall be called with reject reason code set to
 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the
 * FLOGI shall be compared with the Port WWN of the @vport to determine who
 * will initiate PLOGI. The party with the higher lexicographical value shall
 * have higher priority (as the winning port) and will initiate PLOGI and
 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
7822 * 7823 * Return code 7824 * 0 - Successfully processed the unsolicited flogi 7825 * 1 - Failed to process the unsolicited flogi 7826 **/ 7827 static int 7828 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7829 struct lpfc_nodelist *ndlp) 7830 { 7831 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7832 struct lpfc_hba *phba = vport->phba; 7833 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7834 uint32_t *lp = (uint32_t *) pcmd->virt; 7835 IOCB_t *icmd = &cmdiocb->iocb; 7836 struct serv_parm *sp; 7837 LPFC_MBOXQ_t *mbox; 7838 uint32_t cmd, did; 7839 int rc; 7840 uint32_t fc_flag = 0; 7841 uint32_t port_state = 0; 7842 7843 cmd = *lp++; 7844 sp = (struct serv_parm *) lp; 7845 7846 /* FLOGI received */ 7847 7848 lpfc_set_disctmo(vport); 7849 7850 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7851 /* We should never receive a FLOGI in loop mode, ignore it */ 7852 did = icmd->un.elsreq64.remoteID; 7853 7854 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 7855 Loop Mode */ 7856 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7857 "0113 An FLOGI ELS command x%x was " 7858 "received from DID x%x in Loop Mode\n", 7859 cmd, did); 7860 return 1; 7861 } 7862 7863 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 7864 7865 /* 7866 * If our portname is greater than the remote portname, 7867 * then we initiate Nport login. 7868 */ 7869 7870 rc = memcmp(&vport->fc_portname, &sp->portName, 7871 sizeof(struct lpfc_name)); 7872 7873 if (!rc) { 7874 if (phba->sli_rev < LPFC_SLI_REV4) { 7875 mbox = mempool_alloc(phba->mbox_mem_pool, 7876 GFP_KERNEL); 7877 if (!mbox) 7878 return 1; 7879 lpfc_linkdown(phba); 7880 lpfc_init_link(phba, mbox, 7881 phba->cfg_topology, 7882 phba->cfg_link_speed); 7883 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 7884 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7885 mbox->vport = vport; 7886 rc = lpfc_sli_issue_mbox(phba, mbox, 7887 MBX_NOWAIT); 7888 lpfc_set_loopback_flag(phba); 7889 if (rc == MBX_NOT_FINISHED) 7890 mempool_free(mbox, phba->mbox_mem_pool); 7891 return 1; 7892 } 7893 7894 /* abort the flogi coming back to ourselves 7895 * due to external loopback on the port. 7896 */ 7897 lpfc_els_abort_flogi(phba); 7898 return 0; 7899 7900 } else if (rc > 0) { /* greater than */ 7901 spin_lock_irq(shost->host_lock); 7902 vport->fc_flag |= FC_PT2PT_PLOGI; 7903 spin_unlock_irq(shost->host_lock); 7904 7905 /* If we have the high WWPN we can assign our own 7906 * myDID; otherwise, we have to WAIT for a PLOGI 7907 * from the remote NPort to find out what it 7908 * will be. 7909 */ 7910 vport->fc_myDID = PT2PT_LocalID; 7911 } else { 7912 vport->fc_myDID = PT2PT_RemoteID; 7913 } 7914 7915 /* 7916 * The vport state should go to LPFC_FLOGI only 7917 * AFTER we issue a FLOGI, not receive one. 7918 */ 7919 spin_lock_irq(shost->host_lock); 7920 fc_flag = vport->fc_flag; 7921 port_state = vport->port_state; 7922 vport->fc_flag |= FC_PT2PT; 7923 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7924 7925 /* Acking an unsol FLOGI. Count 1 for link bounce 7926 * work-around. 7927 */ 7928 vport->rcv_flogi_cnt++; 7929 spin_unlock_irq(shost->host_lock); 7930 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7931 "3311 Rcv Flogi PS x%x new PS x%x " 7932 "fc_flag x%x new fc_flag x%x\n", 7933 port_state, vport->port_state, 7934 fc_flag, vport->fc_flag); 7935 7936 /* 7937 * We temporarily set fc_myDID to make it look like we are 7938 * a Fabric. This is done just so we end up with the right 7939 * did / sid on the FLOGI ACC rsp. 
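	 * fc_myDID is restored to the saved DID right after the ACC is
	 * sent, or before returning when the ACC is deferred until after
	 * our own FLOGI has been issued.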
7940 */ 7941 did = vport->fc_myDID; 7942 vport->fc_myDID = Fabric_DID; 7943 7944 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 7945 7946 /* Defer ACC response until AFTER we issue a FLOGI */ 7947 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 7948 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext; 7949 phba->defer_flogi_acc_ox_id = 7950 cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7951 7952 vport->fc_myDID = did; 7953 7954 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7955 "3344 Deferring FLOGI ACC: rx_id: x%x," 7956 " ox_id: x%x, hba_flag x%x\n", 7957 phba->defer_flogi_acc_rx_id, 7958 phba->defer_flogi_acc_ox_id, phba->hba_flag); 7959 7960 phba->defer_flogi_acc_flag = true; 7961 7962 return 0; 7963 } 7964 7965 /* Send back ACC */ 7966 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 7967 7968 /* Now lets put fc_myDID back to what its supposed to be */ 7969 vport->fc_myDID = did; 7970 7971 return 0; 7972 } 7973 7974 /** 7975 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 7976 * @vport: pointer to a host virtual N_Port data structure. 7977 * @cmdiocb: pointer to lpfc command iocb data structure. 7978 * @ndlp: pointer to a node-list data structure. 7979 * 7980 * This routine processes Request Node Identification Data (RNID) IOCB 7981 * received as an ELS unsolicited event. Only when the RNID specified format 7982 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 7983 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 7984 * Accept (ACC) the RNID ELS command. All the other RNID formats are 7985 * rejected by invoking the lpfc_els_rsp_reject() routine. 7986 * 7987 * Return code 7988 * 0 - Successfully processed rnid iocb (currently always return 0) 7989 **/ 7990 static int 7991 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7992 struct lpfc_nodelist *ndlp) 7993 { 7994 struct lpfc_dmabuf *pcmd; 7995 uint32_t *lp; 7996 RNID *rn; 7997 struct ls_rjt stat; 7998 7999 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8000 lp = (uint32_t *) pcmd->virt; 8001 8002 lp++; 8003 rn = (RNID *) lp; 8004 8005 /* RNID received */ 8006 8007 switch (rn->Format) { 8008 case 0: 8009 case RNID_TOPOLOGY_DISC: 8010 /* Send back ACC */ 8011 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 8012 break; 8013 default: 8014 /* Reject this request because format not supported */ 8015 stat.un.b.lsRjtRsvd0 = 0; 8016 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8017 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8018 stat.un.b.vendorUnique = 0; 8019 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8020 NULL); 8021 } 8022 return 0; 8023 } 8024 8025 /** 8026 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 8027 * @vport: pointer to a host virtual N_Port data structure. 8028 * @cmdiocb: pointer to lpfc command iocb data structure. 8029 * @ndlp: pointer to a node-list data structure. 
8030 * 8031 * Return code 8032 * 0 - Successfully processed echo iocb (currently always return 0) 8033 **/ 8034 static int 8035 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8036 struct lpfc_nodelist *ndlp) 8037 { 8038 uint8_t *pcmd; 8039 8040 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 8041 8042 /* skip over first word of echo command to find echo data */ 8043 pcmd += sizeof(uint32_t); 8044 8045 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 8046 return 0; 8047 } 8048 8049 /** 8050 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 8051 * @vport: pointer to a host virtual N_Port data structure. 8052 * @cmdiocb: pointer to lpfc command iocb data structure. 8053 * @ndlp: pointer to a node-list data structure. 8054 * 8055 * This routine processes a Link Incident Report Registration(LIRR) IOCB 8056 * received as an ELS unsolicited event. Currently, this function just invokes 8057 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 8058 * 8059 * Return code 8060 * 0 - Successfully processed lirr iocb (currently always return 0) 8061 **/ 8062 static int 8063 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8064 struct lpfc_nodelist *ndlp) 8065 { 8066 struct ls_rjt stat; 8067 8068 /* For now, unconditionally reject this command */ 8069 stat.un.b.lsRjtRsvd0 = 0; 8070 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8071 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8072 stat.un.b.vendorUnique = 0; 8073 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 8074 return 0; 8075 } 8076 8077 /** 8078 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 8079 * @vport: pointer to a host virtual N_Port data structure. 8080 * @cmdiocb: pointer to lpfc command iocb data structure. 8081 * @ndlp: pointer to a node-list data structure. 8082 * 8083 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 8084 * received as an ELS unsolicited event. A request to RRQ shall only 8085 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 8086 * Nx_Port N_Port_ID of the target Exchange is the same as the 8087 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 8088 * not accepted, an LS_RJT with reason code "Unable to perform 8089 * command request" and reason code explanation "Invalid Originator 8090 * S_ID" shall be returned. For now, we just unconditionally accept 8091 * RRQ from the target. 8092 **/ 8093 static void 8094 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8095 struct lpfc_nodelist *ndlp) 8096 { 8097 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8098 if (vport->phba->sli_rev == LPFC_SLI_REV4) 8099 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 8100 } 8101 8102 /** 8103 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 8104 * @phba: pointer to lpfc hba data structure. 8105 * @pmb: pointer to the driver internal queue element for mailbox command. 8106 * 8107 * This routine is the completion callback function for the MBX_READ_LNK_STAT 8108 * mailbox command. This callback function is to actually send the Accept 8109 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 8110 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 8111 * mailbox command, constructs the RLS response with the link statistics 8112 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 8113 * response to the RLS. 
8114 * 8115 * Note that the ndlp reference count will be incremented by 1 for holding the 8116 * ndlp and the reference to ndlp will be stored into the context1 field of 8117 * the IOCB for the completion callback function to the RLS Accept Response 8118 * ELS IOCB command. 8119 * 8120 **/ 8121 static void 8122 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8123 { 8124 int rc = 0; 8125 MAILBOX_t *mb; 8126 IOCB_t *icmd; 8127 struct RLS_RSP *rls_rsp; 8128 uint8_t *pcmd; 8129 struct lpfc_iocbq *elsiocb; 8130 struct lpfc_nodelist *ndlp; 8131 uint16_t oxid; 8132 uint16_t rxid; 8133 uint32_t cmdsize; 8134 8135 mb = &pmb->u.mb; 8136 8137 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 8138 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 8139 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 8140 pmb->ctx_buf = NULL; 8141 pmb->ctx_ndlp = NULL; 8142 8143 if (mb->mbxStatus) { 8144 mempool_free(pmb, phba->mbox_mem_pool); 8145 return; 8146 } 8147 8148 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 8149 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 8150 lpfc_max_els_tries, ndlp, 8151 ndlp->nlp_DID, ELS_CMD_ACC); 8152 8153 /* Decrement the ndlp reference count from previous mbox command */ 8154 lpfc_nlp_put(ndlp); 8155 8156 if (!elsiocb) { 8157 mempool_free(pmb, phba->mbox_mem_pool); 8158 return; 8159 } 8160 8161 icmd = &elsiocb->iocb; 8162 icmd->ulpContext = rxid; 8163 icmd->unsli3.rcvsli3.ox_id = oxid; 8164 8165 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8166 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8167 pcmd += sizeof(uint32_t); /* Skip past command */ 8168 rls_rsp = (struct RLS_RSP *)pcmd; 8169 8170 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 8171 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 8172 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 8173 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 8174 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 8175 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 8176 mempool_free(pmb, phba->mbox_mem_pool); 8177 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 8178 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 8179 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 8180 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 8181 elsiocb->iotag, elsiocb->iocb.ulpContext, 8182 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8183 ndlp->nlp_rpi); 8184 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 8185 phba->fc_stat.elsXmitACC++; 8186 elsiocb->context1 = lpfc_nlp_get(ndlp); 8187 if (!elsiocb->context1) { 8188 lpfc_els_free_iocb(phba, elsiocb); 8189 return; 8190 } 8191 8192 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8193 if (rc == IOCB_ERROR) { 8194 lpfc_els_free_iocb(phba, elsiocb); 8195 lpfc_nlp_put(ndlp); 8196 } 8197 return; 8198 } 8199 8200 /** 8201 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 8202 * @vport: pointer to a host virtual N_Port data structure. 8203 * @cmdiocb: pointer to lpfc command iocb data structure. 8204 * @ndlp: pointer to a node-list data structure. 8205 * 8206 * This routine processes Read Link Status (RLS) IOCB received as an 8207 * ELS unsolicited event. It first checks the remote port state. If the 8208 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 8209 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 8210 * response. 
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
 * for reading the HBA link statistics. It is for the callback function,
 * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command,
 * to actually send out the RLS Accept (ACC) response.
 *
 * Return codes
 * 0 - Successfully processed rls iocb (currently always return 0)
 **/
static int
lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct ls_rjt stat;

	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
		/* reject the unsolicited RLS request and done with it */
		goto reject_out;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
	if (mbox) {
		lpfc_read_lnk_stat(phba, mbox);
		mbox->ctx_buf = (void *)((unsigned long)
			((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
			cmdiocb->iocb.ulpContext)); /* rx_id */
		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
		if (!mbox->ctx_ndlp)
			goto node_err;
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
			!= MBX_NOT_FINISHED)
			/* Mbox completion will send ELS Response */
			return 0;
		/* Decrement reference count used for the failed mbox
		 * command.
		 */
		lpfc_nlp_put(ndlp);
node_err:
		mempool_free(mbox, phba->mbox_mem_pool);
	}
reject_out:
	/* issue rejection response */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

/**
 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine processes a Read Timeout Value (RTV) IOCB received as an
 * ELS unsolicited event. It first checks the remote port state. If the
 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
 * response. Otherwise, it sends the Accept (ACC) response to the Read
 * Timeout Value (RTV) unsolicited IOCB event.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the context1 field of
 * the IOCB for the completion callback function to the RTV Accept Response
 * ELS IOCB command.
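 *
 * The Accept payload reports R_A_TOV (in milliseconds) and E_D_TOV, plus a
 * qtov word whose E_D_TOV-resolution bit reflects phba->fc_edtovResol.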
 *
 * Return codes
 * 0 - Successfully processed rtv iocb (currently always return 0)
 **/
static int
lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *ndlp)
{
	int rc = 0;
	struct lpfc_hba *phba = vport->phba;
	struct ls_rjt stat;
	struct RTV_RSP *rtv_rsp;
	uint8_t *pcmd;
	struct lpfc_iocbq *elsiocb;
	uint32_t cmdsize;

	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
		/* reject the unsolicited RTV request and done with it */
		goto reject_out;

	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
				     lpfc_max_els_tries, ndlp,
				     ndlp->nlp_DID, ELS_CMD_ACC);

	if (!elsiocb)
		return 1;

	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
	pcmd += sizeof(uint32_t); /* Skip past command */

	/* use the command's xri in the response */
	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;  /* Xri / rx_id */
	elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;

	rtv_rsp = (struct RTV_RSP *)pcmd;

	/* populate RTV payload */
	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);

	/* Xmit ELS RTV ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
			 "Data: x%x x%x x%x\n",
			 elsiocb->iotag, elsiocb->iocb.ulpContext,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi,
			 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	phba->fc_stat.elsXmitACC++;
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 0;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
	}
	return 0;

reject_out:
	/* issue rejection response */
	stat.un.b.lsRjtRsvd0 = 0;
	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
	stat.un.b.vendorUnique = 0;
	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
	return 0;
}

/**
 * lpfc_issue_els_rrq - Send an ELS RRQ command
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @did: DID of the target.
 * @rrq: Pointer to the rrq struct.
 *
 * Build an ELS RRQ command and send it to the target. If the IOCB is
 * issued successfully, the completion handler will clear the RRQ.
 *
 * Return codes
 * 0 - Successfully sent rrq els iocb.
 * 1 - Failed to send rrq els iocb.
8373 **/ 8374 static int 8375 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 8376 uint32_t did, struct lpfc_node_rrq *rrq) 8377 { 8378 struct lpfc_hba *phba = vport->phba; 8379 struct RRQ *els_rrq; 8380 struct lpfc_iocbq *elsiocb; 8381 uint8_t *pcmd; 8382 uint16_t cmdsize; 8383 int ret; 8384 8385 if (!ndlp) 8386 return 1; 8387 8388 /* If ndlp is not NULL, we will bump the reference count on it */ 8389 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 8390 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 8391 ELS_CMD_RRQ); 8392 if (!elsiocb) 8393 return 1; 8394 8395 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8396 8397 /* For RRQ request, remainder of payload is Exchange IDs */ 8398 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 8399 pcmd += sizeof(uint32_t); 8400 els_rrq = (struct RRQ *) pcmd; 8401 8402 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 8403 bf_set(rrq_rxid, els_rrq, rrq->rxid); 8404 bf_set(rrq_did, els_rrq, vport->fc_myDID); 8405 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 8406 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 8407 8408 8409 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 8410 "Issue RRQ: did:x%x", 8411 did, rrq->xritag, rrq->rxid); 8412 elsiocb->context_un.rrq = rrq; 8413 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 8414 8415 lpfc_nlp_get(ndlp); 8416 elsiocb->context1 = ndlp; 8417 8418 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8419 if (ret == IOCB_ERROR) 8420 goto io_err; 8421 return 0; 8422 8423 io_err: 8424 lpfc_els_free_iocb(phba, elsiocb); 8425 lpfc_nlp_put(ndlp); 8426 return 1; 8427 } 8428 8429 /** 8430 * lpfc_send_rrq - Sends ELS RRQ if needed. 8431 * @phba: pointer to lpfc hba data structure. 8432 * @rrq: pointer to the active rrq. 8433 * 8434 * This routine will call the lpfc_issue_els_rrq if the rrq is 8435 * still active for the xri. If this function returns a failure then 8436 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 8437 * 8438 * Returns 0 Success. 8439 * 1 Failure. 8440 **/ 8441 int 8442 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 8443 { 8444 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 8445 rrq->nlp_DID); 8446 if (!ndlp) 8447 return 1; 8448 8449 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 8450 return lpfc_issue_els_rrq(rrq->vport, ndlp, 8451 rrq->nlp_DID, rrq); 8452 else 8453 return 1; 8454 } 8455 8456 /** 8457 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 8458 * @vport: pointer to a host virtual N_Port data structure. 8459 * @cmdsize: size of the ELS command. 8460 * @oldiocb: pointer to the original lpfc command iocb data structure. 8461 * @ndlp: pointer to a node-list data structure. 8462 * 8463 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 8464 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 8465 * 8466 * Note that the ndlp reference count will be incremented by 1 for holding the 8467 * ndlp and the reference to ndlp will be stored into the context1 field of 8468 * the IOCB for the completion callback function to the RPL Accept Response 8469 * ELS command. 
8470 * 8471 * Return code 8472 * 0 - Successfully issued ACC RPL ELS command 8473 * 1 - Failed to issue ACC RPL ELS command 8474 **/ 8475 static int 8476 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 8477 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 8478 { 8479 int rc = 0; 8480 struct lpfc_hba *phba = vport->phba; 8481 IOCB_t *icmd, *oldcmd; 8482 RPL_RSP rpl_rsp; 8483 struct lpfc_iocbq *elsiocb; 8484 uint8_t *pcmd; 8485 8486 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 8487 ndlp->nlp_DID, ELS_CMD_ACC); 8488 8489 if (!elsiocb) 8490 return 1; 8491 8492 icmd = &elsiocb->iocb; 8493 oldcmd = &oldiocb->iocb; 8494 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 8495 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 8496 8497 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 8498 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 8499 pcmd += sizeof(uint16_t); 8500 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 8501 pcmd += sizeof(uint16_t); 8502 8503 /* Setup the RPL ACC payload */ 8504 rpl_rsp.listLen = be32_to_cpu(1); 8505 rpl_rsp.index = 0; 8506 rpl_rsp.port_num_blk.portNum = 0; 8507 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 8508 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 8509 sizeof(struct lpfc_name)); 8510 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 8511 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 8512 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8513 "0120 Xmit ELS RPL ACC response tag x%x " 8514 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 8515 "rpi x%x\n", 8516 elsiocb->iotag, elsiocb->iocb.ulpContext, 8517 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 8518 ndlp->nlp_rpi); 8519 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 8520 phba->fc_stat.elsXmitACC++; 8521 elsiocb->context1 = lpfc_nlp_get(ndlp); 8522 if (!elsiocb->context1) { 8523 lpfc_els_free_iocb(phba, elsiocb); 8524 return 1; 8525 } 8526 8527 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 8528 if (rc == IOCB_ERROR) { 8529 lpfc_els_free_iocb(phba, elsiocb); 8530 lpfc_nlp_put(ndlp); 8531 return 1; 8532 } 8533 8534 return 0; 8535 } 8536 8537 /** 8538 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 8539 * @vport: pointer to a host virtual N_Port data structure. 8540 * @cmdiocb: pointer to lpfc command iocb data structure. 8541 * @ndlp: pointer to a node-list data structure. 8542 * 8543 * This routine processes Read Port List (RPL) IOCB received as an ELS 8544 * unsolicited event. It first checks the remote port state. If the remote 8545 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 8546 * invokes the lpfc_els_rsp_reject() routine to send reject response. 8547 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 8548 * to accept the RPL. 
8549 * 8550 * Return code 8551 * 0 - Successfully processed rpl iocb (currently always return 0) 8552 **/ 8553 static int 8554 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8555 struct lpfc_nodelist *ndlp) 8556 { 8557 struct lpfc_dmabuf *pcmd; 8558 uint32_t *lp; 8559 uint32_t maxsize; 8560 uint16_t cmdsize; 8561 RPL *rpl; 8562 struct ls_rjt stat; 8563 8564 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 8565 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 8566 /* issue rejection response */ 8567 stat.un.b.lsRjtRsvd0 = 0; 8568 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 8569 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 8570 stat.un.b.vendorUnique = 0; 8571 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 8572 NULL); 8573 /* rejected the unsolicited RPL request and done with it */ 8574 return 0; 8575 } 8576 8577 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8578 lp = (uint32_t *) pcmd->virt; 8579 rpl = (RPL *) (lp + 1); 8580 maxsize = be32_to_cpu(rpl->maxsize); 8581 8582 /* We support only one port */ 8583 if ((rpl->index == 0) && 8584 ((maxsize == 0) || 8585 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 8586 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 8587 } else { 8588 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 8589 } 8590 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 8591 8592 return 0; 8593 } 8594 8595 /** 8596 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 8597 * @vport: pointer to a virtual N_Port data structure. 8598 * @cmdiocb: pointer to lpfc command iocb data structure. 8599 * @ndlp: pointer to a node-list data structure. 8600 * 8601 * This routine processes Fibre Channel Address Resolution Protocol 8602 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 8603 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 8604 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 8605 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 8606 * remote PortName is compared against the FC PortName stored in the @vport 8607 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 8608 * compared against the FC NodeName stored in the @vport data structure. 8609 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 8610 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 8611 * invoked to send out FARP Response to the remote node. Before sending the 8612 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 8613 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 8614 * routine is invoked to log into the remote port first. 
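 *
 * Reduced to a sketch (illustrative only, showing just the port-name case),
 * the match and response logic below is roughly:
 *
 *   if ((fp->Mflags & FARP_MATCH_PORT) &&
 *       !memcmp(&fp->RportName, &vport->fc_portname,
 *               sizeof(struct lpfc_name)))
 *           cnt = 1;
 *   if (cnt && (fp->Rflags & FARP_REQUEST_FARPR))
 *           lpfc_issue_els_farpr(vport, did, 0);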
8615 * 8616 * Return code 8617 * 0 - Either the FARP Match Mode not supported or successfully processed 8618 **/ 8619 static int 8620 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8621 struct lpfc_nodelist *ndlp) 8622 { 8623 struct lpfc_dmabuf *pcmd; 8624 uint32_t *lp; 8625 IOCB_t *icmd; 8626 FARP *fp; 8627 uint32_t cnt, did; 8628 8629 icmd = &cmdiocb->iocb; 8630 did = icmd->un.elsreq64.remoteID; 8631 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8632 lp = (uint32_t *) pcmd->virt; 8633 8634 lp++; 8635 fp = (FARP *) lp; 8636 /* FARP-REQ received from DID <did> */ 8637 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8638 "0601 FARP-REQ received from DID x%x\n", did); 8639 /* We will only support match on WWPN or WWNN */ 8640 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 8641 return 0; 8642 } 8643 8644 cnt = 0; 8645 /* If this FARP command is searching for my portname */ 8646 if (fp->Mflags & FARP_MATCH_PORT) { 8647 if (memcmp(&fp->RportName, &vport->fc_portname, 8648 sizeof(struct lpfc_name)) == 0) 8649 cnt = 1; 8650 } 8651 8652 /* If this FARP command is searching for my nodename */ 8653 if (fp->Mflags & FARP_MATCH_NODE) { 8654 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 8655 sizeof(struct lpfc_name)) == 0) 8656 cnt = 1; 8657 } 8658 8659 if (cnt) { 8660 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 8661 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 8662 /* Log back into the node before sending the FARP. */ 8663 if (fp->Rflags & FARP_REQUEST_PLOGI) { 8664 ndlp->nlp_prev_state = ndlp->nlp_state; 8665 lpfc_nlp_set_state(vport, ndlp, 8666 NLP_STE_PLOGI_ISSUE); 8667 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 8668 } 8669 8670 /* Send a FARP response to that node */ 8671 if (fp->Rflags & FARP_REQUEST_FARPR) 8672 lpfc_issue_els_farpr(vport, did, 0); 8673 } 8674 } 8675 return 0; 8676 } 8677 8678 /** 8679 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 8680 * @vport: pointer to a host virtual N_Port data structure. 8681 * @cmdiocb: pointer to lpfc command iocb data structure. 8682 * @ndlp: pointer to a node-list data structure. 8683 * 8684 * This routine processes Fibre Channel Address Resolution Protocol 8685 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 8686 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 8687 * the FARP response request. 8688 * 8689 * Return code 8690 * 0 - Successfully processed FARPR IOCB (currently always return 0) 8691 **/ 8692 static int 8693 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8694 struct lpfc_nodelist *ndlp) 8695 { 8696 struct lpfc_dmabuf *pcmd; 8697 uint32_t *lp; 8698 IOCB_t *icmd; 8699 uint32_t did; 8700 8701 icmd = &cmdiocb->iocb; 8702 did = icmd->un.elsreq64.remoteID; 8703 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8704 lp = (uint32_t *) pcmd->virt; 8705 8706 lp++; 8707 /* FARP-RSP received from DID <did> */ 8708 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8709 "0600 FARP-RSP received from DID x%x\n", did); 8710 /* ACCEPT the Farp resp request */ 8711 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8712 8713 return 0; 8714 } 8715 8716 /** 8717 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 8718 * @vport: pointer to a host virtual N_Port data structure. 8719 * @cmdiocb: pointer to lpfc command iocb data structure. 8720 * @fan_ndlp: pointer to a node-list data structure. 
 *
 * This routine processes a Fabric Address Notification (FAN) IOCB
 * command received as an ELS unsolicited event. The FAN ELS command will
 * only be processed on a physical port (i.e., the @vport represents the
 * physical port). The fabric NodeName and PortName from the FAN IOCB are
 * compared against those in the phba data structure. If either of them
 * differs, the port has switched fabrics and fabric discovery is restarted
 * by invoking lpfc_issue_init_vfi(). Otherwise, if both are identical, the
 * previous DID is restored and the fabric login is re-registered with
 * lpfc_issue_fabric_reglogin() (SLI-3) or lpfc_issue_reg_vfi() (SLI-4).
 *
 * Return code
 *   0 - Successfully processed fan iocb (currently always returns 0).
 **/
static int
lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
		 struct lpfc_nodelist *fan_ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t *lp;
	FAN *fp;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
	fp = (FAN *) ++lp;
	/* FAN received; FAN does not have a reply sequence */
	if ((vport == phba->pport) &&
	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
			    sizeof(struct lpfc_name))) ||
		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
			    sizeof(struct lpfc_name)))) {
			/* This port has switched fabrics. FLOGI is required */
			lpfc_issue_init_vfi(vport);
		} else {
			/* FAN verified - skip FLOGI */
			vport->fc_myDID = vport->fc_prevDID;
			if (phba->sli_rev < LPFC_SLI_REV4)
				lpfc_issue_fabric_reglogin(vport);
			else {
				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
						 "3138 Need register VFI: (x%x/%x)\n",
						 vport->fc_prevDID, vport->fc_myDID);
				lpfc_issue_reg_vfi(vport);
			}
		}
	}
	return 0;
}

/**
 * lpfc_els_rcv_edc - Process an unsolicited EDC iocb
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
8776 * 8777 * Return code 8778 * 0 - Successfully processed echo iocb (currently always return 0) 8779 **/ 8780 static int 8781 lpfc_els_rcv_edc(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8782 struct lpfc_nodelist *ndlp) 8783 { 8784 struct lpfc_hba *phba = vport->phba; 8785 struct fc_els_edc *edc_req; 8786 struct fc_tlv_desc *tlv; 8787 uint8_t *payload; 8788 uint32_t *ptr, dtag; 8789 const char *dtag_nm; 8790 int desc_cnt = 0, bytes_remain; 8791 bool rcv_cap_desc = false; 8792 8793 payload = ((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 8794 8795 edc_req = (struct fc_els_edc *)payload; 8796 bytes_remain = be32_to_cpu(edc_req->desc_len); 8797 8798 ptr = (uint32_t *)payload; 8799 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_CGN_MGMT, 8800 "3319 Rcv EDC payload len %d: x%x x%x x%x\n", 8801 bytes_remain, be32_to_cpu(*ptr), 8802 be32_to_cpu(*(ptr + 1)), be32_to_cpu(*(ptr + 2))); 8803 8804 /* No signal support unless there is a congestion descriptor */ 8805 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED; 8806 phba->cgn_sig_freq = 0; 8807 phba->cgn_reg_fpin = LPFC_CGN_FPIN_ALARM | LPFC_CGN_FPIN_WARN; 8808 8809 if (bytes_remain <= 0) 8810 goto out; 8811 8812 tlv = edc_req->desc; 8813 8814 /* 8815 * cycle through EDC diagnostic descriptors to find the 8816 * congestion signaling capability descriptor 8817 */ 8818 while (bytes_remain && !rcv_cap_desc) { 8819 if (bytes_remain < FC_TLV_DESC_HDR_SZ) { 8820 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 8821 "6464 Truncated TLV hdr on " 8822 "Diagnostic descriptor[%d]\n", 8823 desc_cnt); 8824 goto out; 8825 } 8826 8827 dtag = be32_to_cpu(tlv->desc_tag); 8828 switch (dtag) { 8829 case ELS_DTAG_LNK_FAULT_CAP: 8830 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 8831 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 8832 sizeof(struct fc_diag_lnkflt_desc)) { 8833 lpfc_printf_log( 8834 phba, KERN_WARNING, LOG_CGN_MGMT, 8835 "6465 Truncated Link Fault Diagnostic " 8836 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 8837 desc_cnt, bytes_remain, 8838 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 8839 sizeof(struct fc_diag_cg_sig_desc)); 8840 goto out; 8841 } 8842 /* No action for Link Fault descriptor for now */ 8843 break; 8844 case ELS_DTAG_CG_SIGNAL_CAP: 8845 if (bytes_remain < FC_TLV_DESC_SZ_FROM_LENGTH(tlv) || 8846 FC_TLV_DESC_SZ_FROM_LENGTH(tlv) != 8847 sizeof(struct fc_diag_cg_sig_desc)) { 8848 lpfc_printf_log( 8849 phba, KERN_WARNING, LOG_CGN_MGMT, 8850 "6466 Truncated cgn signal Diagnostic " 8851 "descriptor[%d]: %d vs 0x%zx 0x%zx\n", 8852 desc_cnt, bytes_remain, 8853 FC_TLV_DESC_SZ_FROM_LENGTH(tlv), 8854 sizeof(struct fc_diag_cg_sig_desc)); 8855 goto out; 8856 } 8857 8858 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 8859 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 8860 8861 /* We start negotiation with lpfc_fabric_cgn_frequency. 8862 * When we process the EDC, we will settle on the 8863 * higher frequency. 
			 */
			phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;

			lpfc_least_capable_settings(
				phba, (struct fc_diag_cg_sig_desc *)tlv);
			rcv_cap_desc = true;
			break;
		default:
			dtag_nm = lpfc_get_tlv_dtag_nm(dtag);
			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
					"6467 unknown Diagnostic "
					"Descriptor[%d]: tag x%x (%s)\n",
					desc_cnt, dtag, dtag_nm);
		}
		bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
		tlv = fc_tlv_next_desc(tlv);
		desc_cnt++;
	}
out:
	/* Need to send back an ACC */
	lpfc_issue_els_edc_rsp(vport, cmdiocb, ndlp);

	lpfc_config_cgn_signal(phba);
	return 0;
}

/**
 * lpfc_els_timeout - Handler function for the ELS timer
 * @t: timer context used to obtain the vport.
 *
 * This routine is invoked by the ELS timer after timeout. It posts the ELS
 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
 * up the worker thread. The worker thread then invokes the routine
 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
 **/
void
lpfc_els_timeout(struct timer_list *t)
{
	struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	spin_lock_irqsave(&vport->work_port_lock, iflag);
	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
		vport->work_port_events |= WORKER_ELS_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
		lpfc_worker_wake_up(phba);
	return;
}


/**
 * lpfc_els_timeout_handler - Process an els timeout event
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine is the actual handler function that processes an ELS timeout
 * event. It walks the ELS ring and aborts all the IOCBs associated with the
 * @vport (except ABORT/CLOSE/FARP/FARPR/FDISC) by invoking the
 * lpfc_sli_issue_abort_iotag() routine.
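 *
 * The per-IOCB aging applied in the loop below can be sketched as follows,
 * where timeout is twice the fabric RATOV:
 *
 *   if (piocb->drvrTimeout > 0) {
 *           piocb->drvrTimeout = (piocb->drvrTimeout >= timeout) ?
 *                                 piocb->drvrTimeout - timeout : 0;
 *           continue;                        // not expired yet
 *   }
 *   list_add_tail(&piocb->dlist, &abort_list);  // expired, aborted below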
8928 **/ 8929 void 8930 lpfc_els_timeout_handler(struct lpfc_vport *vport) 8931 { 8932 struct lpfc_hba *phba = vport->phba; 8933 struct lpfc_sli_ring *pring; 8934 struct lpfc_iocbq *tmp_iocb, *piocb; 8935 IOCB_t *cmd = NULL; 8936 struct lpfc_dmabuf *pcmd; 8937 uint32_t els_command = 0; 8938 uint32_t timeout; 8939 uint32_t remote_ID = 0xffffffff; 8940 LIST_HEAD(abort_list); 8941 8942 8943 timeout = (uint32_t)(phba->fc_ratov << 1); 8944 8945 pring = lpfc_phba_elsring(phba); 8946 if (unlikely(!pring)) 8947 return; 8948 8949 if (phba->pport->load_flag & FC_UNLOADING) 8950 return; 8951 8952 spin_lock_irq(&phba->hbalock); 8953 if (phba->sli_rev == LPFC_SLI_REV4) 8954 spin_lock(&pring->ring_lock); 8955 8956 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8957 cmd = &piocb->iocb; 8958 8959 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 8960 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8961 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8962 continue; 8963 8964 if (piocb->vport != vport) 8965 continue; 8966 8967 pcmd = (struct lpfc_dmabuf *) piocb->context2; 8968 if (pcmd) 8969 els_command = *(uint32_t *) (pcmd->virt); 8970 8971 if (els_command == ELS_CMD_FARP || 8972 els_command == ELS_CMD_FARPR || 8973 els_command == ELS_CMD_FDISC) 8974 continue; 8975 8976 if (piocb->drvrTimeout > 0) { 8977 if (piocb->drvrTimeout >= timeout) 8978 piocb->drvrTimeout -= timeout; 8979 else 8980 piocb->drvrTimeout = 0; 8981 continue; 8982 } 8983 8984 remote_ID = 0xffffffff; 8985 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 8986 remote_ID = cmd->un.elsreq64.remoteID; 8987 else { 8988 struct lpfc_nodelist *ndlp; 8989 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 8990 if (ndlp) 8991 remote_ID = ndlp->nlp_DID; 8992 } 8993 list_add_tail(&piocb->dlist, &abort_list); 8994 } 8995 if (phba->sli_rev == LPFC_SLI_REV4) 8996 spin_unlock(&pring->ring_lock); 8997 spin_unlock_irq(&phba->hbalock); 8998 8999 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9000 cmd = &piocb->iocb; 9001 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9002 "0127 ELS timeout Data: x%x x%x x%x " 9003 "x%x\n", els_command, 9004 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 9005 spin_lock_irq(&phba->hbalock); 9006 list_del_init(&piocb->dlist); 9007 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9008 spin_unlock_irq(&phba->hbalock); 9009 } 9010 9011 /* Make sure HBA is alive */ 9012 lpfc_issue_hb_tmo(phba); 9013 9014 if (!list_empty(&pring->txcmplq)) 9015 if (!(phba->pport->load_flag & FC_UNLOADING)) 9016 mod_timer(&vport->els_tmofunc, 9017 jiffies + msecs_to_jiffies(1000 * timeout)); 9018 } 9019 9020 /** 9021 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 9022 * @vport: pointer to a host virtual N_Port data structure. 9023 * 9024 * This routine is used to clean up all the outstanding ELS commands on a 9025 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 9026 * routine. After that, it walks the ELS transmit queue to remove all the 9027 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 9028 * the IOCBs with a non-NULL completion callback function, the callback 9029 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9030 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 9031 * callback function, the IOCB will simply be released. 
Finally, it walks 9032 * the ELS transmit completion queue to issue an abort IOCB to any transmit 9033 * completion queue IOCB that is associated with the @vport and is not 9034 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 9035 * part of the discovery state machine) out to HBA by invoking the 9036 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 9037 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 9038 * the IOCBs are aborted when this function returns. 9039 **/ 9040 void 9041 lpfc_els_flush_cmd(struct lpfc_vport *vport) 9042 { 9043 LIST_HEAD(abort_list); 9044 struct lpfc_hba *phba = vport->phba; 9045 struct lpfc_sli_ring *pring; 9046 struct lpfc_iocbq *tmp_iocb, *piocb; 9047 IOCB_t *cmd = NULL; 9048 unsigned long iflags = 0; 9049 9050 lpfc_fabric_abort_vport(vport); 9051 9052 /* 9053 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 9054 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 9055 * ultimately grabs the ring_lock, the driver must splice the list into 9056 * a working list and release the locks before calling the abort. 9057 */ 9058 spin_lock_irqsave(&phba->hbalock, iflags); 9059 pring = lpfc_phba_elsring(phba); 9060 9061 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 9062 if (unlikely(!pring)) { 9063 spin_unlock_irqrestore(&phba->hbalock, iflags); 9064 return; 9065 } 9066 9067 if (phba->sli_rev == LPFC_SLI_REV4) 9068 spin_lock(&pring->ring_lock); 9069 9070 /* First we need to issue aborts to outstanding cmds on txcmpl */ 9071 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 9072 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 9073 continue; 9074 9075 if (piocb->vport != vport) 9076 continue; 9077 9078 if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) 9079 continue; 9080 9081 /* On the ELS ring we can have ELS_REQUESTs or 9082 * GEN_REQUESTs waiting for a response. 9083 */ 9084 cmd = &piocb->iocb; 9085 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 9086 list_add_tail(&piocb->dlist, &abort_list); 9087 9088 /* If the link is down when flushing ELS commands 9089 * the firmware will not complete them till after 9090 * the link comes back up. This may confuse 9091 * discovery for the new link up, so we need to 9092 * change the compl routine to just clean up the iocb 9093 * and avoid any retry logic. 9094 */ 9095 if (phba->link_state == LPFC_LINK_DOWN) 9096 piocb->iocb_cmpl = lpfc_cmpl_els_link_down; 9097 } 9098 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) 9099 list_add_tail(&piocb->dlist, &abort_list); 9100 } 9101 9102 if (phba->sli_rev == LPFC_SLI_REV4) 9103 spin_unlock(&pring->ring_lock); 9104 spin_unlock_irqrestore(&phba->hbalock, iflags); 9105 9106 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 9107 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 9108 spin_lock_irqsave(&phba->hbalock, iflags); 9109 list_del_init(&piocb->dlist); 9110 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 9111 spin_unlock_irqrestore(&phba->hbalock, iflags); 9112 } 9113 /* Make sure HBA is alive */ 9114 lpfc_issue_hb_tmo(phba); 9115 9116 if (!list_empty(&abort_list)) 9117 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9118 "3387 abort list for txq not empty\n"); 9119 INIT_LIST_HEAD(&abort_list); 9120 9121 spin_lock_irqsave(&phba->hbalock, iflags); 9122 if (phba->sli_rev == LPFC_SLI_REV4) 9123 spin_lock(&pring->ring_lock); 9124 9125 /* No need to abort the txq list, 9126 * just queue them up for lpfc_sli_cancel_iocbs 9127 */ 9128 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 9129 cmd = &piocb->iocb; 9130 9131 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 9132 continue; 9133 } 9134 9135 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 9136 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 9137 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 9138 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 9139 cmd->ulpCommand == CMD_ABORT_XRI_CN) 9140 continue; 9141 9142 if (piocb->vport != vport) 9143 continue; 9144 9145 list_del_init(&piocb->list); 9146 list_add_tail(&piocb->list, &abort_list); 9147 } 9148 9149 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 9150 if (vport == phba->pport) { 9151 list_for_each_entry_safe(piocb, tmp_iocb, 9152 &phba->fabric_iocb_list, list) { 9153 cmd = &piocb->iocb; 9154 list_del_init(&piocb->list); 9155 list_add_tail(&piocb->list, &abort_list); 9156 } 9157 } 9158 9159 if (phba->sli_rev == LPFC_SLI_REV4) 9160 spin_unlock(&pring->ring_lock); 9161 spin_unlock_irqrestore(&phba->hbalock, iflags); 9162 9163 /* Cancel all the IOCBs from the completions list */ 9164 lpfc_sli_cancel_iocbs(phba, &abort_list, 9165 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 9166 9167 return; 9168 } 9169 9170 /** 9171 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 9172 * @phba: pointer to lpfc hba data structure. 9173 * 9174 * This routine is used to clean up all the outstanding ELS commands on a 9175 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 9176 * routine. After that, it walks the ELS transmit queue to remove all the 9177 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 9178 * the IOCBs with the completion callback function associated, the callback 9179 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 9180 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 9181 * callback function associated, the IOCB will simply be released. Finally, 9182 * it walks the ELS transmit completion queue to issue an abort IOCB to any 9183 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 9184 * management plane IOCBs that are not part of the discovery state machine) 9185 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 9186 **/ 9187 void 9188 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 9189 { 9190 struct lpfc_vport *vport; 9191 9192 spin_lock_irq(&phba->port_list_lock); 9193 list_for_each_entry(vport, &phba->port_list, listentry) 9194 lpfc_els_flush_cmd(vport); 9195 spin_unlock_irq(&phba->port_list_lock); 9196 9197 return; 9198 } 9199 9200 /** 9201 * lpfc_send_els_failure_event - Posts an ELS command failure event 9202 * @phba: Pointer to hba context object. 
9203 * @cmdiocbp: Pointer to command iocb which reported error. 9204 * @rspiocbp: Pointer to response iocb which reported error. 9205 * 9206 * This function sends an event when there is an ELS command 9207 * failure. 9208 **/ 9209 void 9210 lpfc_send_els_failure_event(struct lpfc_hba *phba, 9211 struct lpfc_iocbq *cmdiocbp, 9212 struct lpfc_iocbq *rspiocbp) 9213 { 9214 struct lpfc_vport *vport = cmdiocbp->vport; 9215 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9216 struct lpfc_lsrjt_event lsrjt_event; 9217 struct lpfc_fabric_event_header fabric_event; 9218 struct ls_rjt stat; 9219 struct lpfc_nodelist *ndlp; 9220 uint32_t *pcmd; 9221 9222 ndlp = cmdiocbp->context1; 9223 if (!ndlp) 9224 return; 9225 9226 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 9227 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 9228 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 9229 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 9230 sizeof(struct lpfc_name)); 9231 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 9232 sizeof(struct lpfc_name)); 9233 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9234 cmdiocbp->context2)->virt); 9235 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 9236 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 9237 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 9238 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 9239 fc_host_post_vendor_event(shost, 9240 fc_get_event_number(), 9241 sizeof(lsrjt_event), 9242 (char *)&lsrjt_event, 9243 LPFC_NL_VENDOR_ID); 9244 return; 9245 } 9246 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 9247 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 9248 fabric_event.event_type = FC_REG_FABRIC_EVENT; 9249 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 9250 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 9251 else 9252 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 9253 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 9254 sizeof(struct lpfc_name)); 9255 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 9256 sizeof(struct lpfc_name)); 9257 fc_host_post_vendor_event(shost, 9258 fc_get_event_number(), 9259 sizeof(fabric_event), 9260 (char *)&fabric_event, 9261 LPFC_NL_VENDOR_ID); 9262 return; 9263 } 9264 9265 } 9266 9267 /** 9268 * lpfc_send_els_event - Posts unsolicited els event 9269 * @vport: Pointer to vport object. 9270 * @ndlp: Pointer FC node object. 9271 * @payload: ELS command code type. 9272 * 9273 * This function posts an event when there is an incoming 9274 * unsolicited ELS command. 
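 *
 * Only a handful of commands are reported; the subcategory mapping used
 * below is, in outline:
 *
 *   ELS_CMD_PLOGI -> LPFC_EVENT_PLOGI_RCV
 *   ELS_CMD_PRLO  -> LPFC_EVENT_PRLO_RCV
 *   ELS_CMD_ADISC -> LPFC_EVENT_ADISC_RCV
 *   ELS_CMD_LOGO  -> LPFC_EVENT_LOGO_RCV (the remote WWPN is also copied
 *                    out of the LOGO payload)
 *
 * Any other command simply frees the allocated event buffer and returns.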
9275 **/ 9276 static void 9277 lpfc_send_els_event(struct lpfc_vport *vport, 9278 struct lpfc_nodelist *ndlp, 9279 uint32_t *payload) 9280 { 9281 struct lpfc_els_event_header *els_data = NULL; 9282 struct lpfc_logo_event *logo_data = NULL; 9283 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9284 9285 if (*payload == ELS_CMD_LOGO) { 9286 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 9287 if (!logo_data) { 9288 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9289 "0148 Failed to allocate memory " 9290 "for LOGO event\n"); 9291 return; 9292 } 9293 els_data = &logo_data->header; 9294 } else { 9295 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 9296 GFP_KERNEL); 9297 if (!els_data) { 9298 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9299 "0149 Failed to allocate memory " 9300 "for ELS event\n"); 9301 return; 9302 } 9303 } 9304 els_data->event_type = FC_REG_ELS_EVENT; 9305 switch (*payload) { 9306 case ELS_CMD_PLOGI: 9307 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 9308 break; 9309 case ELS_CMD_PRLO: 9310 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 9311 break; 9312 case ELS_CMD_ADISC: 9313 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 9314 break; 9315 case ELS_CMD_LOGO: 9316 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 9317 /* Copy the WWPN in the LOGO payload */ 9318 memcpy(logo_data->logo_wwpn, &payload[2], 9319 sizeof(struct lpfc_name)); 9320 break; 9321 default: 9322 kfree(els_data); 9323 return; 9324 } 9325 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 9326 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 9327 if (*payload == ELS_CMD_LOGO) { 9328 fc_host_post_vendor_event(shost, 9329 fc_get_event_number(), 9330 sizeof(struct lpfc_logo_event), 9331 (char *)logo_data, 9332 LPFC_NL_VENDOR_ID); 9333 kfree(logo_data); 9334 } else { 9335 fc_host_post_vendor_event(shost, 9336 fc_get_event_number(), 9337 sizeof(struct lpfc_els_event_header), 9338 (char *)els_data, 9339 LPFC_NL_VENDOR_ID); 9340 kfree(els_data); 9341 } 9342 9343 return; 9344 } 9345 9346 9347 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 9348 FC_FPIN_LI_EVT_TYPES_INIT); 9349 9350 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_deli_event_nm, fc_fpin_deli_event_types, 9351 FC_FPIN_DELI_EVT_TYPES_INIT); 9352 9353 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_event_nm, fc_fpin_congn_event_types, 9354 FC_FPIN_CONGN_EVT_TYPES_INIT); 9355 9356 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_congn_severity_nm, 9357 fc_fpin_congn_severity_types, 9358 FC_FPIN_CONGN_SEVERITY_INIT); 9359 9360 9361 /** 9362 * lpfc_display_fpin_wwpn - Display WWPNs accessible by the attached port 9363 * @phba: Pointer to phba object. 9364 * @wwnlist: Pointer to list of WWPNs in FPIN payload 9365 * @cnt: count of WWPNs in FPIN payload 9366 * 9367 * This routine is called by LI and PC descriptors. 
9368 * Limit the number of WWPNs displayed to 6 log messages, 6 per log message 9369 */ 9370 static void 9371 lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt) 9372 { 9373 char buf[LPFC_FPIN_WWPN_LINE_SZ]; 9374 __be64 wwn; 9375 u64 wwpn; 9376 int i, len; 9377 int line = 0; 9378 int wcnt = 0; 9379 bool endit = false; 9380 9381 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, "Accessible WWPNs:"); 9382 for (i = 0; i < cnt; i++) { 9383 /* Are we on the last WWPN */ 9384 if (i == (cnt - 1)) 9385 endit = true; 9386 9387 /* Extract the next WWPN from the payload */ 9388 wwn = *wwnlist++; 9389 wwpn = be64_to_cpu(wwn); 9390 len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ, 9391 " %016llx", wwpn); 9392 9393 /* Log a message if we are on the last WWPN 9394 * or if we hit the max allowed per message. 9395 */ 9396 wcnt++; 9397 if (wcnt == LPFC_FPIN_WWPN_LINE_CNT || endit) { 9398 buf[len] = 0; 9399 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9400 "4686 %s\n", buf); 9401 9402 /* Check if we reached the last WWPN */ 9403 if (endit) 9404 return; 9405 9406 /* Limit the number of log message displayed per FPIN */ 9407 line++; 9408 if (line == LPFC_FPIN_WWPN_NUM_LINE) { 9409 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9410 "4687 %d WWPNs Truncated\n", 9411 cnt - i - 1); 9412 return; 9413 } 9414 9415 /* Start over with next log message */ 9416 wcnt = 0; 9417 len = scnprintf(buf, LPFC_FPIN_WWPN_LINE_SZ, 9418 "Additional WWPNs:"); 9419 } 9420 } 9421 } 9422 9423 /** 9424 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 9425 * @phba: Pointer to phba object. 9426 * @tlv: Pointer to the Link Integrity Notification Descriptor. 9427 * 9428 * This function processes a Link Integrity FPIN event by logging a message. 9429 **/ 9430 static void 9431 lpfc_els_rcv_fpin_li(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9432 { 9433 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 9434 const char *li_evt_str; 9435 u32 li_evt, cnt; 9436 9437 li_evt = be16_to_cpu(li->event_type); 9438 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 9439 cnt = be32_to_cpu(li->pname_count); 9440 9441 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9442 "4680 FPIN Link Integrity %s (x%x) " 9443 "Detecting PN x%016llx Attached PN x%016llx " 9444 "Duration %d mSecs Count %d Port Cnt %d\n", 9445 li_evt_str, li_evt, 9446 be64_to_cpu(li->detecting_wwpn), 9447 be64_to_cpu(li->attached_wwpn), 9448 be32_to_cpu(li->event_threshold), 9449 be32_to_cpu(li->event_count), cnt); 9450 9451 lpfc_display_fpin_wwpn(phba, (__be64 *)&li->pname_list, cnt); 9452 } 9453 9454 /** 9455 * lpfc_els_rcv_fpin_del - Process an FPIN Delivery Event. 9456 * @phba: Pointer to hba object. 9457 * @tlv: Pointer to the Delivery Notification Descriptor TLV 9458 * 9459 * This function processes a Delivery FPIN event by logging a message. 
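 *
 * Sketch of how the descriptor is consumed below (illustrative only):
 *
 *   del_rsn = be16_to_cpu(del->deli_reason_code);   // mapped to a name
 *   frame   = (__be32 *)(del + 1);                  // discarded frame header
 *   // message 4681 then logs the detecting/attached WWPNs plus the first
 *   // six words of that frame header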
9460 **/ 9461 static void 9462 lpfc_els_rcv_fpin_del(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9463 { 9464 struct fc_fn_deli_desc *del = (struct fc_fn_deli_desc *)tlv; 9465 const char *del_rsn_str; 9466 u32 del_rsn; 9467 __be32 *frame; 9468 9469 del_rsn = be16_to_cpu(del->deli_reason_code); 9470 del_rsn_str = lpfc_get_fpin_deli_event_nm(del_rsn); 9471 9472 /* Skip over desc_tag/desc_len header to payload */ 9473 frame = (__be32 *)(del + 1); 9474 9475 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9476 "4681 FPIN Delivery %s (x%x) " 9477 "Detecting PN x%016llx Attached PN x%016llx " 9478 "DiscHdr0 x%08x " 9479 "DiscHdr1 x%08x DiscHdr2 x%08x DiscHdr3 x%08x " 9480 "DiscHdr4 x%08x DiscHdr5 x%08x\n", 9481 del_rsn_str, del_rsn, 9482 be64_to_cpu(del->detecting_wwpn), 9483 be64_to_cpu(del->attached_wwpn), 9484 be32_to_cpu(frame[0]), 9485 be32_to_cpu(frame[1]), 9486 be32_to_cpu(frame[2]), 9487 be32_to_cpu(frame[3]), 9488 be32_to_cpu(frame[4]), 9489 be32_to_cpu(frame[5])); 9490 } 9491 9492 /** 9493 * lpfc_els_rcv_fpin_peer_cgn - Process a FPIN Peer Congestion Event. 9494 * @phba: Pointer to hba object. 9495 * @tlv: Pointer to the Peer Congestion Notification Descriptor TLV 9496 * 9497 * This function processes a Peer Congestion FPIN event by logging a message. 9498 **/ 9499 static void 9500 lpfc_els_rcv_fpin_peer_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9501 { 9502 struct fc_fn_peer_congn_desc *pc = (struct fc_fn_peer_congn_desc *)tlv; 9503 const char *pc_evt_str; 9504 u32 pc_evt, cnt; 9505 9506 pc_evt = be16_to_cpu(pc->event_type); 9507 pc_evt_str = lpfc_get_fpin_congn_event_nm(pc_evt); 9508 cnt = be32_to_cpu(pc->pname_count); 9509 9510 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_ELS, 9511 "4684 FPIN Peer Congestion %s (x%x) " 9512 "Duration %d mSecs " 9513 "Detecting PN x%016llx Attached PN x%016llx " 9514 "Impacted Port Cnt %d\n", 9515 pc_evt_str, pc_evt, 9516 be32_to_cpu(pc->event_period), 9517 be64_to_cpu(pc->detecting_wwpn), 9518 be64_to_cpu(pc->attached_wwpn), 9519 cnt); 9520 9521 lpfc_display_fpin_wwpn(phba, (__be64 *)&pc->pname_list, cnt); 9522 } 9523 9524 /** 9525 * lpfc_els_rcv_fpin_cgn - Process an FPIN Congestion notification 9526 * @phba: Pointer to hba object. 9527 * @tlv: Pointer to the Congestion Notification Descriptor TLV 9528 * 9529 * This function processes an FPIN Congestion Notifiction. The notification 9530 * could be an Alarm or Warning. This routine feeds that data into driver's 9531 * running congestion algorithm. It also processes the FPIN by 9532 * logging a message. It returns 1 to indicate deliver this message 9533 * to the upper layer or 0 to indicate don't deliver it. 9534 **/ 9535 static int 9536 lpfc_els_rcv_fpin_cgn(struct lpfc_hba *phba, struct fc_tlv_desc *tlv) 9537 { 9538 struct lpfc_cgn_info *cp; 9539 struct fc_fn_congn_desc *cgn = (struct fc_fn_congn_desc *)tlv; 9540 const char *cgn_evt_str; 9541 u32 cgn_evt; 9542 const char *cgn_sev_str; 9543 u32 cgn_sev; 9544 uint16_t value; 9545 u32 crc; 9546 bool nm_log = false; 9547 int rc = 1; 9548 9549 cgn_evt = be16_to_cpu(cgn->event_type); 9550 cgn_evt_str = lpfc_get_fpin_congn_event_nm(cgn_evt); 9551 cgn_sev = cgn->severity; 9552 cgn_sev_str = lpfc_get_fpin_congn_severity_nm(cgn_sev); 9553 9554 /* The driver only takes action on a Credit Stall or Oversubscription 9555 * event type to engage the IO algorithm. The driver prints an 9556 * unmaskable message only for Lost Credit and Credit Stall. 
9557 * TODO: Still need to have definition of host action on clear, 9558 * lost credit and device specific event types. 9559 */ 9560 switch (cgn_evt) { 9561 case FPIN_CONGN_LOST_CREDIT: 9562 nm_log = true; 9563 break; 9564 case FPIN_CONGN_CREDIT_STALL: 9565 nm_log = true; 9566 fallthrough; 9567 case FPIN_CONGN_OVERSUBSCRIPTION: 9568 if (cgn_evt == FPIN_CONGN_OVERSUBSCRIPTION) 9569 nm_log = false; 9570 switch (cgn_sev) { 9571 case FPIN_CONGN_SEVERITY_ERROR: 9572 /* Take action here for an Alarm event */ 9573 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9574 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) { 9575 /* Track of alarm cnt for cgn_info */ 9576 atomic_inc(&phba->cgn_fabric_alarm_cnt); 9577 /* Track of alarm cnt for SYNC_WQE */ 9578 atomic_inc(&phba->cgn_sync_alarm_cnt); 9579 } 9580 goto cleanup; 9581 } 9582 break; 9583 case FPIN_CONGN_SEVERITY_WARNING: 9584 /* Take action here for a Warning event */ 9585 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 9586 if (phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) { 9587 /* Track of warning cnt for cgn_info */ 9588 atomic_inc(&phba->cgn_fabric_warn_cnt); 9589 /* Track of warning cnt for SYNC_WQE */ 9590 atomic_inc(&phba->cgn_sync_warn_cnt); 9591 } 9592 cleanup: 9593 /* Save frequency in ms */ 9594 phba->cgn_fpin_frequency = 9595 be32_to_cpu(cgn->event_period); 9596 value = phba->cgn_fpin_frequency; 9597 if (phba->cgn_i) { 9598 cp = (struct lpfc_cgn_info *) 9599 phba->cgn_i->virt; 9600 if (phba->cgn_reg_fpin & 9601 LPFC_CGN_FPIN_ALARM) 9602 cp->cgn_alarm_freq = 9603 cpu_to_le16(value); 9604 if (phba->cgn_reg_fpin & 9605 LPFC_CGN_FPIN_WARN) 9606 cp->cgn_warn_freq = 9607 cpu_to_le16(value); 9608 crc = lpfc_cgn_calc_crc32 9609 (cp, 9610 LPFC_CGN_INFO_SZ, 9611 LPFC_CGN_CRC32_SEED); 9612 cp->cgn_info_crc = cpu_to_le32(crc); 9613 } 9614 9615 /* Don't deliver to upper layer since 9616 * driver took action on this tlv. 9617 */ 9618 rc = 0; 9619 } 9620 break; 9621 } 9622 break; 9623 } 9624 9625 /* Change the log level to unmaskable for the following event types. */ 9626 lpfc_printf_log(phba, (nm_log ? KERN_WARNING : KERN_INFO), 9627 LOG_CGN_MGMT | LOG_ELS, 9628 "4683 FPIN CONGESTION %s type %s (x%x) Event " 9629 "Duration %d mSecs\n", 9630 cgn_sev_str, cgn_evt_str, cgn_evt, 9631 be32_to_cpu(cgn->event_period)); 9632 return rc; 9633 } 9634 9635 void 9636 lpfc_els_rcv_fpin(struct lpfc_vport *vport, void *p, u32 fpin_length) 9637 { 9638 struct lpfc_hba *phba = vport->phba; 9639 struct fc_els_fpin *fpin = (struct fc_els_fpin *)p; 9640 struct fc_tlv_desc *tlv, *first_tlv, *current_tlv; 9641 const char *dtag_nm; 9642 int desc_cnt = 0, bytes_remain, cnt; 9643 u32 dtag, deliver = 0; 9644 int len; 9645 9646 /* FPINs handled only if we are in the right discovery state */ 9647 if (vport->port_state < LPFC_DISC_AUTH) 9648 return; 9649 9650 /* make sure there is the full fpin header */ 9651 if (fpin_length < sizeof(struct fc_els_fpin)) 9652 return; 9653 9654 /* Sanity check descriptor length. The desc_len value does not 9655 * include space for the ELS command and the desc_len fields. 
9656 */ 9657 len = be32_to_cpu(fpin->desc_len); 9658 if (fpin_length < len + sizeof(struct fc_els_fpin)) { 9659 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9660 "4671 Bad ELS FPIN length %d: %d\n", 9661 len, fpin_length); 9662 return; 9663 } 9664 9665 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 9666 first_tlv = tlv; 9667 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 9668 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 9669 9670 /* process each descriptor separately */ 9671 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 9672 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 9673 dtag = be32_to_cpu(tlv->desc_tag); 9674 switch (dtag) { 9675 case ELS_DTAG_LNK_INTEGRITY: 9676 lpfc_els_rcv_fpin_li(phba, tlv); 9677 deliver = 1; 9678 break; 9679 case ELS_DTAG_DELIVERY: 9680 lpfc_els_rcv_fpin_del(phba, tlv); 9681 deliver = 1; 9682 break; 9683 case ELS_DTAG_PEER_CONGEST: 9684 lpfc_els_rcv_fpin_peer_cgn(phba, tlv); 9685 deliver = 1; 9686 break; 9687 case ELS_DTAG_CONGESTION: 9688 deliver = lpfc_els_rcv_fpin_cgn(phba, tlv); 9689 break; 9690 default: 9691 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9692 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9693 "4678 unknown FPIN descriptor[%d]: " 9694 "tag x%x (%s)\n", 9695 desc_cnt, dtag, dtag_nm); 9696 9697 /* If descriptor is bad, drop the rest of the data */ 9698 return; 9699 } 9700 lpfc_cgn_update_stat(phba, dtag); 9701 cnt = be32_to_cpu(tlv->desc_len); 9702 9703 /* Sanity check descriptor length. The desc_len value does not 9704 * include space for the desc_tag and the desc_len fields. 9705 */ 9706 len -= (cnt + sizeof(struct fc_tlv_desc)); 9707 if (len < 0) { 9708 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 9709 lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT, 9710 "4672 Bad FPIN descriptor TLV length " 9711 "%d: %d %d %s\n", 9712 cnt, len, fpin_length, dtag_nm); 9713 return; 9714 } 9715 9716 current_tlv = tlv; 9717 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 9718 tlv = fc_tlv_next_desc(tlv); 9719 9720 /* Format payload such that the FPIN delivered to the 9721 * upper layer is a single descriptor FPIN. 9722 */ 9723 if (desc_cnt) 9724 memcpy(first_tlv, current_tlv, 9725 (cnt + sizeof(struct fc_els_fpin))); 9726 9727 /* Adjust the length so that it only reflects a 9728 * single descriptor FPIN. 9729 */ 9730 fpin_length = cnt + sizeof(struct fc_els_fpin); 9731 fpin->desc_len = cpu_to_be32(fpin_length); 9732 fpin_length += sizeof(struct fc_els_fpin); /* the entire FPIN */ 9733 9734 /* Send every descriptor individually to the upper layer */ 9735 if (deliver) 9736 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), 9737 fpin_length, (char *)fpin); 9738 desc_cnt++; 9739 } 9740 } 9741 9742 /** 9743 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 9744 * @phba: pointer to lpfc hba data structure. 9745 * @pring: pointer to a SLI ring. 9746 * @vport: pointer to a host virtual N_Port data structure. 9747 * @elsiocb: pointer to lpfc els command iocb data structure. 9748 * 9749 * This routine is used for processing the IOCB associated with a unsolicited 9750 * event. It first determines whether there is an existing ndlp that matches 9751 * the DID from the unsolicited IOCB. If not, it will create a new one with 9752 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 9753 * IOCB is then used to invoke the proper routine and to set up proper state 9754 * of the discovery state machine. 
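 *
 * A highly condensed sketch of the dispatch below (illustrative only):
 *
 *   ndlp = lpfc_findnode_did(vport, did);
 *   if (!ndlp)
 *           ndlp = lpfc_nlp_init(vport, did);       // newnode = 1
 *   elsiocb->context1 = lpfc_nlp_get(ndlp);
 *   switch (cmd) {
 *   case ELS_CMD_PLOGI:
 *           lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI);
 *           break;
 *   ...
 *   default:
 *           rjt_err = LSRJT_CMD_UNSUPPORTED;        // answered with LS_RJT
 *   }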
9755 **/ 9756 static void 9757 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9758 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 9759 { 9760 struct lpfc_nodelist *ndlp; 9761 struct ls_rjt stat; 9762 uint32_t *payload, payload_len; 9763 uint32_t cmd, did, newnode; 9764 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 9765 IOCB_t *icmd = &elsiocb->iocb; 9766 LPFC_MBOXQ_t *mbox; 9767 9768 if (!vport || !(elsiocb->context2)) 9769 goto dropit; 9770 9771 newnode = 0; 9772 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 9773 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 9774 cmd = *payload; 9775 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 9776 lpfc_post_buffer(phba, pring, 1); 9777 9778 did = icmd->un.rcvels.remoteID; 9779 if (icmd->ulpStatus) { 9780 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9781 "RCV Unsol ELS: status:x%x/x%x did:x%x", 9782 icmd->ulpStatus, icmd->un.ulpWord[4], did); 9783 goto dropit; 9784 } 9785 9786 /* Check to see if link went down during discovery */ 9787 if (lpfc_els_chk_latt(vport)) 9788 goto dropit; 9789 9790 /* Ignore traffic received during vport shutdown. */ 9791 if (vport->load_flag & FC_UNLOADING) 9792 goto dropit; 9793 9794 /* If NPort discovery is delayed drop incoming ELS */ 9795 if ((vport->fc_flag & FC_DISC_DELAYED) && 9796 (cmd != ELS_CMD_PLOGI)) 9797 goto dropit; 9798 9799 ndlp = lpfc_findnode_did(vport, did); 9800 if (!ndlp) { 9801 /* Cannot find existing Fabric ndlp, so allocate a new one */ 9802 ndlp = lpfc_nlp_init(vport, did); 9803 if (!ndlp) 9804 goto dropit; 9805 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 9806 newnode = 1; 9807 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 9808 ndlp->nlp_type |= NLP_FABRIC; 9809 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 9810 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 9811 newnode = 1; 9812 } 9813 9814 phba->fc_stat.elsRcvFrame++; 9815 9816 /* 9817 * Do not process any unsolicited ELS commands 9818 * if the ndlp is in DEV_LOSS 9819 */ 9820 spin_lock_irq(&ndlp->lock); 9821 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 9822 spin_unlock_irq(&ndlp->lock); 9823 if (newnode) 9824 lpfc_nlp_put(ndlp); 9825 goto dropit; 9826 } 9827 spin_unlock_irq(&ndlp->lock); 9828 9829 elsiocb->context1 = lpfc_nlp_get(ndlp); 9830 if (!elsiocb->context1) 9831 goto dropit; 9832 elsiocb->vport = vport; 9833 9834 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 9835 cmd &= ELS_CMD_MASK; 9836 } 9837 /* ELS command <elsCmd> received from NPORT <did> */ 9838 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9839 "0112 ELS command x%x received from NPORT x%x " 9840 "refcnt %d Data: x%x x%x x%x x%x\n", 9841 cmd, did, kref_read(&ndlp->kref), vport->port_state, 9842 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 9843 9844 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 9845 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 9846 (cmd != ELS_CMD_FLOGI) && 9847 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 9848 rjt_err = LSRJT_LOGICAL_BSY; 9849 rjt_exp = LSEXP_NOTHING_MORE; 9850 goto lsrjt; 9851 } 9852 9853 switch (cmd) { 9854 case ELS_CMD_PLOGI: 9855 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9856 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 9857 did, vport->port_state, ndlp->nlp_flag); 9858 9859 phba->fc_stat.elsRcvPLOGI++; 9860 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 9861 if (phba->sli_rev == LPFC_SLI_REV4 && 9862 (phba->pport->fc_flag & FC_PT2PT)) { 9863 vport->fc_prevDID = vport->fc_myDID; 9864 /* Our DID needs 
to be updated before registering 9865 * the vfi. This is done in lpfc_rcv_plogi but 9866 * that is called after the reg_vfi. 9867 */ 9868 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; 9869 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9870 "3312 Remote port assigned DID x%x " 9871 "%x\n", vport->fc_myDID, 9872 vport->fc_prevDID); 9873 } 9874 9875 lpfc_send_els_event(vport, ndlp, payload); 9876 9877 /* If Nport discovery is delayed, reject PLOGIs */ 9878 if (vport->fc_flag & FC_DISC_DELAYED) { 9879 rjt_err = LSRJT_UNABLE_TPC; 9880 rjt_exp = LSEXP_NOTHING_MORE; 9881 break; 9882 } 9883 9884 if (vport->port_state < LPFC_DISC_AUTH) { 9885 if (!(phba->pport->fc_flag & FC_PT2PT) || 9886 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 9887 rjt_err = LSRJT_UNABLE_TPC; 9888 rjt_exp = LSEXP_NOTHING_MORE; 9889 break; 9890 } 9891 } 9892 9893 spin_lock_irq(&ndlp->lock); 9894 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 9895 spin_unlock_irq(&ndlp->lock); 9896 9897 lpfc_disc_state_machine(vport, ndlp, elsiocb, 9898 NLP_EVT_RCV_PLOGI); 9899 9900 break; 9901 case ELS_CMD_FLOGI: 9902 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9903 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 9904 did, vport->port_state, ndlp->nlp_flag); 9905 9906 phba->fc_stat.elsRcvFLOGI++; 9907 9908 /* If the driver believes fabric discovery is done and is ready, 9909 * bounce the link. There is some descrepancy. 9910 */ 9911 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 9912 vport->fc_flag & FC_PT2PT && 9913 vport->rcv_flogi_cnt >= 1) { 9914 rjt_err = LSRJT_LOGICAL_BSY; 9915 rjt_exp = LSEXP_NOTHING_MORE; 9916 init_link++; 9917 goto lsrjt; 9918 } 9919 9920 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 9921 if (newnode) 9922 lpfc_disc_state_machine(vport, ndlp, NULL, 9923 NLP_EVT_DEVICE_RM); 9924 break; 9925 case ELS_CMD_LOGO: 9926 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9927 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 9928 did, vport->port_state, ndlp->nlp_flag); 9929 9930 phba->fc_stat.elsRcvLOGO++; 9931 lpfc_send_els_event(vport, ndlp, payload); 9932 if (vport->port_state < LPFC_DISC_AUTH) { 9933 rjt_err = LSRJT_UNABLE_TPC; 9934 rjt_exp = LSEXP_NOTHING_MORE; 9935 break; 9936 } 9937 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 9938 if (newnode) 9939 lpfc_disc_state_machine(vport, ndlp, NULL, 9940 NLP_EVT_DEVICE_RM); 9941 break; 9942 case ELS_CMD_PRLO: 9943 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9944 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 9945 did, vport->port_state, ndlp->nlp_flag); 9946 9947 phba->fc_stat.elsRcvPRLO++; 9948 lpfc_send_els_event(vport, ndlp, payload); 9949 if (vport->port_state < LPFC_DISC_AUTH) { 9950 rjt_err = LSRJT_UNABLE_TPC; 9951 rjt_exp = LSEXP_NOTHING_MORE; 9952 break; 9953 } 9954 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 9955 break; 9956 case ELS_CMD_LCB: 9957 phba->fc_stat.elsRcvLCB++; 9958 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 9959 break; 9960 case ELS_CMD_RDP: 9961 phba->fc_stat.elsRcvRDP++; 9962 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 9963 break; 9964 case ELS_CMD_RSCN: 9965 phba->fc_stat.elsRcvRSCN++; 9966 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 9967 if (newnode) 9968 lpfc_disc_state_machine(vport, ndlp, NULL, 9969 NLP_EVT_DEVICE_RM); 9970 break; 9971 case ELS_CMD_ADISC: 9972 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9973 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 9974 did, vport->port_state, ndlp->nlp_flag); 9975 9976 lpfc_send_els_event(vport, ndlp, payload); 9977 phba->fc_stat.elsRcvADISC++; 9978 if (vport->port_state < LPFC_DISC_AUTH) { 9979 
rjt_err = LSRJT_UNABLE_TPC; 9980 rjt_exp = LSEXP_NOTHING_MORE; 9981 break; 9982 } 9983 lpfc_disc_state_machine(vport, ndlp, elsiocb, 9984 NLP_EVT_RCV_ADISC); 9985 break; 9986 case ELS_CMD_PDISC: 9987 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9988 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 9989 did, vport->port_state, ndlp->nlp_flag); 9990 9991 phba->fc_stat.elsRcvPDISC++; 9992 if (vport->port_state < LPFC_DISC_AUTH) { 9993 rjt_err = LSRJT_UNABLE_TPC; 9994 rjt_exp = LSEXP_NOTHING_MORE; 9995 break; 9996 } 9997 lpfc_disc_state_machine(vport, ndlp, elsiocb, 9998 NLP_EVT_RCV_PDISC); 9999 break; 10000 case ELS_CMD_FARPR: 10001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10002 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 10003 did, vport->port_state, ndlp->nlp_flag); 10004 10005 phba->fc_stat.elsRcvFARPR++; 10006 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 10007 break; 10008 case ELS_CMD_FARP: 10009 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10010 "RCV FARP: did:x%x/ste:x%x flg:x%x", 10011 did, vport->port_state, ndlp->nlp_flag); 10012 10013 phba->fc_stat.elsRcvFARP++; 10014 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 10015 break; 10016 case ELS_CMD_FAN: 10017 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10018 "RCV FAN: did:x%x/ste:x%x flg:x%x", 10019 did, vport->port_state, ndlp->nlp_flag); 10020 10021 phba->fc_stat.elsRcvFAN++; 10022 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 10023 break; 10024 case ELS_CMD_PRLI: 10025 case ELS_CMD_NVMEPRLI: 10026 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10027 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 10028 did, vport->port_state, ndlp->nlp_flag); 10029 10030 phba->fc_stat.elsRcvPRLI++; 10031 if ((vport->port_state < LPFC_DISC_AUTH) && 10032 (vport->fc_flag & FC_FABRIC)) { 10033 rjt_err = LSRJT_UNABLE_TPC; 10034 rjt_exp = LSEXP_NOTHING_MORE; 10035 break; 10036 } 10037 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 10038 break; 10039 case ELS_CMD_LIRR: 10040 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10041 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 10042 did, vport->port_state, ndlp->nlp_flag); 10043 10044 phba->fc_stat.elsRcvLIRR++; 10045 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 10046 if (newnode) 10047 lpfc_disc_state_machine(vport, ndlp, NULL, 10048 NLP_EVT_DEVICE_RM); 10049 break; 10050 case ELS_CMD_RLS: 10051 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10052 "RCV RLS: did:x%x/ste:x%x flg:x%x", 10053 did, vport->port_state, ndlp->nlp_flag); 10054 10055 phba->fc_stat.elsRcvRLS++; 10056 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 10057 if (newnode) 10058 lpfc_disc_state_machine(vport, ndlp, NULL, 10059 NLP_EVT_DEVICE_RM); 10060 break; 10061 case ELS_CMD_RPL: 10062 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10063 "RCV RPL: did:x%x/ste:x%x flg:x%x", 10064 did, vport->port_state, ndlp->nlp_flag); 10065 10066 phba->fc_stat.elsRcvRPL++; 10067 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 10068 if (newnode) 10069 lpfc_disc_state_machine(vport, ndlp, NULL, 10070 NLP_EVT_DEVICE_RM); 10071 break; 10072 case ELS_CMD_RNID: 10073 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10074 "RCV RNID: did:x%x/ste:x%x flg:x%x", 10075 did, vport->port_state, ndlp->nlp_flag); 10076 10077 phba->fc_stat.elsRcvRNID++; 10078 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 10079 if (newnode) 10080 lpfc_disc_state_machine(vport, ndlp, NULL, 10081 NLP_EVT_DEVICE_RM); 10082 break; 10083 case ELS_CMD_RTV: 10084 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10085 "RCV RTV: did:x%x/ste:x%x flg:x%x", 10086 did, 
vport->port_state, ndlp->nlp_flag); 10087 phba->fc_stat.elsRcvRTV++; 10088 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 10089 if (newnode) 10090 lpfc_disc_state_machine(vport, ndlp, NULL, 10091 NLP_EVT_DEVICE_RM); 10092 break; 10093 case ELS_CMD_RRQ: 10094 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10095 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 10096 did, vport->port_state, ndlp->nlp_flag); 10097 10098 phba->fc_stat.elsRcvRRQ++; 10099 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 10100 if (newnode) 10101 lpfc_disc_state_machine(vport, ndlp, NULL, 10102 NLP_EVT_DEVICE_RM); 10103 break; 10104 case ELS_CMD_ECHO: 10105 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10106 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 10107 did, vport->port_state, ndlp->nlp_flag); 10108 10109 phba->fc_stat.elsRcvECHO++; 10110 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 10111 if (newnode) 10112 lpfc_disc_state_machine(vport, ndlp, NULL, 10113 NLP_EVT_DEVICE_RM); 10114 break; 10115 case ELS_CMD_REC: 10116 /* receive this due to exchange closed */ 10117 rjt_err = LSRJT_UNABLE_TPC; 10118 rjt_exp = LSEXP_INVALID_OX_RX; 10119 break; 10120 case ELS_CMD_FPIN: 10121 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10122 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 10123 did, vport->port_state, ndlp->nlp_flag); 10124 10125 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 10126 payload_len); 10127 10128 /* There are no replies, so no rjt codes */ 10129 break; 10130 case ELS_CMD_EDC: 10131 lpfc_els_rcv_edc(vport, elsiocb, ndlp); 10132 break; 10133 case ELS_CMD_RDF: 10134 phba->fc_stat.elsRcvRDF++; 10135 /* Accept RDF only from fabric controller */ 10136 if (did != Fabric_Cntl_DID) { 10137 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 10138 "1115 Received RDF from invalid DID " 10139 "x%x\n", did); 10140 rjt_err = LSRJT_PROTOCOL_ERR; 10141 rjt_exp = LSEXP_NOTHING_MORE; 10142 goto lsrjt; 10143 } 10144 10145 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 10146 break; 10147 default: 10148 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 10149 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 10150 cmd, did, vport->port_state); 10151 10152 /* Unsupported ELS command, reject */ 10153 rjt_err = LSRJT_CMD_UNSUPPORTED; 10154 rjt_exp = LSEXP_NOTHING_MORE; 10155 10156 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 10157 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10158 "0115 Unknown ELS command x%x " 10159 "received from NPORT x%x\n", cmd, did); 10160 if (newnode) 10161 lpfc_disc_state_machine(vport, ndlp, NULL, 10162 NLP_EVT_DEVICE_RM); 10163 break; 10164 } 10165 10166 lsrjt: 10167 /* check if need to LS_RJT received ELS cmd */ 10168 if (rjt_err) { 10169 memset(&stat, 0, sizeof(stat)); 10170 stat.un.b.lsRjtRsnCode = rjt_err; 10171 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 10172 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 10173 NULL); 10174 /* Remove the reference from above for new nodes. */ 10175 if (newnode) 10176 lpfc_disc_state_machine(vport, ndlp, NULL, 10177 NLP_EVT_DEVICE_RM); 10178 } 10179 10180 /* Release the reference on this elsiocb, not the ndlp. */ 10181 lpfc_nlp_put(elsiocb->context1); 10182 elsiocb->context1 = NULL; 10183 10184 /* Special case. Driver received an unsolicited command that 10185 * unsupportable given the driver's current state. Reset the 10186 * link and start over. 
10187 */ 10188 if (init_link) { 10189 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10190 if (!mbox) 10191 return; 10192 lpfc_linkdown(phba); 10193 lpfc_init_link(phba, mbox, 10194 phba->cfg_topology, 10195 phba->cfg_link_speed); 10196 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 10197 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10198 mbox->vport = vport; 10199 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 10200 MBX_NOT_FINISHED) 10201 mempool_free(mbox, phba->mbox_mem_pool); 10202 } 10203 10204 return; 10205 10206 dropit: 10207 if (vport && !(vport->load_flag & FC_UNLOADING)) 10208 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10209 "0111 Dropping received ELS cmd " 10210 "Data: x%x x%x x%x\n", 10211 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 10212 phba->fc_stat.elsRcvDrop++; 10213 } 10214 10215 /** 10216 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 10217 * @phba: pointer to lpfc hba data structure. 10218 * @pring: pointer to a SLI ring. 10219 * @elsiocb: pointer to lpfc els iocb data structure. 10220 * 10221 * This routine is used to process an unsolicited event received from a SLI 10222 * (Service Level Interface) ring. The actual processing of the data buffer 10223 * associated with the unsolicited event is done by invoking the routine 10224 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 10225 * SLI ring on which the unsolicited event was received. 10226 **/ 10227 void 10228 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10229 struct lpfc_iocbq *elsiocb) 10230 { 10231 struct lpfc_vport *vport = phba->pport; 10232 IOCB_t *icmd = &elsiocb->iocb; 10233 dma_addr_t paddr; 10234 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 10235 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 10236 10237 elsiocb->context1 = NULL; 10238 elsiocb->context2 = NULL; 10239 elsiocb->context3 = NULL; 10240 10241 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 10242 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 10243 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 10244 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 10245 IOERR_RCV_BUFFER_WAITING) { 10246 phba->fc_stat.NoRcvBuf++; 10247 /* Not enough posted buffers; Try posting more buffers */ 10248 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 10249 lpfc_post_buffer(phba, pring, 0); 10250 return; 10251 } 10252 10253 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 10254 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 10255 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 10256 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 10257 vport = phba->pport; 10258 else 10259 vport = lpfc_find_vport_by_vpid(phba, 10260 icmd->unsli3.rcvsli3.vpi); 10261 } 10262 10263 /* If there are no BDEs associated 10264 * with this IOCB, there is nothing to do. 10265 */ 10266 if (icmd->ulpBdeCount == 0) 10267 return; 10268 10269 /* type of ELS cmd is first 32bit word 10270 * in packet 10271 */ 10272 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 10273 elsiocb->context2 = bdeBuf1; 10274 } else { 10275 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 10276 icmd->un.cont64[0].addrLow); 10277 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 10278 paddr); 10279 } 10280 10281 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10282 /* 10283 * The different unsolicited event handlers would tell us 10284 * if they are done with "mp" by setting context2 to NULL. 
10285 */ 10286 if (elsiocb->context2) { 10287 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 10288 elsiocb->context2 = NULL; 10289 } 10290 10291 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 10292 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 10293 icmd->ulpBdeCount == 2) { 10294 elsiocb->context2 = bdeBuf2; 10295 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 10296 /* free mp if we are done with it */ 10297 if (elsiocb->context2) { 10298 lpfc_in_buf_free(phba, elsiocb->context2); 10299 elsiocb->context2 = NULL; 10300 } 10301 } 10302 } 10303 10304 static void 10305 lpfc_start_fdmi(struct lpfc_vport *vport) 10306 { 10307 struct lpfc_nodelist *ndlp; 10308 10309 /* If this is the first time, allocate an ndlp and initialize 10310 * it. Otherwise, make sure the node is enabled and then do the 10311 * login. 10312 */ 10313 ndlp = lpfc_findnode_did(vport, FDMI_DID); 10314 if (!ndlp) { 10315 ndlp = lpfc_nlp_init(vport, FDMI_DID); 10316 if (ndlp) { 10317 ndlp->nlp_type |= NLP_FABRIC; 10318 } else { 10319 return; 10320 } 10321 } 10322 10323 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10324 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 10325 } 10326 10327 /** 10328 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 10329 * @phba: pointer to lpfc hba data structure. 10330 * @vport: pointer to a virtual N_Port data structure. 10331 * 10332 * This routine issues a Port Login (PLOGI) to the Name Server with 10333 * State Change Request (SCR) for a @vport. This routine will create an 10334 * ndlp for the Name Server associated to the @vport if such node does 10335 * not already exist. The PLOGI to Name Server is issued by invoking the 10336 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 10337 * (FDMI) is configured to the @vport, a FDMI node will be created and 10338 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 10339 **/ 10340 void 10341 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 10342 { 10343 struct lpfc_nodelist *ndlp; 10344 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10345 10346 /* 10347 * If lpfc_delay_discovery parameter is set and the clean address 10348 * bit is cleared and fc fabric parameters chenged, delay FC NPort 10349 * discovery. 
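 *
 * The delay itself is implemented just below: the delayed discovery timer
 * (vport->delayed_disc_tmo) is armed for fc_ratov seconds and this routine
 * returns without creating or logging in to the Name Server node; discovery
 * is resumed later from the delayed discovery timer handling, outside this
 * routine.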
10350 */ 10351 spin_lock_irq(shost->host_lock); 10352 if (vport->fc_flag & FC_DISC_DELAYED) { 10353 spin_unlock_irq(shost->host_lock); 10354 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10355 "3334 Delay fc port discovery for %d secs\n", 10356 phba->fc_ratov); 10357 mod_timer(&vport->delayed_disc_tmo, 10358 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 10359 return; 10360 } 10361 spin_unlock_irq(shost->host_lock); 10362 10363 ndlp = lpfc_findnode_did(vport, NameServer_DID); 10364 if (!ndlp) { 10365 ndlp = lpfc_nlp_init(vport, NameServer_DID); 10366 if (!ndlp) { 10367 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 10368 lpfc_disc_start(vport); 10369 return; 10370 } 10371 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10372 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10373 "0251 NameServer login: no memory\n"); 10374 return; 10375 } 10376 } 10377 10378 ndlp->nlp_type |= NLP_FABRIC; 10379 10380 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 10381 10382 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 10383 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10384 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10385 "0252 Cannot issue NameServer login\n"); 10386 return; 10387 } 10388 10389 if ((phba->cfg_enable_SmartSAN || 10390 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 10391 (vport->load_flag & FC_ALLOW_FDMI)) 10392 lpfc_start_fdmi(vport); 10393 } 10394 10395 /** 10396 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 10397 * @phba: pointer to lpfc hba data structure. 10398 * @pmb: pointer to the driver internal queue element for mailbox command. 10399 * 10400 * This routine is the completion callback function to register new vport 10401 * mailbox command. If the new vport mailbox command completes successfully, 10402 * the fabric registration login shall be performed on physical port (the 10403 * new vport created is actually a physical port, with VPI 0) or the port 10404 * login to Name Server for State Change Request (SCR) will be performed 10405 * on virtual port (real virtual port, with VPI greater than 0). 
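 *
 * As a rough outline (illustrative only), the successful-completion path
 * below is:
 *
 *   vport->vpi_state |= LPFC_VPI_REGISTERED;
 *   if (vport == phba->pport)        physical port
 *           SLI-3: lpfc_issue_fabric_reglogin(vport);
 *           SLI-4: lpfc_start_fdiscs(phba) unless brought up via FDISC,
 *                  then lpfc_do_scr_ns_plogi(phba, vport);
 *   else                             true NPIV vport
 *           lpfc_do_scr_ns_plogi(phba, vport);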
10406 **/ 10407 static void 10408 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 10409 { 10410 struct lpfc_vport *vport = pmb->vport; 10411 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10412 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 10413 MAILBOX_t *mb = &pmb->u.mb; 10414 int rc; 10415 10416 spin_lock_irq(shost->host_lock); 10417 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10418 spin_unlock_irq(shost->host_lock); 10419 10420 if (mb->mbxStatus) { 10421 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10422 "0915 Register VPI failed : Status: x%x" 10423 " upd bit: x%x \n", mb->mbxStatus, 10424 mb->un.varRegVpi.upd); 10425 if (phba->sli_rev == LPFC_SLI_REV4 && 10426 mb->un.varRegVpi.upd) 10427 goto mbox_err_exit ; 10428 10429 switch (mb->mbxStatus) { 10430 case 0x11: /* unsupported feature */ 10431 case 0x9603: /* max_vpi exceeded */ 10432 case 0x9602: /* Link event since CLEAR_LA */ 10433 /* giving up on vport registration */ 10434 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10435 spin_lock_irq(shost->host_lock); 10436 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 10437 spin_unlock_irq(shost->host_lock); 10438 lpfc_can_disctmo(vport); 10439 break; 10440 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 10441 case 0x20: 10442 spin_lock_irq(shost->host_lock); 10443 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10444 spin_unlock_irq(shost->host_lock); 10445 lpfc_init_vpi(phba, pmb, vport->vpi); 10446 pmb->vport = vport; 10447 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 10448 rc = lpfc_sli_issue_mbox(phba, pmb, 10449 MBX_NOWAIT); 10450 if (rc == MBX_NOT_FINISHED) { 10451 lpfc_printf_vlog(vport, KERN_ERR, 10452 LOG_TRACE_EVENT, 10453 "2732 Failed to issue INIT_VPI" 10454 " mailbox command\n"); 10455 } else { 10456 lpfc_nlp_put(ndlp); 10457 return; 10458 } 10459 fallthrough; 10460 default: 10461 /* Try to recover from this error */ 10462 if (phba->sli_rev == LPFC_SLI_REV4) 10463 lpfc_sli4_unreg_all_rpis(vport); 10464 lpfc_mbx_unreg_vpi(vport); 10465 spin_lock_irq(shost->host_lock); 10466 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10467 spin_unlock_irq(shost->host_lock); 10468 if (mb->mbxStatus == MBX_NOT_FINISHED) 10469 break; 10470 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 10471 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 10472 if (phba->sli_rev == LPFC_SLI_REV4) 10473 lpfc_issue_init_vfi(vport); 10474 else 10475 lpfc_initial_flogi(vport); 10476 } else { 10477 lpfc_initial_fdisc(vport); 10478 } 10479 break; 10480 } 10481 } else { 10482 spin_lock_irq(shost->host_lock); 10483 vport->vpi_state |= LPFC_VPI_REGISTERED; 10484 spin_unlock_irq(shost->host_lock); 10485 if (vport == phba->pport) { 10486 if (phba->sli_rev < LPFC_SLI_REV4) 10487 lpfc_issue_fabric_reglogin(vport); 10488 else { 10489 /* 10490 * If the physical port is instantiated using 10491 * FDISC, do not start vport discovery. 10492 */ 10493 if (vport->port_state != LPFC_FDISC) 10494 lpfc_start_fdiscs(phba); 10495 lpfc_do_scr_ns_plogi(phba, vport); 10496 } 10497 } else { 10498 lpfc_do_scr_ns_plogi(phba, vport); 10499 } 10500 } 10501 mbox_err_exit: 10502 /* Now, we decrement the ndlp reference count held for this 10503 * callback function 10504 */ 10505 lpfc_nlp_put(ndlp); 10506 10507 mempool_free(pmb, phba->mbox_mem_pool); 10508 return; 10509 } 10510 10511 /** 10512 * lpfc_register_new_vport - Register a new vport with a HBA 10513 * @phba: pointer to lpfc hba data structure. 10514 * @vport: pointer to a host virtual N_Port data structure. 
10515 * @ndlp: pointer to a node-list data structure. 10516 * 10517 * This routine registers the @vport as a new virtual port with a HBA. 10518 * It is done through a registering vpi mailbox command. 10519 **/ 10520 void 10521 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 10522 struct lpfc_nodelist *ndlp) 10523 { 10524 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10525 LPFC_MBOXQ_t *mbox; 10526 10527 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10528 if (mbox) { 10529 lpfc_reg_vpi(vport, mbox); 10530 mbox->vport = vport; 10531 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 10532 if (!mbox->ctx_ndlp) { 10533 mempool_free(mbox, phba->mbox_mem_pool); 10534 goto mbox_err_exit; 10535 } 10536 10537 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 10538 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 10539 == MBX_NOT_FINISHED) { 10540 /* mailbox command not success, decrement ndlp 10541 * reference count for this command 10542 */ 10543 lpfc_nlp_put(ndlp); 10544 mempool_free(mbox, phba->mbox_mem_pool); 10545 10546 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10547 "0253 Register VPI: Can't send mbox\n"); 10548 goto mbox_err_exit; 10549 } 10550 } else { 10551 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10552 "0254 Register VPI: no memory\n"); 10553 goto mbox_err_exit; 10554 } 10555 return; 10556 10557 mbox_err_exit: 10558 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10559 spin_lock_irq(shost->host_lock); 10560 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 10561 spin_unlock_irq(shost->host_lock); 10562 return; 10563 } 10564 10565 /** 10566 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 10567 * @phba: pointer to lpfc hba data structure. 10568 * 10569 * This routine cancels the retry delay timers to all the vports. 10570 **/ 10571 void 10572 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 10573 { 10574 struct lpfc_vport **vports; 10575 struct lpfc_nodelist *ndlp; 10576 uint32_t link_state; 10577 int i; 10578 10579 /* Treat this failure as linkdown for all vports */ 10580 link_state = phba->link_state; 10581 lpfc_linkdown(phba); 10582 phba->link_state = link_state; 10583 10584 vports = lpfc_create_vport_work_array(phba); 10585 10586 if (vports) { 10587 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 10588 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 10589 if (ndlp) 10590 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 10591 lpfc_els_flush_cmd(vports[i]); 10592 } 10593 lpfc_destroy_vport_work_array(phba, vports); 10594 } 10595 } 10596 10597 /** 10598 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 10599 * @phba: pointer to lpfc hba data structure. 10600 * 10601 * This routine abort all pending discovery commands and 10602 * start a timer to retry FLOGI for the physical port 10603 * discovery. 
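 *
 * The retry is scheduled (illustrative sketch) by arming the fabric node's
 * delay timer for one second with FLOGI recorded as the command to replay:
 *
 *   mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
 *   ndlp->nlp_flag |= NLP_DELAY_TMO;
 *   ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
 *   phba->pport->port_state = LPFC_FLOGI;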
10604 **/ 10605 void 10606 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 10607 { 10608 struct lpfc_nodelist *ndlp; 10609 10610 /* Cancel the all vports retry delay retry timers */ 10611 lpfc_cancel_all_vport_retry_delay_timer(phba); 10612 10613 /* If fabric require FLOGI, then re-instantiate physical login */ 10614 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 10615 if (!ndlp) 10616 return; 10617 10618 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 10619 spin_lock_irq(&ndlp->lock); 10620 ndlp->nlp_flag |= NLP_DELAY_TMO; 10621 spin_unlock_irq(&ndlp->lock); 10622 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 10623 phba->pport->port_state = LPFC_FLOGI; 10624 return; 10625 } 10626 10627 /** 10628 * lpfc_fabric_login_reqd - Check if FLOGI required. 10629 * @phba: pointer to lpfc hba data structure. 10630 * @cmdiocb: pointer to FDISC command iocb. 10631 * @rspiocb: pointer to FDISC response iocb. 10632 * 10633 * This routine checks if a FLOGI is reguired for FDISC 10634 * to succeed. 10635 **/ 10636 static int 10637 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 10638 struct lpfc_iocbq *cmdiocb, 10639 struct lpfc_iocbq *rspiocb) 10640 { 10641 10642 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) || 10643 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED)) 10644 return 0; 10645 else 10646 return 1; 10647 } 10648 10649 /** 10650 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 10651 * @phba: pointer to lpfc hba data structure. 10652 * @cmdiocb: pointer to lpfc command iocb data structure. 10653 * @rspiocb: pointer to lpfc response iocb data structure. 10654 * 10655 * This routine is the completion callback function to a Fabric Discover 10656 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 10657 * single threaded, each FDISC completion callback function will reset 10658 * the discovery timer for all vports such that the timers will not get 10659 * unnecessary timeout. The function checks the FDISC IOCB status. If error 10660 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the 10661 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID 10662 * assigned to the vport has been changed with the completion of the FDISC 10663 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 10664 * are unregistered from the HBA, and then the lpfc_register_new_vport() 10665 * routine is invoked to register new vport with the HBA. Otherwise, the 10666 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 10667 * Server for State Change Request (SCR). 10668 **/ 10669 static void 10670 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10671 struct lpfc_iocbq *rspiocb) 10672 { 10673 struct lpfc_vport *vport = cmdiocb->vport; 10674 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10675 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 10676 struct lpfc_nodelist *np; 10677 struct lpfc_nodelist *next_np; 10678 IOCB_t *irsp = &rspiocb->iocb; 10679 struct lpfc_iocbq *piocb; 10680 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 10681 struct serv_parm *sp; 10682 uint8_t fabric_param_changed; 10683 10684 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10685 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 10686 irsp->ulpStatus, irsp->un.ulpWord[4], 10687 vport->fc_prevDID); 10688 /* Since all FDISCs are being single threaded, we 10689 * must reset the discovery timer for ALL vports 10690 * waiting to send FDISC when one completes. 
10691 */ 10692 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 10693 lpfc_set_disctmo(piocb->vport); 10694 } 10695 10696 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10697 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 10698 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 10699 10700 if (irsp->ulpStatus) { 10701 10702 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 10703 lpfc_retry_pport_discovery(phba); 10704 goto out; 10705 } 10706 10707 /* Check for retry */ 10708 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 10709 goto out; 10710 /* FDISC failed */ 10711 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10712 "0126 FDISC failed. (x%x/x%x)\n", 10713 irsp->ulpStatus, irsp->un.ulpWord[4]); 10714 goto fdisc_failed; 10715 } 10716 spin_lock_irq(shost->host_lock); 10717 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 10718 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 10719 vport->fc_flag |= FC_FABRIC; 10720 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 10721 vport->fc_flag |= FC_PUBLIC_LOOP; 10722 spin_unlock_irq(shost->host_lock); 10723 10724 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 10725 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 10726 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 10727 if (!prsp) 10728 goto out; 10729 sp = prsp->virt + sizeof(uint32_t); 10730 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 10731 memcpy(&vport->fabric_portname, &sp->portName, 10732 sizeof(struct lpfc_name)); 10733 memcpy(&vport->fabric_nodename, &sp->nodeName, 10734 sizeof(struct lpfc_name)); 10735 if (fabric_param_changed && 10736 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 10737 /* If our NportID changed, we need to ensure all 10738 * remaining NPORTs get unreg_login'ed so we can 10739 * issue unreg_vpi. 10740 */ 10741 list_for_each_entry_safe(np, next_np, 10742 &vport->fc_nodes, nlp_listp) { 10743 if ((np->nlp_state != NLP_STE_NPR_NODE) || 10744 !(np->nlp_flag & NLP_NPR_ADISC)) 10745 continue; 10746 spin_lock_irq(&ndlp->lock); 10747 np->nlp_flag &= ~NLP_NPR_ADISC; 10748 spin_unlock_irq(&ndlp->lock); 10749 lpfc_unreg_rpi(vport, np); 10750 } 10751 lpfc_cleanup_pending_mbox(vport); 10752 10753 if (phba->sli_rev == LPFC_SLI_REV4) 10754 lpfc_sli4_unreg_all_rpis(vport); 10755 10756 lpfc_mbx_unreg_vpi(vport); 10757 spin_lock_irq(shost->host_lock); 10758 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 10759 if (phba->sli_rev == LPFC_SLI_REV4) 10760 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 10761 else 10762 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 10763 spin_unlock_irq(shost->host_lock); 10764 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 10765 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 10766 /* 10767 * Driver needs to re-reg VPI in order for f/w 10768 * to update the MAC address. 10769 */ 10770 lpfc_register_new_vport(phba, vport, ndlp); 10771 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 10772 goto out; 10773 } 10774 10775 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 10776 lpfc_issue_init_vpi(vport); 10777 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 10778 lpfc_register_new_vport(phba, vport, ndlp); 10779 else 10780 lpfc_do_scr_ns_plogi(phba, vport); 10781 10782 /* The FDISC completed successfully. Move the fabric ndlp to 10783 * UNMAPPED state and register with the transport. 
10784 */ 10785 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 10786 goto out; 10787 10788 fdisc_failed: 10789 if (vport->fc_vport && 10790 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 10791 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10792 /* Cancel discovery timer */ 10793 lpfc_can_disctmo(vport); 10794 out: 10795 lpfc_els_free_iocb(phba, cmdiocb); 10796 lpfc_nlp_put(ndlp); 10797 } 10798 10799 /** 10800 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 10801 * @vport: pointer to a virtual N_Port data structure. 10802 * @ndlp: pointer to a node-list data structure. 10803 * @retry: number of retries to the command IOCB. 10804 * 10805 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 10806 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 10807 * routine to issue the IOCB, which makes sure only one outstanding fabric 10808 * IOCB will be sent off HBA at any given time. 10809 * 10810 * Note that the ndlp reference count will be incremented by 1 for holding the 10811 * ndlp and the reference to ndlp will be stored into the context1 field of 10812 * the IOCB for the completion callback function to the FDISC ELS command. 10813 * 10814 * Return code 10815 * 0 - Successfully issued fdisc iocb command 10816 * 1 - Failed to issue fdisc iocb command 10817 **/ 10818 static int 10819 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 10820 uint8_t retry) 10821 { 10822 struct lpfc_hba *phba = vport->phba; 10823 IOCB_t *icmd; 10824 struct lpfc_iocbq *elsiocb; 10825 struct serv_parm *sp; 10826 uint8_t *pcmd; 10827 uint16_t cmdsize; 10828 int did = ndlp->nlp_DID; 10829 int rc; 10830 10831 vport->port_state = LPFC_FDISC; 10832 vport->fc_myDID = 0; 10833 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 10834 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 10835 ELS_CMD_FDISC); 10836 if (!elsiocb) { 10837 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10838 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10839 "0255 Issue FDISC: no IOCB\n"); 10840 return 1; 10841 } 10842 10843 icmd = &elsiocb->iocb; 10844 icmd->un.elsreq64.myID = 0; 10845 icmd->un.elsreq64.fl = 1; 10846 10847 /* 10848 * SLI3 ports require a different context type value than SLI4. 10849 * Catch SLI3 ports here and override the prep. 
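 *
 * After the prep, the FDISC payload built below is the FDISC command word
 * followed by a copy of the physical port's service parameters, with
 * E_D_TOV, R_A_TOV and the class 1 validity bit cleared and the vport's own
 * WWPN/WWNN patched into the name fields, before the IOCB is handed to
 * lpfc_issue_fabric_iocb().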
10850 */ 10851 if (phba->sli_rev == LPFC_SLI_REV3) { 10852 icmd->ulpCt_h = 1; 10853 icmd->ulpCt_l = 0; 10854 } 10855 10856 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 10857 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 10858 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 10859 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 10860 sp = (struct serv_parm *) pcmd; 10861 /* Setup CSPs accordingly for Fabric */ 10862 sp->cmn.e_d_tov = 0; 10863 sp->cmn.w2.r_a_tov = 0; 10864 sp->cmn.virtual_fabric_support = 0; 10865 sp->cls1.classValid = 0; 10866 sp->cls2.seqDelivery = 1; 10867 sp->cls3.seqDelivery = 1; 10868 10869 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 10870 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 10871 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 10872 pcmd += sizeof(uint32_t); /* Port Name */ 10873 memcpy(pcmd, &vport->fc_portname, 8); 10874 pcmd += sizeof(uint32_t); /* Node Name */ 10875 pcmd += sizeof(uint32_t); /* Node Name */ 10876 memcpy(pcmd, &vport->fc_nodename, 8); 10877 sp->cmn.valid_vendor_ver_level = 0; 10878 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 10879 lpfc_set_disctmo(vport); 10880 10881 phba->fc_stat.elsXmitFDISC++; 10882 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 10883 10884 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10885 "Issue FDISC: did:x%x", 10886 did, 0, 0); 10887 10888 elsiocb->context1 = lpfc_nlp_get(ndlp); 10889 if (!elsiocb->context1) { 10890 lpfc_els_free_iocb(phba, elsiocb); 10891 goto err_out; 10892 } 10893 10894 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 10895 if (rc == IOCB_ERROR) { 10896 lpfc_els_free_iocb(phba, elsiocb); 10897 lpfc_nlp_put(ndlp); 10898 goto err_out; 10899 } 10900 10901 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 10902 return 0; 10903 10904 err_out: 10905 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 10906 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 10907 "0256 Issue FDISC: Cannot send IOCB\n"); 10908 return 1; 10909 } 10910 10911 /** 10912 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 10913 * @phba: pointer to lpfc hba data structure. 10914 * @cmdiocb: pointer to lpfc command iocb data structure. 10915 * @rspiocb: pointer to lpfc response iocb data structure. 10916 * 10917 * This routine is the completion callback function to the issuing of a LOGO 10918 * ELS command off a vport. It frees the command IOCB and then decrement the 10919 * reference count held on ndlp for this completion function, indicating that 10920 * the reference to the ndlp is no long needed. Note that the 10921 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 10922 * callback function and an additional explicit ndlp reference decrementation 10923 * will trigger the actual release of the ndlp. 
10924 **/ 10925 static void 10926 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10927 struct lpfc_iocbq *rspiocb) 10928 { 10929 struct lpfc_vport *vport = cmdiocb->vport; 10930 IOCB_t *irsp; 10931 struct lpfc_nodelist *ndlp; 10932 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 10933 10934 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 10935 irsp = &rspiocb->iocb; 10936 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 10937 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 10938 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 10939 10940 /* NPIV LOGO completes to NPort <nlp_DID> */ 10941 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 10942 "2928 NPIV LOGO completes to NPort x%x " 10943 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 10944 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 10945 irsp->ulpTimeout, vport->num_disc_nodes, 10946 kref_read(&ndlp->kref), ndlp->nlp_flag, 10947 ndlp->fc4_xpt_flags); 10948 10949 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 10950 spin_lock_irq(shost->host_lock); 10951 vport->fc_flag &= ~FC_NDISC_ACTIVE; 10952 vport->fc_flag &= ~FC_FABRIC; 10953 spin_unlock_irq(shost->host_lock); 10954 lpfc_can_disctmo(vport); 10955 } 10956 10957 /* Safe to release resources now. */ 10958 lpfc_els_free_iocb(phba, cmdiocb); 10959 lpfc_nlp_put(ndlp); 10960 vport->unreg_vpi_cmpl = VPORT_ERROR; 10961 } 10962 10963 /** 10964 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 10965 * @vport: pointer to a virtual N_Port data structure. 10966 * @ndlp: pointer to a node-list data structure. 10967 * 10968 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 10969 * 10970 * Note that the ndlp reference count will be incremented by 1 for holding the 10971 * ndlp and the reference to ndlp will be stored into the context1 field of 10972 * the IOCB for the completion callback function to the LOGO ELS command. 
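 *
 * The LOGO payload built here is small (illustrative): the LOGO command
 * word, the vport's own N_Port ID (fc_myDID, converted to wire order) and
 * the vport WWPN, i.e. cmdsize = 2 * sizeof(uint32_t) +
 * sizeof(struct lpfc_name).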
10973 * 10974 * Return codes 10975 * 0 - Successfully issued logo off the @vport 10976 * 1 - Failed to issue logo off the @vport 10977 **/ 10978 int 10979 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 10980 { 10981 int rc = 0; 10982 struct lpfc_hba *phba = vport->phba; 10983 struct lpfc_iocbq *elsiocb; 10984 uint8_t *pcmd; 10985 uint16_t cmdsize; 10986 10987 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 10988 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 10989 ELS_CMD_LOGO); 10990 if (!elsiocb) 10991 return 1; 10992 10993 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 10994 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 10995 pcmd += sizeof(uint32_t); 10996 10997 /* Fill in LOGO payload */ 10998 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 10999 pcmd += sizeof(uint32_t); 11000 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 11001 11002 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 11003 "Issue LOGO npiv did:x%x flg:x%x", 11004 ndlp->nlp_DID, ndlp->nlp_flag, 0); 11005 11006 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 11007 spin_lock_irq(&ndlp->lock); 11008 ndlp->nlp_flag |= NLP_LOGO_SND; 11009 spin_unlock_irq(&ndlp->lock); 11010 elsiocb->context1 = lpfc_nlp_get(ndlp); 11011 if (!elsiocb->context1) { 11012 lpfc_els_free_iocb(phba, elsiocb); 11013 goto err; 11014 } 11015 11016 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 11017 if (rc == IOCB_ERROR) { 11018 lpfc_els_free_iocb(phba, elsiocb); 11019 lpfc_nlp_put(ndlp); 11020 goto err; 11021 } 11022 return 0; 11023 11024 err: 11025 spin_lock_irq(&ndlp->lock); 11026 ndlp->nlp_flag &= ~NLP_LOGO_SND; 11027 spin_unlock_irq(&ndlp->lock); 11028 return 1; 11029 } 11030 11031 /** 11032 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 11033 * @t: timer context used to obtain the lpfc hba. 11034 * 11035 * This routine is invoked by the fabric iocb block timer after 11036 * timeout. It posts the fabric iocb block timeout event by setting the 11037 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 11038 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 11039 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 11040 * posted event WORKER_FABRIC_BLOCK_TMO. 11041 **/ 11042 void 11043 lpfc_fabric_block_timeout(struct timer_list *t) 11044 { 11045 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 11046 unsigned long iflags; 11047 uint32_t tmo_posted; 11048 11049 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11050 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 11051 if (!tmo_posted) 11052 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 11053 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11054 11055 if (!tmo_posted) 11056 lpfc_worker_wake_up(phba); 11057 return; 11058 } 11059 11060 /** 11061 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 11062 * @phba: pointer to lpfc hba data structure. 11063 * 11064 * This routine issues one fabric iocb from the driver internal list to 11065 * the HBA. It first checks whether it's ready to issue one fabric iocb to 11066 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 11067 * remove one pending fabric iocb from the driver internal list and invokes 11068 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
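 *
 * A simplified sketch of the logic below (error handling trimmed):
 *
 *   if fabric_iocb_count == 0, pull one iocb off fabric_iocb_list and
 *           atomic_inc(&phba->fabric_iocb_count) to hold the slot;
 *   save iocb_cmpl in fabric_iocb_cmpl, point iocb_cmpl at
 *           lpfc_cmpl_fabric_iocb and mark the iocb LPFC_IO_FABRIC;
 *   lpfc_sli_issue_iocb(); on IOCB_ERROR fake a LOCAL_REJECT/SLI_ABORTED
 *           completion, drop the count and repeat with the next iocb.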
11069 **/ 11070 static void 11071 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 11072 { 11073 struct lpfc_iocbq *iocb; 11074 unsigned long iflags; 11075 int ret; 11076 IOCB_t *cmd; 11077 11078 repeat: 11079 iocb = NULL; 11080 spin_lock_irqsave(&phba->hbalock, iflags); 11081 /* Post any pending iocb to the SLI layer */ 11082 if (atomic_read(&phba->fabric_iocb_count) == 0) { 11083 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 11084 list); 11085 if (iocb) 11086 /* Increment fabric iocb count to hold the position */ 11087 atomic_inc(&phba->fabric_iocb_count); 11088 } 11089 spin_unlock_irqrestore(&phba->hbalock, iflags); 11090 if (iocb) { 11091 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 11092 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 11093 iocb->iocb_flag |= LPFC_IO_FABRIC; 11094 11095 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11096 "Fabric sched1: ste:x%x", 11097 iocb->vport->port_state, 0, 0); 11098 11099 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11100 11101 if (ret == IOCB_ERROR) { 11102 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 11103 iocb->fabric_iocb_cmpl = NULL; 11104 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 11105 cmd = &iocb->iocb; 11106 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 11107 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 11108 iocb->iocb_cmpl(phba, iocb, iocb); 11109 11110 atomic_dec(&phba->fabric_iocb_count); 11111 goto repeat; 11112 } 11113 } 11114 } 11115 11116 /** 11117 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 11118 * @phba: pointer to lpfc hba data structure. 11119 * 11120 * This routine unblocks the issuing fabric iocb command. The function 11121 * will clear the fabric iocb block bit and then invoke the routine 11122 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 11123 * from the driver internal fabric iocb list. 11124 **/ 11125 void 11126 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 11127 { 11128 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11129 11130 lpfc_resume_fabric_iocbs(phba); 11131 return; 11132 } 11133 11134 /** 11135 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 11136 * @phba: pointer to lpfc hba data structure. 11137 * 11138 * This routine blocks the issuing fabric iocb for a specified amount of 11139 * time (currently 100 ms). This is done by set the fabric iocb block bit 11140 * and set up a timeout timer for 100ms. When the block bit is set, no more 11141 * fabric iocb will be issued out of the HBA. 11142 **/ 11143 static void 11144 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 11145 { 11146 int blocked; 11147 11148 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11149 /* Start a timer to unblock fabric iocbs after 100ms */ 11150 if (!blocked) 11151 mod_timer(&phba->fabric_block_timer, 11152 jiffies + msecs_to_jiffies(100)); 11153 11154 return; 11155 } 11156 11157 /** 11158 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 11159 * @phba: pointer to lpfc hba data structure. 11160 * @cmdiocb: pointer to lpfc command iocb data structure. 11161 * @rspiocb: pointer to lpfc response iocb data structure. 11162 * 11163 * This routine is the callback function that is put to the fabric iocb's 11164 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 11165 * function pointer has been stored in iocb->fabric_iocb_cmpl. 
This callback
11166  * function first restores and invokes the original iocb's callback function
11167  * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
11168  * fabric bound iocb from the driver internal fabric iocb list onto the wire.
11169  **/
11170 static void
11171 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11172 	struct lpfc_iocbq *rspiocb)
11173 {
11174 	struct ls_rjt stat;
11175 
11176 	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
11177 
11178 	switch (rspiocb->iocb.ulpStatus) {
11179 	case IOSTAT_NPORT_RJT:
11180 	case IOSTAT_FABRIC_RJT:
11181 		if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
11182 			lpfc_block_fabric_iocbs(phba);
11183 		}
11184 		break;
11185 
11186 	case IOSTAT_NPORT_BSY:
11187 	case IOSTAT_FABRIC_BSY:
11188 		lpfc_block_fabric_iocbs(phba);
11189 		break;
11190 
11191 	case IOSTAT_LS_RJT:
11192 		stat.un.lsRjtError =
11193 			be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
11194 		if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
11195 		    (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
11196 			lpfc_block_fabric_iocbs(phba);
11197 		break;
11198 	}
11199 
11200 	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
11201 
11202 	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
11203 	cmdiocb->fabric_iocb_cmpl = NULL;
11204 	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
11205 	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
11206 
11207 	atomic_dec(&phba->fabric_iocb_count);
11208 	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
11209 		/* Post any pending iocbs to HBA */
11210 		lpfc_resume_fabric_iocbs(phba);
11211 	}
11212 }
11213 
11214 /**
11215  * lpfc_issue_fabric_iocb - Issue a fabric iocb command
11216  * @phba: pointer to lpfc hba data structure.
11217  * @iocb: pointer to lpfc command iocb data structure.
11218  *
11219  * This routine is used as the top-level API for issuing a fabric iocb command
11220  * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
11221  * function makes sure that only one fabric bound iocb will be outstanding at
11222  * any given time. As such, this function will first check to see whether there
11223  * is already an outstanding fabric iocb on the wire. If so, it will put the
11224  * newly issued iocb onto the driver internal fabric iocb list, waiting to be
11225  * issued later. Otherwise, it will issue the iocb on the wire and update the
11226  * fabric iocb count to indicate that there is one fabric iocb on the wire.
11227  *
11228  * Note that this implementation can potentially send fabric IOCBs out of
11229  * order. The problem is that the "ready" check does not include the
11230  * condition that the internal fabric IOCB list is empty. As such, a fabric
11231  * IOCB issued by this routine might "jump" ahead of the fabric IOCBs
11232  * already on the internal list.
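 *
 * The serialization decision below reduces to (illustrative):
 *
 *   ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
 *           !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
 *   if ready: swap in lpfc_cmpl_fabric_iocb and issue on the ELS ring;
 *   else:     queue the iocb on phba->fabric_iocb_list for later.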
11233 * 11234 * Return code 11235 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 11236 * IOCB_ERROR - failed to issue fabric iocb 11237 **/ 11238 static int 11239 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 11240 { 11241 unsigned long iflags; 11242 int ready; 11243 int ret; 11244 11245 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 11246 11247 spin_lock_irqsave(&phba->hbalock, iflags); 11248 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 11249 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 11250 11251 if (ready) 11252 /* Increment fabric iocb count to hold the position */ 11253 atomic_inc(&phba->fabric_iocb_count); 11254 spin_unlock_irqrestore(&phba->hbalock, iflags); 11255 if (ready) { 11256 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 11257 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 11258 iocb->iocb_flag |= LPFC_IO_FABRIC; 11259 11260 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 11261 "Fabric sched2: ste:x%x", 11262 iocb->vport->port_state, 0, 0); 11263 11264 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 11265 11266 if (ret == IOCB_ERROR) { 11267 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 11268 iocb->fabric_iocb_cmpl = NULL; 11269 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 11270 atomic_dec(&phba->fabric_iocb_count); 11271 } 11272 } else { 11273 spin_lock_irqsave(&phba->hbalock, iflags); 11274 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 11275 spin_unlock_irqrestore(&phba->hbalock, iflags); 11276 ret = IOCB_SUCCESS; 11277 } 11278 return ret; 11279 } 11280 11281 /** 11282 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 11283 * @vport: pointer to a virtual N_Port data structure. 11284 * 11285 * This routine aborts all the IOCBs associated with a @vport from the 11286 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11287 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11288 * list, removes each IOCB associated with the @vport off the list, set the 11289 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11290 * associated with the IOCB. 11291 **/ 11292 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 11293 { 11294 LIST_HEAD(completions); 11295 struct lpfc_hba *phba = vport->phba; 11296 struct lpfc_iocbq *tmp_iocb, *piocb; 11297 11298 spin_lock_irq(&phba->hbalock); 11299 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11300 list) { 11301 11302 if (piocb->vport != vport) 11303 continue; 11304 11305 list_move_tail(&piocb->list, &completions); 11306 } 11307 spin_unlock_irq(&phba->hbalock); 11308 11309 /* Cancel all the IOCBs from the completions list */ 11310 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11311 IOERR_SLI_ABORTED); 11312 } 11313 11314 /** 11315 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 11316 * @ndlp: pointer to a node-list data structure. 11317 * 11318 * This routine aborts all the IOCBs associated with an @ndlp from the 11319 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 11320 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 11321 * list, removes each IOCB associated with the @ndlp off the list, set the 11322 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 11323 * associated with the IOCB. 
11324 **/ 11325 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 11326 { 11327 LIST_HEAD(completions); 11328 struct lpfc_hba *phba = ndlp->phba; 11329 struct lpfc_iocbq *tmp_iocb, *piocb; 11330 struct lpfc_sli_ring *pring; 11331 11332 pring = lpfc_phba_elsring(phba); 11333 11334 if (unlikely(!pring)) 11335 return; 11336 11337 spin_lock_irq(&phba->hbalock); 11338 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 11339 list) { 11340 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 11341 11342 list_move_tail(&piocb->list, &completions); 11343 } 11344 } 11345 spin_unlock_irq(&phba->hbalock); 11346 11347 /* Cancel all the IOCBs from the completions list */ 11348 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11349 IOERR_SLI_ABORTED); 11350 } 11351 11352 /** 11353 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 11354 * @phba: pointer to lpfc hba data structure. 11355 * 11356 * This routine aborts all the IOCBs currently on the driver internal 11357 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 11358 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 11359 * list, removes IOCBs off the list, set the status field to 11360 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 11361 * the IOCB. 11362 **/ 11363 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 11364 { 11365 LIST_HEAD(completions); 11366 11367 spin_lock_irq(&phba->hbalock); 11368 list_splice_init(&phba->fabric_iocb_list, &completions); 11369 spin_unlock_irq(&phba->hbalock); 11370 11371 /* Cancel all the IOCBs from the completions list */ 11372 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 11373 IOERR_SLI_ABORTED); 11374 } 11375 11376 /** 11377 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 11378 * @vport: pointer to lpfc vport data structure. 11379 * 11380 * This routine is invoked by the vport cleanup for deletions and the cleanup 11381 * for an ndlp on removal. 11382 **/ 11383 void 11384 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 11385 { 11386 struct lpfc_hba *phba = vport->phba; 11387 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11388 unsigned long iflag = 0; 11389 11390 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11391 list_for_each_entry_safe(sglq_entry, sglq_next, 11392 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11393 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 11394 lpfc_nlp_put(sglq_entry->ndlp); 11395 sglq_entry->ndlp = NULL; 11396 } 11397 } 11398 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11399 return; 11400 } 11401 11402 /** 11403 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 11404 * @phba: pointer to lpfc hba data structure. 11405 * @axri: pointer to the els xri abort wcqe structure. 11406 * 11407 * This routine is invoked by the worker thread to process a SLI4 slow-path 11408 * ELS aborted xri. 
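 *
 * Handling is two-phase (outline only): the aborted-ELS SGL list is searched
 * for the XRI first; a match moves the sglq back to the free list, arms an
 * RRQ for the node via lpfc_set_rrq_active() and drops the node reference.
 * If no match is found there, the active SGLQ table is consulted and a
 * matching entry is simply marked SGL_XRI_ABORTED.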
11409 **/ 11410 void 11411 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 11412 struct sli4_wcqe_xri_aborted *axri) 11413 { 11414 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 11415 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 11416 uint16_t lxri = 0; 11417 11418 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 11419 unsigned long iflag = 0; 11420 struct lpfc_nodelist *ndlp; 11421 struct lpfc_sli_ring *pring; 11422 11423 pring = lpfc_phba_elsring(phba); 11424 11425 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 11426 list_for_each_entry_safe(sglq_entry, sglq_next, 11427 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 11428 if (sglq_entry->sli4_xritag == xri) { 11429 list_del(&sglq_entry->list); 11430 ndlp = sglq_entry->ndlp; 11431 sglq_entry->ndlp = NULL; 11432 list_add_tail(&sglq_entry->list, 11433 &phba->sli4_hba.lpfc_els_sgl_list); 11434 sglq_entry->state = SGL_FREED; 11435 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 11436 iflag); 11437 11438 if (ndlp) { 11439 lpfc_set_rrq_active(phba, ndlp, 11440 sglq_entry->sli4_lxritag, 11441 rxid, 1); 11442 lpfc_nlp_put(ndlp); 11443 } 11444 11445 /* Check if TXQ queue needs to be serviced */ 11446 if (pring && !list_empty(&pring->txq)) 11447 lpfc_worker_wake_up(phba); 11448 return; 11449 } 11450 } 11451 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 11452 lxri = lpfc_sli4_xri_inrange(phba, xri); 11453 if (lxri == NO_XRI) 11454 return; 11455 11456 spin_lock_irqsave(&phba->hbalock, iflag); 11457 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 11458 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 11459 spin_unlock_irqrestore(&phba->hbalock, iflag); 11460 return; 11461 } 11462 sglq_entry->state = SGL_XRI_ABORTED; 11463 spin_unlock_irqrestore(&phba->hbalock, iflag); 11464 return; 11465 } 11466 11467 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 11468 * @vport: pointer to virtual port object. 11469 * @ndlp: nodelist pointer for the impacted node. 11470 * 11471 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 11472 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 11473 * the driver is required to send a LOGO to the remote node before it 11474 * attempts to recover its login to the remote node. 11475 */ 11476 void 11477 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 11478 struct lpfc_nodelist *ndlp) 11479 { 11480 struct Scsi_Host *shost; 11481 struct lpfc_hba *phba; 11482 unsigned long flags = 0; 11483 11484 shost = lpfc_shost_from_vport(vport); 11485 phba = vport->phba; 11486 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 11487 lpfc_printf_log(phba, KERN_INFO, 11488 LOG_SLI, "3093 No rport recovery needed. " 11489 "rport in state 0x%x\n", ndlp->nlp_state); 11490 return; 11491 } 11492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 11493 "3094 Start rport recovery on shost id 0x%x " 11494 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 11495 "flags 0x%x\n", 11496 shost->host_no, ndlp->nlp_DID, 11497 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 11498 ndlp->nlp_flag); 11499 /* 11500 * The rport is not responding. Remove the FCP-2 flag to prevent 11501 * an ADISC in the follow-up recovery code. 
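 *
 * Recovery then proceeds by flagging the node NLP_ISSUE_LOGO and
 * unregistering its RPI via lpfc_unreg_rpi(), so the follow-up path sends a
 * LOGO and re-establishes the login rather than attempting ADISC.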
11502 */ 11503 spin_lock_irqsave(&ndlp->lock, flags); 11504 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 11505 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 11506 spin_unlock_irqrestore(&ndlp->lock, flags); 11507 lpfc_unreg_rpi(vport, ndlp); 11508 } 11509 11510 static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) 11511 { 11512 bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); 11513 } 11514 11515 static void 11516 lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) 11517 { 11518 u32 i; 11519 11520 if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) 11521 return; 11522 11523 for (i = min; i <= max; i++) 11524 set_bit(i, vport->vmid_priority_range); 11525 } 11526 11527 static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) 11528 { 11529 set_bit(ctcl_vmid, vport->vmid_priority_range); 11530 } 11531 11532 u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) 11533 { 11534 u32 i; 11535 11536 i = find_first_bit(vport->vmid_priority_range, 11537 LPFC_VMID_MAX_PRIORITY_RANGE); 11538 11539 if (i == LPFC_VMID_MAX_PRIORITY_RANGE) 11540 return 0; 11541 11542 clear_bit(i, vport->vmid_priority_range); 11543 return i; 11544 } 11545 11546 #define MAX_PRIORITY_DESC 255 11547 11548 static void 11549 lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11550 struct lpfc_iocbq *rspiocb) 11551 { 11552 struct lpfc_vport *vport = cmdiocb->vport; 11553 struct priority_range_desc *desc; 11554 struct lpfc_dmabuf *prsp = NULL; 11555 struct lpfc_vmid_priority_range *vmid_range = NULL; 11556 u32 *data; 11557 struct lpfc_dmabuf *dmabuf = cmdiocb->context2; 11558 IOCB_t *irsp = &rspiocb->iocb; 11559 u8 *pcmd, max_desc; 11560 u32 len, i; 11561 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 11562 11563 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 11564 if (!prsp) 11565 goto out; 11566 11567 pcmd = prsp->virt; 11568 data = (u32 *)pcmd; 11569 if (data[0] == ELS_CMD_LS_RJT) { 11570 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11571 "3277 QFPA LS_RJT x%x x%x\n", 11572 data[0], data[1]); 11573 goto out; 11574 } 11575 if (irsp->ulpStatus) { 11576 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 11577 "6529 QFPA failed with status x%x x%x\n", 11578 irsp->ulpStatus, irsp->un.ulpWord[4]); 11579 goto out; 11580 } 11581 11582 if (!vport->qfpa_res) { 11583 max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); 11584 vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), 11585 GFP_KERNEL); 11586 if (!vport->qfpa_res) 11587 goto out; 11588 } 11589 11590 len = *((u32 *)(pcmd + 4)); 11591 len = be32_to_cpu(len); 11592 memcpy(vport->qfpa_res, pcmd, len + 8); 11593 len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; 11594 11595 desc = (struct priority_range_desc *)(pcmd + 8); 11596 vmid_range = vport->vmid_priority.vmid_range; 11597 if (!vmid_range) { 11598 vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), 11599 GFP_KERNEL); 11600 if (!vmid_range) { 11601 kfree(vport->qfpa_res); 11602 goto out; 11603 } 11604 vport->vmid_priority.vmid_range = vmid_range; 11605 } 11606 vport->vmid_priority.num_descriptors = len; 11607 11608 for (i = 0; i < len; i++, vmid_range++, desc++) { 11609 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 11610 "6539 vmid values low=%d, high=%d, qos=%d, " 11611 "local ve id=%d\n", desc->lo_range, 11612 desc->hi_range, desc->qos_priority, 11613 desc->local_ve_id); 11614 11615 vmid_range->low = desc->lo_range << 1; 11616 if (desc->local_ve_id == QFPA_ODD_ONLY) 11617 vmid_range->low++; 11618 if (desc->qos_priority) 
11619 vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; 11620 vmid_range->qos = desc->qos_priority; 11621 11622 vmid_range->high = desc->hi_range << 1; 11623 if ((desc->local_ve_id == QFPA_ODD_ONLY) || 11624 (desc->local_ve_id == QFPA_EVEN_ODD)) 11625 vmid_range->high++; 11626 } 11627 lpfc_init_cs_ctl_bitmap(vport); 11628 for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { 11629 lpfc_vmid_set_cs_ctl_range(vport, 11630 vport->vmid_priority.vmid_range[i].low, 11631 vport->vmid_priority.vmid_range[i].high); 11632 } 11633 11634 vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; 11635 out: 11636 lpfc_els_free_iocb(phba, cmdiocb); 11637 lpfc_nlp_put(ndlp); 11638 } 11639 11640 int lpfc_issue_els_qfpa(struct lpfc_vport *vport) 11641 { 11642 struct lpfc_hba *phba = vport->phba; 11643 struct lpfc_nodelist *ndlp; 11644 struct lpfc_iocbq *elsiocb; 11645 u8 *pcmd; 11646 int ret; 11647 11648 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 11649 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11650 return -ENXIO; 11651 11652 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, 11653 ndlp->nlp_DID, ELS_CMD_QFPA); 11654 if (!elsiocb) 11655 return -ENOMEM; 11656 11657 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 11658 11659 *((u32 *)(pcmd)) = ELS_CMD_QFPA; 11660 pcmd += 4; 11661 11662 elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa; 11663 11664 elsiocb->context1 = lpfc_nlp_get(ndlp); 11665 if (!elsiocb->context1) { 11666 lpfc_els_free_iocb(vport->phba, elsiocb); 11667 return -ENXIO; 11668 } 11669 11670 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); 11671 if (ret != IOCB_SUCCESS) { 11672 lpfc_els_free_iocb(phba, elsiocb); 11673 lpfc_nlp_put(ndlp); 11674 return -EIO; 11675 } 11676 vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; 11677 return 0; 11678 } 11679 11680 int 11681 lpfc_vmid_uvem(struct lpfc_vport *vport, 11682 struct lpfc_vmid *vmid, bool instantiated) 11683 { 11684 struct lpfc_vem_id_desc *vem_id_desc; 11685 struct lpfc_nodelist *ndlp; 11686 struct lpfc_iocbq *elsiocb; 11687 struct instantiated_ve_desc *inst_desc; 11688 struct lpfc_vmid_context *vmid_context; 11689 u8 *pcmd; 11690 u32 *len; 11691 int ret = 0; 11692 11693 ndlp = lpfc_findnode_did(vport, Fabric_DID); 11694 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11695 return -ENXIO; 11696 11697 vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); 11698 if (!vmid_context) 11699 return -ENOMEM; 11700 elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, 11701 ndlp, Fabric_DID, ELS_CMD_UVEM); 11702 if (!elsiocb) 11703 goto out; 11704 11705 lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, 11706 "3427 Host vmid %s %d\n", 11707 vmid->host_vmid, instantiated); 11708 vmid_context->vmp = vmid; 11709 vmid_context->nlp = ndlp; 11710 vmid_context->instantiated = instantiated; 11711 elsiocb->vmid_tag.vmid_context = vmid_context; 11712 pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 11713 11714 if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) 11715 memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, 11716 LPFC_COMPRESS_VMID_SIZE); 11717 11718 *((u32 *)(pcmd)) = ELS_CMD_UVEM; 11719 len = (u32 *)(pcmd + 4); 11720 *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); 11721 11722 vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); 11723 vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); 11724 vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); 11725 memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, 11726 LPFC_COMPRESS_VMID_SIZE); 11727 11728 inst_desc = (struct instantiated_ve_desc *)(pcmd + 
32); 11729 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 11730 inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); 11731 memcpy(inst_desc->global_vem_id, vmid->host_vmid, 11732 LPFC_COMPRESS_VMID_SIZE); 11733 11734 bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); 11735 bf_set(lpfc_instantiated_local_id, inst_desc, 11736 vmid->un.cs_ctl_vmid); 11737 if (instantiated) { 11738 inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); 11739 } else { 11740 inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); 11741 lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); 11742 } 11743 inst_desc->word6 = cpu_to_be32(inst_desc->word6); 11744 11745 elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem; 11746 11747 elsiocb->context1 = lpfc_nlp_get(ndlp); 11748 if (!elsiocb->context1) { 11749 lpfc_els_free_iocb(vport->phba, elsiocb); 11750 goto out; 11751 } 11752 11753 ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); 11754 if (ret != IOCB_SUCCESS) { 11755 lpfc_els_free_iocb(vport->phba, elsiocb); 11756 lpfc_nlp_put(ndlp); 11757 goto out; 11758 } 11759 11760 return 0; 11761 out: 11762 kfree(vmid_context); 11763 return -EIO; 11764 } 11765 11766 static void 11767 lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, 11768 struct lpfc_iocbq *rspiocb) 11769 { 11770 struct lpfc_vport *vport = icmdiocb->vport; 11771 struct lpfc_dmabuf *prsp = NULL; 11772 struct lpfc_vmid_context *vmid_context = 11773 icmdiocb->vmid_tag.vmid_context; 11774 struct lpfc_nodelist *ndlp = icmdiocb->context1; 11775 u8 *pcmd; 11776 u32 *data; 11777 IOCB_t *irsp = &rspiocb->iocb; 11778 struct lpfc_dmabuf *dmabuf = icmdiocb->context2; 11779 struct lpfc_vmid *vmid; 11780 11781 vmid = vmid_context->vmp; 11782 if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 11783 ndlp = NULL; 11784 11785 prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); 11786 if (!prsp) 11787 goto out; 11788 pcmd = prsp->virt; 11789 data = (u32 *)pcmd; 11790 if (data[0] == ELS_CMD_LS_RJT) { 11791 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11792 "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); 11793 goto out; 11794 } 11795 if (irsp->ulpStatus) { 11796 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, 11797 "4533 UVEM error status %x: %x\n", 11798 irsp->ulpStatus, irsp->un.ulpWord[4]); 11799 goto out; 11800 } 11801 spin_lock(&phba->hbalock); 11802 /* Set IN USE flag */ 11803 vport->vmid_flag |= LPFC_VMID_IN_USE; 11804 phba->pport->vmid_flag |= LPFC_VMID_IN_USE; 11805 spin_unlock(&phba->hbalock); 11806 11807 if (vmid_context->instantiated) { 11808 write_lock(&vport->vmid_lock); 11809 vmid->flag |= LPFC_VMID_REGISTERED; 11810 vmid->flag &= ~LPFC_VMID_REQ_REGISTER; 11811 write_unlock(&vport->vmid_lock); 11812 } 11813 11814 out: 11815 kfree(vmid_context); 11816 lpfc_els_free_iocb(phba, icmdiocb); 11817 lpfc_nlp_put(ndlp); 11818 } 11819
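
/*
 * Illustrative overview of the VMID priority-tagging ELS flow implemented
 * above (simplified): lpfc_issue_els_qfpa() queries the fabric's QoS
 * priority ranges and lpfc_cmpl_els_qfpa() records them in the vport's
 * CS_CTL bitmap via lpfc_vmid_set_cs_ctl_range(). A CS_CTL value for an
 * application is later drawn with lpfc_vmid_get_cs_ctl() by the VMID
 * handling elsewhere in the driver and registered (or deregistered) with
 * the fabric through lpfc_vmid_uvem(), whose completion handler,
 * lpfc_cmpl_els_uvem(), marks the VMID entry LPFC_VMID_REGISTERED.
 */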