/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is already LPFC_VPORT_READY, the request for checking host
 * link attention events will be ignored and a return code shall indicate
 * that no host link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed into
 * the routine, for the discovery state machine to issue an Extended Link
 * Service (ELS) command. It is a generic lpfc-IOCB allocation and
 * preparation routine used by all the discovery state machine routines;
 * the ELS command-specific fields will be set up later by the individual
 * discovery state machine routines after calling this routine to allocate
 * and prepare a generic IOCB data structure. It fills in the Buffer
 * Descriptor Entries (BDEs) and allocates buffers for both the command
 * payload and the response payload (if expected). The reference count on
 * the ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else if (elscmd == ELS_CMD_LOGO)
			icmd->ulpTimeout = phba->fc_ratov;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}
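
/*
 * Illustrative sketch (not driver code): callers in this file follow the
 * same pattern when building an ELS request with lpfc_prep_els_iocb();
 * see lpfc_issue_els_flogi() below for a real caller (FLOGI is routed
 * through lpfc_issue_fabric_iocb() rather than lpfc_sli_issue_iocb()).
 * Roughly:
 *
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, elscmd);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
 *	... fill in the ELS payload at pcmd ...
 *	elsiocb->iocb_cmpl = <completion handler>;
 *	elsiocb->context1 = lpfc_nlp_get(ndlp);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *
 * On failure the caller releases the iocb with lpfc_els_free_iocb() and
 * drops the ndlp reference it took.
 */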

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_no_ndlp;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_no_ndlp:
	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->ctx_buf = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric nodename in the completion service parameters has changed;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname, or Fabric nodename in the completion service
 * parameters has changed.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID for
 * the @vport against the previously assigned N_Port ID. If it is different
 * from the previously assigned Destination ID (DID), the lpfc_unreg_rpi()
 * routine is invoked on all the remaining nodes with the @vport to
 * unregister the Remote Port Indicators (RPIs). Finally,
 * lpfc_issue_fabric_reglogin() is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			   to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}
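
/*
 * Illustration (not driver code): a worked example of the timeout
 * conversions done in lpfc_cmpl_els_flogi_fabric() above. When
 * edtovResolution is set, the fabric reports E_D_TOV in nanoseconds, so it
 * is rounded up to milliseconds, e.g. 2000000 ns -> (2000000 + 999999) /
 * 1000000 = 2 ms. R_A_TOV is reported in milliseconds and is rounded up to
 * seconds, e.g. 10000 ms -> (10000 + 999) / 1000 = 10 s.
 */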

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}
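
/*
 * Illustration (not driver code, example WWPNs are hypothetical): in
 * lpfc_cmpl_els_flogi_nport() above, the memcmp() of the two WWPNs decides
 * which side drives discovery. For example, with a local WWPN of
 * 10:00:00:00:c9:00:00:02 and a remote WWPN of 10:00:00:00:c9:00:00:01,
 * memcmp() returns > 0, so this side takes PT2PT_LocalID (1), assigns the
 * peer PT2PT_RemoteID (2) and issues the PLOGI; the lexicographically
 * smaller side simply waits for that PLOGI.
 */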

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * a retry has been made (either immediately or delayed, with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly having reached the
 * maximum number of retries), one additional decrement of the ndlp
 * reference shall be made before going out, after releasing the command
 * IOCB. This will actually release the remote node (note that
 * lpfc_els_free_iocb() will also invoke one decrement of the ndlp reference
 * count). If no error is reported in the IOCB status, the command Port ID
 * field is used to determine whether this is a point-to-point topology or a
 * fabric topology: if the Port ID field is assigned, it is a fabric
 * topology; otherwise, it is a point-to-point topology. The routine
 * lpfc_cmpl_els_flogi_fabric() or lpfc_cmpl_els_flogi_nport() shall be
 * invoked accordingly to handle the specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl:      status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			    IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 irsp->ulpStatus, irsp->un.ulpWord[4],
					 irsp->ulpTimeout, phba->hba_flag,
					 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 cmdiocb->sli4_xritag, irsp->ulpTimeout);

		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
			phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	} else if (vport->port_state > LPFC_FLOGI &&
		   vport->fc_flag & FC_PT2PT) {
		/*
		 * In a p2p topology, it is possible that discovery has
		 * already progressed, and this completion can be ignored.
		 * Recheck the indicated topology.
		 */
		if (!sp->cmn.fPort)
			goto out;
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
		lpfc_nlp_put(ndlp);
	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp;
	uint32_t *pcmd;
	uint32_t cmd;

	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
	cmd = *pcmd;
	irsp = &rspiocb->iocb;

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
			cmdiocb->iocb_flag);

	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the context1 field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI:     opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
						phba->defer_flogi_acc_ox_id;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;

		vport->fc_myDID = did;
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
				if ((phba->pport->fc_flag & FC_PT2PT) &&
				    !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
					iocb->fabric_iocb_cmpl =
						lpfc_ignore_els_cmpl;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
		}
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and
 * matches the WWPN of the N_Port logged into; 3) The @ndlp is not on the
 * vport list but does not have a WWPN assigned either. If the WWPN is
 * confirmed, the pointer to the @ndlp will be returned. If the WWPN is not
 * confirmed: 1) if there is a node on the vport list other than the @ndlp
 * with the same WWPN as the N_Port PLOGI logged into, lpfc_unreg_rpi() will
 * be invoked on that node to release the RPI associated with the node; 2) if
 * no node is found on the vport list with the same WWPN as the N_Port PLOGI
 * logged into, a new node shall be allocated (or activated). In either case,
 * the parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp
 * shall be released and the new_ndlp shall be put on to the vport node list
 * and its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from the
 * not-matching or inactive "new_ndlp" on the vport node list is assigned to
 * the nlp_DID of the @ndlp. This is because the release of @ndlp is actually
 * to put it into an inactive state on the vport node list, and the vport
 * node list management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct serv_parm *sp;
	uint8_t  name[sizeof(struct lpfc_name)];
	uint32_t keepDID = 0, keep_nlp_flag = 0;
	uint32_t keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN.  Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (!new_ndlp || (new_ndlp == ndlp))
		return ndlp;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
			 "new_ndlp x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

	keepDID = new_ndlp->nlp_DID;

	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	/* At this point in this routine, we know new_ndlp will be
	 * returned. however, any previous GID_FTs that were done
	 * would have updated nlp_fc4_type in ndlp, so we must ensure
	 * new_ndlp has the right value.
	 */
	if (vport->fc_flag & FC_FABRIC) {
		keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
		new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
	}

	lpfc_unreg_rpi(vport, new_ndlp);
	new_ndlp->nlp_DID = ndlp->nlp_DID;
	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
	if (phba->sli_rev == LPFC_SLI_REV4)
		memcpy(new_ndlp->active_rrqs_xri_bitmap,
		       ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	/* Lock both ndlps */
	spin_lock_irq(&ndlp->lock);
	spin_lock_irq(&new_ndlp->lock);
	keep_new_nlp_flag = new_ndlp->nlp_flag;
	keep_nlp_flag = ndlp->nlp_flag;
	new_ndlp->nlp_flag = ndlp->nlp_flag;

	/* if new_ndlp had NLP_UNREG_INP set, keep it */
	if (keep_new_nlp_flag & NLP_UNREG_INP)
		new_ndlp->nlp_flag |= NLP_UNREG_INP;
	else
		new_ndlp->nlp_flag &= ~NLP_UNREG_INP;

	/* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
	if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
		new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	else
		new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;

	/*
	 * Retain the DROPPED flag. This will take care of the init
This will take care of the init 1705 * refcount when affecting the state change 1706 */ 1707 if (keep_new_nlp_flag & NLP_DROPPED) 1708 new_ndlp->nlp_flag |= NLP_DROPPED; 1709 else 1710 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1711 1712 ndlp->nlp_flag = keep_new_nlp_flag; 1713 1714 /* if ndlp had NLP_UNREG_INP set, keep it */ 1715 if (keep_nlp_flag & NLP_UNREG_INP) 1716 ndlp->nlp_flag |= NLP_UNREG_INP; 1717 else 1718 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1719 1720 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1721 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1722 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1723 else 1724 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1725 1726 /* 1727 * Retain the DROPPED flag. This will take care of the init 1728 * refcount when affecting the state change 1729 */ 1730 if (keep_nlp_flag & NLP_DROPPED) 1731 ndlp->nlp_flag |= NLP_DROPPED; 1732 else 1733 ndlp->nlp_flag &= ~NLP_DROPPED; 1734 1735 spin_unlock_irq(&new_ndlp->lock); 1736 spin_unlock_irq(&ndlp->lock); 1737 1738 /* Set nlp_states accordingly */ 1739 keep_nlp_state = new_ndlp->nlp_state; 1740 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1741 1742 /* interchange the nvme remoteport structs */ 1743 keep_nrport = new_ndlp->nrport; 1744 new_ndlp->nrport = ndlp->nrport; 1745 1746 /* Move this back to NPR state */ 1747 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1748 /* The new_ndlp is replacing ndlp totally, so we need 1749 * to put ndlp on UNUSED list and try to free it. 1750 */ 1751 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1752 "3179 PLOGI confirm NEW: %x %x\n", 1753 new_ndlp->nlp_DID, keepDID); 1754 1755 /* Two ndlps cannot have the same did on the nodelist. 1756 * Note: for this case, ndlp has a NULL WWPN so setting 1757 * the nlp_fc4_type isn't required. 1758 */ 1759 ndlp->nlp_DID = keepDID; 1760 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1761 if (phba->sli_rev == LPFC_SLI_REV4 && 1762 active_rrqs_xri_bitmap) 1763 memcpy(ndlp->active_rrqs_xri_bitmap, 1764 active_rrqs_xri_bitmap, 1765 phba->cfg_rrq_xri_bitmap_sz); 1766 1767 } else { 1768 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1769 "3180 PLOGI confirm SWAP: %x %x\n", 1770 new_ndlp->nlp_DID, keepDID); 1771 1772 lpfc_unreg_rpi(vport, ndlp); 1773 1774 /* Two ndlps cannot have the same did and the fc4 1775 * type must be transferred because the ndlp is in 1776 * flight. 
1777 */ 1778 ndlp->nlp_DID = keepDID; 1779 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1780 1781 if (phba->sli_rev == LPFC_SLI_REV4 && 1782 active_rrqs_xri_bitmap) 1783 memcpy(ndlp->active_rrqs_xri_bitmap, 1784 active_rrqs_xri_bitmap, 1785 phba->cfg_rrq_xri_bitmap_sz); 1786 1787 /* Since we are switching over to the new_ndlp, 1788 * reset the old ndlp state 1789 */ 1790 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1791 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1792 keep_nlp_state = NLP_STE_NPR_NODE; 1793 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1794 ndlp->nrport = keep_nrport; 1795 } 1796 1797 /* 1798 * If ndlp is not associated with any rport we can drop it here else 1799 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1800 */ 1801 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1802 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1803 1804 if (phba->sli_rev == LPFC_SLI_REV4 && 1805 active_rrqs_xri_bitmap) 1806 mempool_free(active_rrqs_xri_bitmap, 1807 phba->active_rrq_pool); 1808 1809 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1810 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1811 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1812 new_ndlp->nlp_fc4_type); 1813 1814 return new_ndlp; 1815 } 1816 1817 /** 1818 * lpfc_end_rscn - Check and handle more rscn for a vport 1819 * @vport: pointer to a host virtual N_Port data structure. 1820 * 1821 * This routine checks whether more Registration State Change 1822 * Notifications (RSCNs) came in while the discovery state machine was in 1823 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1824 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1825 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1826 * handling the RSCNs. 1827 **/ 1828 void 1829 lpfc_end_rscn(struct lpfc_vport *vport) 1830 { 1831 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1832 1833 if (vport->fc_flag & FC_RSCN_MODE) { 1834 /* 1835 * Check to see if more RSCNs came in while we were 1836 * processing this one. 1837 */ 1838 if (vport->fc_rscn_id_cnt || 1839 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1840 lpfc_els_handle_rscn(vport); 1841 else { 1842 spin_lock_irq(shost->host_lock); 1843 vport->fc_flag &= ~FC_RSCN_MODE; 1844 spin_unlock_irq(shost->host_lock); 1845 } 1846 } 1847 } 1848 1849 /** 1850 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1851 * @phba: pointer to lpfc hba data structure. 1852 * @cmdiocb: pointer to lpfc command iocb data structure. 1853 * @rspiocb: pointer to lpfc response iocb data structure. 1854 * 1855 * This routine will call the clear rrq function to free the rrq and 1856 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1857 * exist then the clear_rrq is still called because the rrq needs to 1858 * be freed. 
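 *
 * Background (general FC-LS behavior, noted here for context): RRQ is the
 * Reinstate Recovery Qualifier ELS, sent after an exchange has been aborted
 * so the remote port can release the recovery qualifier and the exchange
 * resources can be reused. That is why the rrq context and the xri bit in
 * the ndlp's bitmap are cleared below regardless of the completion status.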
1859  **/
1860
1861 static void
1862 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1863 		  struct lpfc_iocbq *rspiocb)
1864 {
1865 	struct lpfc_vport *vport = cmdiocb->vport;
1866 	IOCB_t *irsp;
1867 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
1868 	struct lpfc_node_rrq *rrq;
1869
1870 	/* we pass cmdiocb to state machine which needs rspiocb as well */
1871 	rrq = cmdiocb->context_un.rrq;
1872 	cmdiocb->context_un.rsp_iocb = rspiocb;
1873
1874 	irsp = &rspiocb->iocb;
1875 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1876 			      "RRQ cmpl: status:x%x/x%x did:x%x",
1877 			      irsp->ulpStatus, irsp->un.ulpWord[4],
1878 			      irsp->un.elsreq64.remoteID);
1879
1880 	/* rrq completes to NPort <nlp_DID> */
1881 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1882 			 "2880 RRQ completes to DID x%x "
1883 			 "Data: x%x x%x x%x x%x x%x\n",
1884 			 irsp->un.elsreq64.remoteID,
1885 			 irsp->ulpStatus, irsp->un.ulpWord[4],
1886 			 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1887
1888 	if (irsp->ulpStatus) {
1889 		/* Check for retry */
1890 		/* RRQ failed. Don't print the vport to vport rjts */
1891 		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1892 		    (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1893 		     ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1894 		    (phba)->pport->cfg_log_verbose & LOG_ELS)
1895 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1896 					 "2881 RRQ failure DID:%06X Status:"
1897 					 "x%x/x%x\n",
1898 					 ndlp->nlp_DID, irsp->ulpStatus,
1899 					 irsp->un.ulpWord[4]);
1900 	}
1901
1902 	lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1903 	lpfc_els_free_iocb(phba, cmdiocb);
1904 	lpfc_nlp_put(ndlp);
1905 	return;
1906 }
1907 /**
1908  * lpfc_cmpl_els_plogi - Completion callback function for plogi
1909  * @phba: pointer to lpfc hba data structure.
1910  * @cmdiocb: pointer to lpfc command iocb data structure.
1911  * @rspiocb: pointer to lpfc response iocb data structure.
1912  *
1913  * This routine is the completion callback function for issuing the Port
1914  * Login (PLOGI) command. For PLOGI completion, there must be an active
1915  * ndlp on the vport node list that matches the remote node ID from the
1916  * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
1917  * ignored and the command IOCB released. The PLOGI response IOCB status is
1918  * checked for error conditions. If there is error status reported, PLOGI
1919  * retry shall be attempted by invoking the lpfc_els_retry() routine.
1920  * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1921  * the ndlp and the NLP_EVT_CMPL_PLOGI event is sent to the Discover State
1922  * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1923  * there are additional N_Port nodes on the vport that need to perform
1924  * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue
1925  * additional PLOGIs.
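 *
 * One subtlety handled below: if the PLOGI failed because of a PLOGI
 * collision (the node has already accepted an inbound PLOGI and sits in
 * NLP_STE_REG_LOGIN_ISSUE), the node is left alone so the reglogin path
 * can finish instead of being torn down here.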
1926 **/ 1927 static void 1928 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1929 struct lpfc_iocbq *rspiocb) 1930 { 1931 struct lpfc_vport *vport = cmdiocb->vport; 1932 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1933 IOCB_t *irsp; 1934 struct lpfc_nodelist *ndlp, *free_ndlp; 1935 struct lpfc_dmabuf *prsp; 1936 int disc; 1937 1938 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1939 cmdiocb->context_un.rsp_iocb = rspiocb; 1940 1941 irsp = &rspiocb->iocb; 1942 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1943 "PLOGI cmpl: status:x%x/x%x did:x%x", 1944 irsp->ulpStatus, irsp->un.ulpWord[4], 1945 irsp->un.elsreq64.remoteID); 1946 1947 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1948 if (!ndlp) { 1949 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1950 "0136 PLOGI completes to NPort x%x " 1951 "with no ndlp. Data: x%x x%x x%x\n", 1952 irsp->un.elsreq64.remoteID, 1953 irsp->ulpStatus, irsp->un.ulpWord[4], 1954 irsp->ulpIoTag); 1955 goto out_freeiocb; 1956 } 1957 1958 /* Since ndlp can be freed in the disc state machine, note if this node 1959 * is being used during discovery. 1960 */ 1961 spin_lock_irq(&ndlp->lock); 1962 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1963 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1964 spin_unlock_irq(&ndlp->lock); 1965 1966 /* PLOGI completes to NPort <nlp_DID> */ 1967 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1968 "0102 PLOGI completes to NPort x%06x " 1969 "Data: x%x x%x x%x x%x x%x\n", 1970 ndlp->nlp_DID, ndlp->nlp_fc4_type, 1971 irsp->ulpStatus, irsp->un.ulpWord[4], 1972 disc, vport->num_disc_nodes); 1973 1974 /* Check to see if link went down during discovery */ 1975 if (lpfc_els_chk_latt(vport)) { 1976 spin_lock_irq(&ndlp->lock); 1977 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1978 spin_unlock_irq(&ndlp->lock); 1979 goto out; 1980 } 1981 1982 if (irsp->ulpStatus) { 1983 /* Check for retry */ 1984 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1985 /* ELS command is being retried */ 1986 if (disc) { 1987 spin_lock_irq(&ndlp->lock); 1988 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1989 spin_unlock_irq(&ndlp->lock); 1990 } 1991 goto out; 1992 } 1993 /* PLOGI failed Don't print the vport to vport rjts */ 1994 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1995 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1996 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1997 (phba)->pport->cfg_log_verbose & LOG_ELS) 1998 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1999 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 2000 ndlp->nlp_DID, irsp->ulpStatus, 2001 irsp->un.ulpWord[4]); 2002 2003 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2004 if (lpfc_error_lost_link(irsp)) 2005 goto check_plogi; 2006 else 2007 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2008 NLP_EVT_CMPL_PLOGI); 2009 2010 /* If a PLOGI collision occurred, the node needs to continue 2011 * with the reglogin process. 2012 */ 2013 spin_lock_irq(&ndlp->lock); 2014 if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && 2015 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 2016 spin_unlock_irq(&ndlp->lock); 2017 goto out; 2018 } 2019 spin_unlock_irq(&ndlp->lock); 2020 2021 /* No PLOGI collision and the node is not registered with the 2022 * scsi or nvme transport. It is no longer an active node. Just 2023 * start the device remove process. 
2024 */ 2025 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2026 spin_lock_irq(&ndlp->lock); 2027 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2028 spin_unlock_irq(&ndlp->lock); 2029 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2030 NLP_EVT_DEVICE_RM); 2031 } 2032 } else { 2033 /* Good status, call state machine */ 2034 prsp = list_entry(((struct lpfc_dmabuf *) 2035 cmdiocb->context2)->list.next, 2036 struct lpfc_dmabuf, list); 2037 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2038 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2039 NLP_EVT_CMPL_PLOGI); 2040 } 2041 2042 check_plogi: 2043 if (disc && vport->num_disc_nodes) { 2044 /* Check to see if there are more PLOGIs to be sent */ 2045 lpfc_more_plogi(vport); 2046 2047 if (vport->num_disc_nodes == 0) { 2048 spin_lock_irq(shost->host_lock); 2049 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2050 spin_unlock_irq(shost->host_lock); 2051 2052 lpfc_can_disctmo(vport); 2053 lpfc_end_rscn(vport); 2054 } 2055 } 2056 2057 out: 2058 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2059 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2060 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2061 2062 out_freeiocb: 2063 /* Release the reference on the original I/O request. */ 2064 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 2065 2066 lpfc_els_free_iocb(phba, cmdiocb); 2067 lpfc_nlp_put(free_ndlp); 2068 return; 2069 } 2070 2071 /** 2072 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2073 * @vport: pointer to a host virtual N_Port data structure. 2074 * @did: destination port identifier. 2075 * @retry: number of retries to the command IOCB. 2076 * 2077 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2078 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2079 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2080 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2081 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2082 * 2083 * Note that the ndlp reference count will be incremented by 1 for holding 2084 * the ndlp and the reference to ndlp will be stored into the context1 field 2085 * of the IOCB for the completion callback function to the PLOGI ELS command. 2086 * 2087 * Return code 2088 * 0 - Successfully issued a plogi for @vport 2089 * 1 - failed to issue a plogi for @vport 2090 **/ 2091 int 2092 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2093 { 2094 struct lpfc_hba *phba = vport->phba; 2095 struct serv_parm *sp; 2096 struct lpfc_nodelist *ndlp; 2097 struct lpfc_iocbq *elsiocb; 2098 uint8_t *pcmd; 2099 uint16_t cmdsize; 2100 int ret; 2101 2102 ndlp = lpfc_findnode_did(vport, did); 2103 if (!ndlp) 2104 return 1; 2105 2106 /* Defer the processing of the issue PLOGI until after the 2107 * outstanding UNREG_RPI mbox command completes, unless we 2108 * are going offline. 
This logic does not apply for Fabric DIDs 2109 */ 2110 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2111 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2112 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2113 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2114 "4110 Issue PLOGI x%x deferred " 2115 "on NPort x%x rpi x%x Data: x%px\n", 2116 ndlp->nlp_defer_did, ndlp->nlp_DID, 2117 ndlp->nlp_rpi, ndlp); 2118 2119 /* We can only defer 1st PLOGI */ 2120 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2121 ndlp->nlp_defer_did = did; 2122 return 0; 2123 } 2124 2125 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2126 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2127 ELS_CMD_PLOGI); 2128 if (!elsiocb) 2129 return 1; 2130 2131 spin_lock_irq(&ndlp->lock); 2132 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2133 spin_unlock_irq(&ndlp->lock); 2134 2135 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2136 2137 /* For PLOGI request, remainder of payload is service parameters */ 2138 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2139 pcmd += sizeof(uint32_t); 2140 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2141 sp = (struct serv_parm *) pcmd; 2142 2143 /* 2144 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2145 * to device on remote loops work. 2146 */ 2147 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2148 sp->cmn.altBbCredit = 1; 2149 2150 if (sp->cmn.fcphLow < FC_PH_4_3) 2151 sp->cmn.fcphLow = FC_PH_4_3; 2152 2153 if (sp->cmn.fcphHigh < FC_PH3) 2154 sp->cmn.fcphHigh = FC_PH3; 2155 2156 sp->cmn.valid_vendor_ver_level = 0; 2157 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2158 sp->cmn.bbRcvSizeMsb &= 0xF; 2159 2160 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2161 "Issue PLOGI: did:x%x", 2162 did, 0, 0); 2163 2164 /* If our firmware supports this feature, convey that 2165 * information to the target using the vendor specific field. 2166 */ 2167 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2168 sp->cmn.valid_vendor_ver_level = 1; 2169 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2170 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2171 } 2172 2173 phba->fc_stat.elsXmitPLOGI++; 2174 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2175 2176 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2177 "Issue PLOGI: did:x%x refcnt %d", 2178 did, kref_read(&ndlp->kref), 0); 2179 elsiocb->context1 = lpfc_nlp_get(ndlp); 2180 if (!elsiocb->context1) { 2181 lpfc_els_free_iocb(phba, elsiocb); 2182 return 1; 2183 } 2184 2185 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2186 if (ret) { 2187 lpfc_els_free_iocb(phba, elsiocb); 2188 lpfc_nlp_put(ndlp); 2189 return 1; 2190 } 2191 2192 return 0; 2193 } 2194 2195 /** 2196 * lpfc_cmpl_els_prli - Completion callback function for prli 2197 * @phba: pointer to lpfc hba data structure. 2198 * @cmdiocb: pointer to lpfc command iocb data structure. 2199 * @rspiocb: pointer to lpfc response iocb data structure. 2200 * 2201 * This routine is the completion callback function for a Process Login 2202 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2203 * status. If there is error status reported, PRLI retry shall be attempted 2204 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2205 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2206 * ndlp to mark the PRLI completion. 
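 *
 * A note on the counters used below: because the driver may have both an
 * FCP PRLI and an NVMe PRLI outstanding to the same node, fc_prli_sent on
 * the vport and fc4_prli_sent on the ndlp are decremented on every
 * completion, and the node is only removed when no PRLIs remain
 * outstanding and it is not registered with either transport.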
2207 **/ 2208 static void 2209 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2210 struct lpfc_iocbq *rspiocb) 2211 { 2212 struct lpfc_vport *vport = cmdiocb->vport; 2213 IOCB_t *irsp; 2214 struct lpfc_nodelist *ndlp; 2215 char *mode; 2216 u32 loglevel; 2217 2218 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2219 cmdiocb->context_un.rsp_iocb = rspiocb; 2220 2221 irsp = &(rspiocb->iocb); 2222 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2223 spin_lock_irq(&ndlp->lock); 2224 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2225 2226 /* Driver supports multiple FC4 types. Counters matter. */ 2227 vport->fc_prli_sent--; 2228 ndlp->fc4_prli_sent--; 2229 spin_unlock_irq(&ndlp->lock); 2230 2231 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2232 "PRLI cmpl: status:x%x/x%x did:x%x", 2233 irsp->ulpStatus, irsp->un.ulpWord[4], 2234 ndlp->nlp_DID); 2235 2236 /* PRLI completes to NPort <nlp_DID> */ 2237 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2238 "0103 PRLI completes to NPort x%06x " 2239 "Data: x%x x%x x%x x%x\n", 2240 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2241 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2242 2243 /* Check to see if link went down during discovery */ 2244 if (lpfc_els_chk_latt(vport)) 2245 goto out; 2246 2247 if (irsp->ulpStatus) { 2248 /* Check for retry */ 2249 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2250 /* ELS command is being retried */ 2251 goto out; 2252 } 2253 2254 /* If we don't send GFT_ID to Fabric, a PRLI error 2255 * could be expected. 2256 */ 2257 if ((vport->fc_flag & FC_FABRIC) || 2258 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2259 mode = KERN_ERR; 2260 loglevel = LOG_TRACE_EVENT; 2261 } else { 2262 mode = KERN_INFO; 2263 loglevel = LOG_ELS; 2264 } 2265 2266 /* PRLI failed */ 2267 lpfc_printf_vlog(vport, mode, loglevel, 2268 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2269 "data: x%x\n", 2270 ndlp->nlp_DID, irsp->ulpStatus, 2271 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2272 2273 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2274 if (!lpfc_error_lost_link(irsp)) 2275 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2276 NLP_EVT_CMPL_PRLI); 2277 2278 /* As long as this node is not registered with the SCSI 2279 * or NVMe transport and no other PRLIs are outstanding, 2280 * it is no longer an active node. Otherwise devloss 2281 * handles the final cleanup. 2282 */ 2283 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2284 !ndlp->fc4_prli_sent) { 2285 spin_lock_irq(&ndlp->lock); 2286 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2287 spin_unlock_irq(&ndlp->lock); 2288 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2289 NLP_EVT_DEVICE_RM); 2290 } 2291 } else { 2292 /* Good status, call state machine. However, if another 2293 * PRLI is outstanding, don't call the state machine 2294 * because final disposition to Mapped or Unmapped is 2295 * completed there. 2296 */ 2297 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2298 NLP_EVT_CMPL_PRLI); 2299 } 2300 2301 out: 2302 lpfc_els_free_iocb(phba, cmdiocb); 2303 lpfc_nlp_put(ndlp); 2304 return; 2305 } 2306 2307 /** 2308 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2309 * @vport: pointer to a host virtual N_Port data structure. 2310 * @ndlp: pointer to a node-list data structure. 2311 * @retry: number of retries to the command IOCB. 2312 * 2313 * This routine issues a Process Login (PRLI) ELS command for the 2314 * @vport. 
The PRLI service parameters are set up in the payload of the 2315 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2316 * is put to the IOCB completion callback func field before invoking the 2317 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2318 * 2319 * Note that the ndlp reference count will be incremented by 1 for holding the 2320 * ndlp and the reference to ndlp will be stored into the context1 field of 2321 * the IOCB for the completion callback function to the PRLI ELS command. 2322 * 2323 * Return code 2324 * 0 - successfully issued prli iocb command for @vport 2325 * 1 - failed to issue prli iocb command for @vport 2326 **/ 2327 int 2328 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2329 uint8_t retry) 2330 { 2331 int rc = 0; 2332 struct lpfc_hba *phba = vport->phba; 2333 PRLI *npr; 2334 struct lpfc_nvme_prli *npr_nvme; 2335 struct lpfc_iocbq *elsiocb; 2336 uint8_t *pcmd; 2337 uint16_t cmdsize; 2338 u32 local_nlp_type, elscmd; 2339 2340 /* 2341 * If we are in RSCN mode, the FC4 types supported from a 2342 * previous GFT_ID command may not be accurate. So, if we 2343 * are a NVME Initiator, always look for the possibility of 2344 * the remote NPort beng a NVME Target. 2345 */ 2346 if (phba->sli_rev == LPFC_SLI_REV4 && 2347 vport->fc_flag & FC_RSCN_MODE && 2348 vport->nvmei_support) 2349 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2350 local_nlp_type = ndlp->nlp_fc4_type; 2351 2352 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2353 * fields here before any of them can complete. 2354 */ 2355 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2356 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2357 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2358 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2359 ndlp->nvme_fb_size = 0; 2360 2361 send_next_prli: 2362 if (local_nlp_type & NLP_FC4_FCP) { 2363 /* Payload is 4 + 16 = 20 x14 bytes. */ 2364 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2365 elscmd = ELS_CMD_PRLI; 2366 } else if (local_nlp_type & NLP_FC4_NVME) { 2367 /* Payload is 4 + 20 = 24 x18 bytes. */ 2368 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2369 elscmd = ELS_CMD_NVMEPRLI; 2370 } else { 2371 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2372 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2373 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2374 return 1; 2375 } 2376 2377 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2378 * FC4 type, implicitly LOGO. 2379 */ 2380 if (phba->sli_rev == LPFC_SLI_REV3 && 2381 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2382 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2383 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2384 ndlp->nlp_type); 2385 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2386 return 1; 2387 } 2388 2389 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2390 ndlp->nlp_DID, elscmd); 2391 if (!elsiocb) 2392 return 1; 2393 2394 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2395 2396 /* For PRLI request, remainder of payload is service parameters */ 2397 memset(pcmd, 0, cmdsize); 2398 2399 if (local_nlp_type & NLP_FC4_FCP) { 2400 /* Remainder of payload is FCP PRLI parameter page. 2401 * Note: this data structure is defined as 2402 * BE/LE in the structure definition so no 2403 * byte swap call is made. 
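		 * (For reference, and matching the cmdsize comment above:
		 * the page built here is the 16-byte FCP PRLI service
		 * parameter page preceded by the 4-byte ELS command word,
		 * i.e. 20 (0x14) bytes in total.)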
2404 */ 2405 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2406 pcmd += sizeof(uint32_t); 2407 npr = (PRLI *)pcmd; 2408 2409 /* 2410 * If our firmware version is 3.20 or later, 2411 * set the following bits for FC-TAPE support. 2412 */ 2413 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2414 npr->ConfmComplAllowed = 1; 2415 npr->Retry = 1; 2416 npr->TaskRetryIdReq = 1; 2417 } 2418 npr->estabImagePair = 1; 2419 npr->readXferRdyDis = 1; 2420 if (vport->cfg_first_burst_size) 2421 npr->writeXferRdyDis = 1; 2422 2423 /* For FCP support */ 2424 npr->prliType = PRLI_FCP_TYPE; 2425 npr->initiatorFunc = 1; 2426 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2427 2428 /* Remove FCP type - processed. */ 2429 local_nlp_type &= ~NLP_FC4_FCP; 2430 } else if (local_nlp_type & NLP_FC4_NVME) { 2431 /* Remainder of payload is NVME PRLI parameter page. 2432 * This data structure is the newer definition that 2433 * uses bf macros so a byte swap is required. 2434 */ 2435 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2436 pcmd += sizeof(uint32_t); 2437 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2438 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2439 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2440 if (phba->nsler) { 2441 bf_set(prli_nsler, npr_nvme, 1); 2442 bf_set(prli_conf, npr_nvme, 1); 2443 } 2444 2445 /* Only initiators request first burst. */ 2446 if ((phba->cfg_nvme_enable_fb) && 2447 !phba->nvmet_support) 2448 bf_set(prli_fba, npr_nvme, 1); 2449 2450 if (phba->nvmet_support) { 2451 bf_set(prli_tgt, npr_nvme, 1); 2452 bf_set(prli_disc, npr_nvme, 1); 2453 } else { 2454 bf_set(prli_init, npr_nvme, 1); 2455 bf_set(prli_conf, npr_nvme, 1); 2456 } 2457 2458 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2459 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2460 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2461 2462 /* Remove NVME type - processed. */ 2463 local_nlp_type &= ~NLP_FC4_NVME; 2464 } 2465 2466 phba->fc_stat.elsXmitPRLI++; 2467 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2468 spin_lock_irq(&ndlp->lock); 2469 ndlp->nlp_flag |= NLP_PRLI_SND; 2470 2471 /* The vport counters are used for lpfc_scan_finished, but 2472 * the ndlp is used to track outstanding PRLIs for different 2473 * FC4 types. 2474 */ 2475 vport->fc_prli_sent++; 2476 ndlp->fc4_prli_sent++; 2477 spin_unlock_irq(&ndlp->lock); 2478 2479 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2480 "Issue PRLI: did:x%x refcnt %d", 2481 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2482 elsiocb->context1 = lpfc_nlp_get(ndlp); 2483 if (!elsiocb->context1) { 2484 lpfc_els_free_iocb(phba, elsiocb); 2485 goto err; 2486 } 2487 2488 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2489 if (rc == IOCB_ERROR) { 2490 lpfc_els_free_iocb(phba, elsiocb); 2491 lpfc_nlp_put(ndlp); 2492 goto err; 2493 } 2494 2495 2496 /* The driver supports 2 FC4 types. Make sure 2497 * a PRLI is issued for all types before exiting. 2498 */ 2499 if (phba->sli_rev == LPFC_SLI_REV4 && 2500 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2501 goto send_next_prli; 2502 else 2503 return 0; 2504 2505 err: 2506 spin_lock_irq(&ndlp->lock); 2507 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2508 spin_unlock_irq(&ndlp->lock); 2509 return 1; 2510 } 2511 2512 /** 2513 * lpfc_rscn_disc - Perform rscn discovery for a vport 2514 * @vport: pointer to a host virtual N_Port data structure. 2515 * 2516 * This routine performs Registration State Change Notification (RSCN) 2517 * discovery for a @vport. 
If the @vport's node port recovery count is not 2518 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2519 * the nodes that need recovery. If none of the PLOGI were needed through 2520 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2521 * invoked to check and handle possible more RSCN came in during the period 2522 * of processing the current ones. 2523 **/ 2524 static void 2525 lpfc_rscn_disc(struct lpfc_vport *vport) 2526 { 2527 lpfc_can_disctmo(vport); 2528 2529 /* RSCN discovery */ 2530 /* go thru NPR nodes and issue ELS PLOGIs */ 2531 if (vport->fc_npr_cnt) 2532 if (lpfc_els_disc_plogi(vport)) 2533 return; 2534 2535 lpfc_end_rscn(vport); 2536 } 2537 2538 /** 2539 * lpfc_adisc_done - Complete the adisc phase of discovery 2540 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2541 * 2542 * This function is called when the final ADISC is completed during discovery. 2543 * This function handles clearing link attention or issuing reg_vpi depending 2544 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2545 * discovery. 2546 * This function is called with no locks held. 2547 **/ 2548 static void 2549 lpfc_adisc_done(struct lpfc_vport *vport) 2550 { 2551 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2552 struct lpfc_hba *phba = vport->phba; 2553 2554 /* 2555 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2556 * and continue discovery. 2557 */ 2558 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2559 !(vport->fc_flag & FC_RSCN_MODE) && 2560 (phba->sli_rev < LPFC_SLI_REV4)) { 2561 /* The ADISCs are complete. Doesn't matter if they 2562 * succeeded or failed because the ADISC completion 2563 * routine guarantees to call the state machine and 2564 * the RPI is either unregistered (failed ADISC response) 2565 * or the RPI is still valid and the node is marked 2566 * mapped for a target. The exchanges should be in the 2567 * correct state. This code is specific to SLI3. 2568 */ 2569 lpfc_issue_clear_la(phba, vport); 2570 lpfc_issue_reg_vpi(phba, vport); 2571 return; 2572 } 2573 /* 2574 * For SLI2, we need to set port_state to READY 2575 * and continue discovery. 2576 */ 2577 if (vport->port_state < LPFC_VPORT_READY) { 2578 /* If we get here, there is nothing to ADISC */ 2579 lpfc_issue_clear_la(phba, vport); 2580 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2581 vport->num_disc_nodes = 0; 2582 /* go thru NPR list, issue ELS PLOGIs */ 2583 if (vport->fc_npr_cnt) 2584 lpfc_els_disc_plogi(vport); 2585 if (!vport->num_disc_nodes) { 2586 spin_lock_irq(shost->host_lock); 2587 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2588 spin_unlock_irq(shost->host_lock); 2589 lpfc_can_disctmo(vport); 2590 lpfc_end_rscn(vport); 2591 } 2592 } 2593 vport->port_state = LPFC_VPORT_READY; 2594 } else 2595 lpfc_rscn_disc(vport); 2596 } 2597 2598 /** 2599 * lpfc_more_adisc - Issue more adisc as needed 2600 * @vport: pointer to a host virtual N_Port data structure. 2601 * 2602 * This routine determines whether there are more ndlps on a @vport 2603 * node list need to have Address Discover (ADISC) issued. If so, it will 2604 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2605 * remaining nodes which need to have ADISC sent. 
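 *
 * Background (standard ADISC usage, noted for context): ADISC revalidates
 * an existing login by exchanging addresses and names without tearing the
 * login down, so it is the cheaper alternative to a full PLOGI when nodes
 * are recovered after a link event. Once the last ADISC of the batch
 * completes, lpfc_adisc_done() moves discovery on to the PLOGI phase.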
2606 **/ 2607 void 2608 lpfc_more_adisc(struct lpfc_vport *vport) 2609 { 2610 if (vport->num_disc_nodes) 2611 vport->num_disc_nodes--; 2612 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2613 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2614 "0210 Continue discovery with %d ADISCs to go " 2615 "Data: x%x x%x x%x\n", 2616 vport->num_disc_nodes, vport->fc_adisc_cnt, 2617 vport->fc_flag, vport->port_state); 2618 /* Check to see if there are more ADISCs to be sent */ 2619 if (vport->fc_flag & FC_NLP_MORE) { 2620 lpfc_set_disctmo(vport); 2621 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2622 lpfc_els_disc_adisc(vport); 2623 } 2624 if (!vport->num_disc_nodes) 2625 lpfc_adisc_done(vport); 2626 return; 2627 } 2628 2629 /** 2630 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2631 * @phba: pointer to lpfc hba data structure. 2632 * @cmdiocb: pointer to lpfc command iocb data structure. 2633 * @rspiocb: pointer to lpfc response iocb data structure. 2634 * 2635 * This routine is the completion function for issuing the Address Discover 2636 * (ADISC) command. It first checks to see whether link went down during 2637 * the discovery process. If so, the node will be marked as node port 2638 * recovery for issuing discover IOCB by the link attention handler and 2639 * exit. Otherwise, the response status is checked. If error was reported 2640 * in the response status, the ADISC command shall be retried by invoking 2641 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2642 * the response status, the state machine is invoked to set transition 2643 * with respect to NLP_EVT_CMPL_ADISC event. 2644 **/ 2645 static void 2646 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2647 struct lpfc_iocbq *rspiocb) 2648 { 2649 struct lpfc_vport *vport = cmdiocb->vport; 2650 IOCB_t *irsp; 2651 struct lpfc_nodelist *ndlp; 2652 int disc; 2653 2654 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2655 cmdiocb->context_un.rsp_iocb = rspiocb; 2656 2657 irsp = &(rspiocb->iocb); 2658 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2659 2660 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2661 "ADISC cmpl: status:x%x/x%x did:x%x", 2662 irsp->ulpStatus, irsp->un.ulpWord[4], 2663 ndlp->nlp_DID); 2664 2665 /* Since ndlp can be freed in the disc state machine, note if this node 2666 * is being used during discovery. 
2667 */ 2668 spin_lock_irq(&ndlp->lock); 2669 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2670 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2671 spin_unlock_irq(&ndlp->lock); 2672 /* ADISC completes to NPort <nlp_DID> */ 2673 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2674 "0104 ADISC completes to NPort x%x " 2675 "Data: x%x x%x x%x x%x x%x\n", 2676 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2677 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2678 /* Check to see if link went down during discovery */ 2679 if (lpfc_els_chk_latt(vport)) { 2680 spin_lock_irq(&ndlp->lock); 2681 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2682 spin_unlock_irq(&ndlp->lock); 2683 goto out; 2684 } 2685 2686 if (irsp->ulpStatus) { 2687 /* Check for retry */ 2688 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2689 /* ELS command is being retried */ 2690 if (disc) { 2691 spin_lock_irq(&ndlp->lock); 2692 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2693 spin_unlock_irq(&ndlp->lock); 2694 lpfc_set_disctmo(vport); 2695 } 2696 goto out; 2697 } 2698 /* ADISC failed */ 2699 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2700 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2701 ndlp->nlp_DID, irsp->ulpStatus, 2702 irsp->un.ulpWord[4]); 2703 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2704 if (lpfc_error_lost_link(irsp)) 2705 goto check_adisc; 2706 else 2707 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2708 NLP_EVT_CMPL_ADISC); 2709 2710 /* As long as this node is not registered with the SCSI or NVMe 2711 * transport, it is no longer an active node. Otherwise 2712 * devloss handles the final cleanup. 2713 */ 2714 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2715 spin_lock_irq(&ndlp->lock); 2716 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2717 spin_unlock_irq(&ndlp->lock); 2718 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2719 NLP_EVT_DEVICE_RM); 2720 } 2721 } else 2722 /* Good status, call state machine */ 2723 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2724 NLP_EVT_CMPL_ADISC); 2725 2726 check_adisc: 2727 /* Check to see if there are more ADISCs to be sent */ 2728 if (disc && vport->num_disc_nodes) 2729 lpfc_more_adisc(vport); 2730 out: 2731 lpfc_els_free_iocb(phba, cmdiocb); 2732 lpfc_nlp_put(ndlp); 2733 return; 2734 } 2735 2736 /** 2737 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2738 * @vport: pointer to a virtual N_Port data structure. 2739 * @ndlp: pointer to a node-list data structure. 2740 * @retry: number of retries to the command IOCB. 2741 * 2742 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2743 * @vport. It prepares the payload of the ADISC ELS command, updates the 2744 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2745 * to issue the ADISC ELS command. 2746 * 2747 * Note that the ndlp reference count will be incremented by 1 for holding the 2748 * ndlp and the reference to ndlp will be stored into the context1 field of 2749 * the IOCB for the completion callback function to the ADISC ELS command. 
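 *
 * The ADISC payload built below carries only the hard address (AL_PA), the
 * local port and node names, and the local N_Port ID, which lets the
 * remote port confirm that the existing login still refers to the same
 * entity.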
2750 * 2751 * Return code 2752 * 0 - successfully issued adisc 2753 * 1 - failed to issue adisc 2754 **/ 2755 int 2756 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2757 uint8_t retry) 2758 { 2759 int rc = 0; 2760 struct lpfc_hba *phba = vport->phba; 2761 ADISC *ap; 2762 struct lpfc_iocbq *elsiocb; 2763 uint8_t *pcmd; 2764 uint16_t cmdsize; 2765 2766 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2767 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2768 ndlp->nlp_DID, ELS_CMD_ADISC); 2769 if (!elsiocb) 2770 return 1; 2771 2772 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2773 2774 /* For ADISC request, remainder of payload is service parameters */ 2775 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2776 pcmd += sizeof(uint32_t); 2777 2778 /* Fill in ADISC payload */ 2779 ap = (ADISC *) pcmd; 2780 ap->hardAL_PA = phba->fc_pref_ALPA; 2781 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2782 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2783 ap->DID = be32_to_cpu(vport->fc_myDID); 2784 2785 phba->fc_stat.elsXmitADISC++; 2786 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2787 spin_lock_irq(&ndlp->lock); 2788 ndlp->nlp_flag |= NLP_ADISC_SND; 2789 spin_unlock_irq(&ndlp->lock); 2790 elsiocb->context1 = lpfc_nlp_get(ndlp); 2791 if (!elsiocb->context1) { 2792 lpfc_els_free_iocb(phba, elsiocb); 2793 goto err; 2794 } 2795 2796 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2797 "Issue ADISC: did:x%x refcnt %d", 2798 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2799 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2800 if (rc == IOCB_ERROR) { 2801 lpfc_els_free_iocb(phba, elsiocb); 2802 lpfc_nlp_put(ndlp); 2803 goto err; 2804 } 2805 2806 return 0; 2807 2808 err: 2809 spin_lock_irq(&ndlp->lock); 2810 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2811 spin_unlock_irq(&ndlp->lock); 2812 return 1; 2813 } 2814 2815 /** 2816 * lpfc_cmpl_els_logo - Completion callback function for logo 2817 * @phba: pointer to lpfc hba data structure. 2818 * @cmdiocb: pointer to lpfc command iocb data structure. 2819 * @rspiocb: pointer to lpfc response iocb data structure. 2820 * 2821 * This routine is the completion function for issuing the ELS Logout (LOGO) 2822 * command. If no error status was reported from the LOGO response, the 2823 * state machine of the associated ndlp shall be invoked for transition with 2824 * respect to NLP_EVT_CMPL_LOGO event. 
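 *
 * Unlike most ELS commands handled in this file, a failed LOGO is not
 * retried: the completion logs the failure, lets the state machine
 * unregister the RPI if needed, and relies on a later PLOGI to recover the
 * remote port when recovery is appropriate.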
2825 **/ 2826 static void 2827 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2828 struct lpfc_iocbq *rspiocb) 2829 { 2830 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2831 struct lpfc_vport *vport = ndlp->vport; 2832 IOCB_t *irsp; 2833 unsigned long flags; 2834 uint32_t skip_recovery = 0; 2835 int wake_up_waiter = 0; 2836 2837 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2838 cmdiocb->context_un.rsp_iocb = rspiocb; 2839 2840 irsp = &(rspiocb->iocb); 2841 spin_lock_irq(&ndlp->lock); 2842 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2843 if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) { 2844 wake_up_waiter = 1; 2845 ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 2846 } 2847 spin_unlock_irq(&ndlp->lock); 2848 2849 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2850 "LOGO cmpl: status:x%x/x%x did:x%x", 2851 irsp->ulpStatus, irsp->un.ulpWord[4], 2852 ndlp->nlp_DID); 2853 2854 /* LOGO completes to NPort <nlp_DID> */ 2855 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2856 "0105 LOGO completes to NPort x%x " 2857 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2858 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2859 irsp->ulpStatus, irsp->un.ulpWord[4], 2860 irsp->ulpTimeout, vport->num_disc_nodes); 2861 2862 if (lpfc_els_chk_latt(vport)) { 2863 skip_recovery = 1; 2864 goto out; 2865 } 2866 2867 /* The LOGO will not be retried on failure. A LOGO was 2868 * issued to the remote rport and a ACC or RJT or no Answer are 2869 * all acceptable. Note the failure and move forward with 2870 * discovery. The PLOGI will retry. 2871 */ 2872 if (irsp->ulpStatus) { 2873 /* LOGO failed */ 2874 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2875 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2876 ndlp->nlp_DID, irsp->ulpStatus, 2877 irsp->un.ulpWord[4]); 2878 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2879 if (lpfc_error_lost_link(irsp)) { 2880 skip_recovery = 1; 2881 goto out; 2882 } 2883 } 2884 2885 /* Call state machine. This will unregister the rpi if needed. */ 2886 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2887 2888 /* The driver sets this flag for an NPIV instance that doesn't want to 2889 * log into the remote port. 2890 */ 2891 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2892 spin_lock_irq(&ndlp->lock); 2893 if (phba->sli_rev == LPFC_SLI_REV4) 2894 ndlp->nlp_flag |= NLP_RELEASE_RPI; 2895 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2896 spin_unlock_irq(&ndlp->lock); 2897 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2898 NLP_EVT_DEVICE_RM); 2899 lpfc_els_free_iocb(phba, cmdiocb); 2900 lpfc_nlp_put(ndlp); 2901 2902 /* Presume the node was released. */ 2903 return; 2904 } 2905 2906 out: 2907 /* Driver is done with the IO. */ 2908 lpfc_els_free_iocb(phba, cmdiocb); 2909 lpfc_nlp_put(ndlp); 2910 2911 /* At this point, the LOGO processing is complete. NOTE: For a 2912 * pt2pt topology, we are assuming the NPortID will only change 2913 * on link up processing. For a LOGO / PLOGI initiated by the 2914 * Initiator, we are assuming the NPortID is not going to change. 2915 */ 2916 2917 if (wake_up_waiter && ndlp->logo_waitq) 2918 wake_up(ndlp->logo_waitq); 2919 /* 2920 * If the node is a target, the handling attempts to recover the port. 2921 * For any other port type, the rpi is unregistered as an implicit 2922 * LOGO. 
2923 	 */
2924 	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
2925 	    skip_recovery == 0) {
2926 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2927 		spin_lock_irqsave(&ndlp->lock, flags);
2928 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2929 		spin_unlock_irqrestore(&ndlp->lock, flags);
2930
2931 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2932 				 "3187 LOGO completes to NPort x%x: Start "
2933 				 "Recovery Data: x%x x%x x%x x%x\n",
2934 				 ndlp->nlp_DID, irsp->ulpStatus,
2935 				 irsp->un.ulpWord[4], irsp->ulpTimeout,
2936 				 vport->num_disc_nodes);
2937 		lpfc_disc_start(vport);
2938 		return;
2939 	}
2940
2941 	/* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
2942 	 * driver sends a LOGO to the rport to cleanup. For fabric and
2943 	 * initiator ports, cleanup the node as long as the node is not
2944 	 * registered with the transport.
2945 	 */
2946 	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2947 		spin_lock_irq(&ndlp->lock);
2948 		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2949 		spin_unlock_irq(&ndlp->lock);
2950 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2951 					NLP_EVT_DEVICE_RM);
2952 	}
2953 }
2954
2955 /**
2956  * lpfc_issue_els_logo - Issue a logo to a node on a vport
2957  * @vport: pointer to a virtual N_Port data structure.
2958  * @ndlp: pointer to a node-list data structure.
2959  * @retry: number of retries to the command IOCB.
2960  *
2961  * This routine constructs and issues an ELS Logout (LOGO) iocb command
2962  * to a remote node, referred by an @ndlp on a @vport. It constructs the
2963  * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2964  * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2965  *
2966  * Note that the ndlp reference count will be incremented by 1 for holding the
2967  * ndlp and the reference to ndlp will be stored into the context1 field of
2968  * the IOCB for the completion callback function to the LOGO ELS command.
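 *
 * The LOGO payload built below is just the 4-byte command word, the local
 * N_Port ID and the local port name, which is why cmdsize is computed as
 * (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name).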
2969 * 2970 * Callers of this routine are expected to unregister the RPI first 2971 * 2972 * Return code 2973 * 0 - successfully issued logo 2974 * 1 - failed to issue logo 2975 **/ 2976 int 2977 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2978 uint8_t retry) 2979 { 2980 struct lpfc_hba *phba = vport->phba; 2981 struct lpfc_iocbq *elsiocb; 2982 uint8_t *pcmd; 2983 uint16_t cmdsize; 2984 int rc; 2985 2986 spin_lock_irq(&ndlp->lock); 2987 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2988 spin_unlock_irq(&ndlp->lock); 2989 return 0; 2990 } 2991 spin_unlock_irq(&ndlp->lock); 2992 2993 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2994 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2995 ndlp->nlp_DID, ELS_CMD_LOGO); 2996 if (!elsiocb) 2997 return 1; 2998 2999 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3000 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 3001 pcmd += sizeof(uint32_t); 3002 3003 /* Fill in LOGO payload */ 3004 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3005 pcmd += sizeof(uint32_t); 3006 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3007 3008 phba->fc_stat.elsXmitLOGO++; 3009 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 3010 spin_lock_irq(&ndlp->lock); 3011 ndlp->nlp_flag |= NLP_LOGO_SND; 3012 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3013 spin_unlock_irq(&ndlp->lock); 3014 elsiocb->context1 = lpfc_nlp_get(ndlp); 3015 if (!elsiocb->context1) { 3016 lpfc_els_free_iocb(phba, elsiocb); 3017 goto err; 3018 } 3019 3020 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3021 "Issue LOGO: did:x%x refcnt %d", 3022 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3023 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3024 if (rc == IOCB_ERROR) { 3025 lpfc_els_free_iocb(phba, elsiocb); 3026 lpfc_nlp_put(ndlp); 3027 goto err; 3028 } 3029 3030 spin_lock_irq(&ndlp->lock); 3031 ndlp->nlp_prev_state = ndlp->nlp_state; 3032 spin_unlock_irq(&ndlp->lock); 3033 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3034 return 0; 3035 3036 err: 3037 spin_lock_irq(&ndlp->lock); 3038 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3039 spin_unlock_irq(&ndlp->lock); 3040 return 1; 3041 } 3042 3043 /** 3044 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3045 * @phba: pointer to lpfc hba data structure. 3046 * @cmdiocb: pointer to lpfc command iocb data structure. 3047 * @rspiocb: pointer to lpfc response iocb data structure. 3048 * 3049 * This routine is a generic completion callback function for ELS commands. 3050 * Specifically, it is the callback function which does not need to perform 3051 * any command specific operations. It is currently used by the ELS command 3052 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3053 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3054 * Other than certain debug loggings, this callback function simply invokes the 3055 * lpfc_els_chk_latt() routine to check whether link went down during the 3056 * discovery process. 
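 *
 * The callback also frees the command IOCB and drops the ndlp reference
 * that was taken at issue time. The issuing pattern it pairs with (as used
 * by the RSCN and FARPR issue routines in this file) looks like:
 *
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
 *	elsiocb->context1 = lpfc_nlp_get(ndlp);
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);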
3057 **/ 3058 static void 3059 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3060 struct lpfc_iocbq *rspiocb) 3061 { 3062 struct lpfc_vport *vport = cmdiocb->vport; 3063 struct lpfc_nodelist *free_ndlp; 3064 IOCB_t *irsp; 3065 3066 irsp = &rspiocb->iocb; 3067 3068 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3069 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3070 irsp->ulpStatus, irsp->un.ulpWord[4], 3071 irsp->un.elsreq64.remoteID); 3072 3073 /* ELS cmd tag <ulpIoTag> completes */ 3074 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3075 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3076 irsp->ulpIoTag, irsp->ulpStatus, 3077 irsp->un.ulpWord[4], irsp->ulpTimeout); 3078 3079 /* Check to see if link went down during discovery */ 3080 lpfc_els_chk_latt(vport); 3081 3082 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 3083 3084 lpfc_els_free_iocb(phba, cmdiocb); 3085 lpfc_nlp_put(free_ndlp); 3086 } 3087 3088 /** 3089 * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. 3090 * @vport: pointer to lpfc_vport data structure. 3091 * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. 3092 * 3093 * This routine registers the rpi assigned to the fabric controller 3094 * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED 3095 * state triggering a registration with the SCSI transport. 3096 * 3097 * This routine is single out because the fabric controller node 3098 * does not receive a PLOGI. This routine is consumed by the 3099 * SCR and RDF ELS commands. Callers are expected to qualify 3100 * with SLI4 first. 3101 **/ 3102 static int 3103 lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) 3104 { 3105 int rc = 0; 3106 struct lpfc_hba *phba = vport->phba; 3107 struct lpfc_nodelist *ns_ndlp; 3108 LPFC_MBOXQ_t *mbox; 3109 struct lpfc_dmabuf *mp; 3110 3111 if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) 3112 return rc; 3113 3114 ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); 3115 if (!ns_ndlp) 3116 return -ENODEV; 3117 3118 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3119 "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", 3120 __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, 3121 ns_ndlp->nlp_state); 3122 if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) 3123 return -ENODEV; 3124 3125 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3126 if (!mbox) { 3127 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3128 "0936 %s: no memory for reg_login " 3129 "Data: x%x x%x x%x x%x\n", __func__, 3130 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3131 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3132 return -ENOMEM; 3133 } 3134 rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, 3135 (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); 3136 if (rc) { 3137 rc = -EACCES; 3138 goto out; 3139 } 3140 3141 fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 3142 mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; 3143 mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); 3144 if (!mbox->ctx_ndlp) { 3145 rc = -ENOMEM; 3146 goto out_mem; 3147 } 3148 3149 mbox->vport = vport; 3150 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3151 if (rc == MBX_NOT_FINISHED) { 3152 rc = -ENODEV; 3153 lpfc_nlp_put(fc_ndlp); 3154 goto out_mem; 3155 } 3156 /* Success path. Exit. 
*/ 3157 lpfc_nlp_set_state(vport, fc_ndlp, 3158 NLP_STE_REG_LOGIN_ISSUE); 3159 return 0; 3160 3161 out_mem: 3162 fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3163 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 3164 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3165 kfree(mp); 3166 3167 out: 3168 mempool_free(mbox, phba->mbox_mem_pool); 3169 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3170 "0938 %s: failed to format reg_login " 3171 "Data: x%x x%x x%x x%x\n", __func__, 3172 fc_ndlp->nlp_DID, fc_ndlp->nlp_state, 3173 fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); 3174 return rc; 3175 } 3176 3177 /** 3178 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3179 * @phba: pointer to lpfc hba data structure. 3180 * @cmdiocb: pointer to lpfc command iocb data structure. 3181 * @rspiocb: pointer to lpfc response iocb data structure. 3182 * 3183 * This routine is a generic completion callback function for Discovery ELS cmd. 3184 * Currently used by the ELS command issuing routines for the ELS State Change 3185 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3186 * These commands will be retried once only for ELS timeout errors. 3187 **/ 3188 static void 3189 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3190 struct lpfc_iocbq *rspiocb) 3191 { 3192 struct lpfc_vport *vport = cmdiocb->vport; 3193 IOCB_t *irsp; 3194 struct lpfc_els_rdf_rsp *prdf; 3195 struct lpfc_dmabuf *pcmd, *prsp; 3196 u32 *pdata; 3197 u32 cmd; 3198 struct lpfc_nodelist *ndlp = cmdiocb->context1; 3199 3200 irsp = &rspiocb->iocb; 3201 3202 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3203 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3204 irsp->ulpStatus, irsp->un.ulpWord[4], 3205 irsp->un.elsreq64.remoteID); 3206 /* ELS cmd tag <ulpIoTag> completes */ 3207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3208 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x " 3209 "x%x\n", 3210 irsp->ulpIoTag, irsp->ulpStatus, 3211 irsp->un.ulpWord[4], irsp->ulpTimeout, 3212 cmdiocb->retry); 3213 3214 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3215 if (!pcmd) 3216 goto out; 3217 3218 pdata = (u32 *)pcmd->virt; 3219 if (!pdata) 3220 goto out; 3221 cmd = *pdata; 3222 3223 /* Only 1 retry for ELS Timeout only */ 3224 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 3225 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3226 IOERR_SEQUENCE_TIMEOUT)) { 3227 cmdiocb->retry++; 3228 if (cmdiocb->retry <= 1) { 3229 switch (cmd) { 3230 case ELS_CMD_SCR: 3231 lpfc_issue_els_scr(vport, cmdiocb->retry); 3232 break; 3233 case ELS_CMD_RDF: 3234 cmdiocb->context1 = NULL; /* save ndlp refcnt */ 3235 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3236 break; 3237 } 3238 goto out; 3239 } 3240 phba->fc_stat.elsRetryExceeded++; 3241 } 3242 if (irsp->ulpStatus) { 3243 /* ELS discovery cmd completes with error */ 3244 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 3245 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3246 irsp->ulpStatus, irsp->un.ulpWord[4]); 3247 goto out; 3248 } 3249 3250 /* The RDF response doesn't have any impact on the running driver 3251 * but the notification descriptors are dumped here for support. 
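	 * (RDF is the Register Diagnostic Functions ELS; the granted
	 * descriptor tags logged below indicate which fabric notification
	 * types, such as FPIN descriptors, the fabric agreed to deliver.)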
3252 */ 3253 if (cmd == ELS_CMD_RDF) { 3254 int i; 3255 3256 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3257 if (!prsp) 3258 goto out; 3259 3260 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3261 if (!prdf) 3262 goto out; 3263 3264 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3265 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3266 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3267 "4677 Fabric RDF Notification Grant Data: " 3268 "0x%08x\n", 3269 be32_to_cpu( 3270 prdf->reg_d1.desc_tags[i])); 3271 } 3272 3273 out: 3274 /* Check to see if link went down during discovery */ 3275 lpfc_els_chk_latt(vport); 3276 lpfc_els_free_iocb(phba, cmdiocb); 3277 lpfc_nlp_put(ndlp); 3278 return; 3279 } 3280 3281 /** 3282 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3283 * @vport: pointer to a host virtual N_Port data structure. 3284 * @retry: retry counter for the command IOCB. 3285 * 3286 * This routine issues a State Change Request (SCR) to a fabric node 3287 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3288 * first search the @vport node list to find the matching ndlp. If no such 3289 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3290 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3291 * routine is invoked to send the SCR IOCB. 3292 * 3293 * Note that the ndlp reference count will be incremented by 1 for holding the 3294 * ndlp and the reference to ndlp will be stored into the context1 field of 3295 * the IOCB for the completion callback function to the SCR ELS command. 3296 * 3297 * Return code 3298 * 0 - Successfully issued scr command 3299 * 1 - Failed to issue scr command 3300 **/ 3301 int 3302 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3303 { 3304 int rc = 0; 3305 struct lpfc_hba *phba = vport->phba; 3306 struct lpfc_iocbq *elsiocb; 3307 uint8_t *pcmd; 3308 uint16_t cmdsize; 3309 struct lpfc_nodelist *ndlp; 3310 3311 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3312 3313 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3314 if (!ndlp) { 3315 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3316 if (!ndlp) 3317 return 1; 3318 lpfc_enqueue_node(vport, ndlp); 3319 } 3320 3321 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3322 ndlp->nlp_DID, ELS_CMD_SCR); 3323 if (!elsiocb) 3324 return 1; 3325 3326 if (phba->sli_rev == LPFC_SLI_REV4) { 3327 rc = lpfc_reg_fab_ctrl_node(vport, ndlp); 3328 if (rc) { 3329 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3330 "0937 %s: Failed to reg fc node, rc %d\n", 3331 __func__, rc); 3332 return 1; 3333 } 3334 } 3335 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3336 3337 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3338 pcmd += sizeof(uint32_t); 3339 3340 /* For SCR, remainder of payload is SCR parameter page */ 3341 memset(pcmd, 0, sizeof(SCR)); 3342 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3343 3344 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3345 "Issue SCR: did:x%x", 3346 ndlp->nlp_DID, 0, 0); 3347 3348 phba->fc_stat.elsXmitSCR++; 3349 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3350 elsiocb->context1 = lpfc_nlp_get(ndlp); 3351 if (!elsiocb->context1) { 3352 lpfc_els_free_iocb(phba, elsiocb); 3353 return 1; 3354 } 3355 3356 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3357 "Issue SCR: did:x%x refcnt %d", 3358 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3359 3360 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3361 if (rc == IOCB_ERROR) { 3362 lpfc_els_free_iocb(phba, elsiocb); 3363 lpfc_nlp_put(ndlp); 
3364 return 1; 3365 } 3366 3367 /* Keep the ndlp just in case RDF is being sent */ 3368 return 0; 3369 } 3370 3371 /** 3372 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3373 * or the other nport (pt2pt). 3374 * @vport: pointer to a host virtual N_Port data structure. 3375 * @retry: number of retries to the command IOCB. 3376 * 3377 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3378 * when connected to a fabric, or to the remote port when connected 3379 * in point-to-point mode. When sent to the Fabric Controller, it will 3380 * replay the RSCN to registered recipients. 3381 * 3382 * Note that the ndlp reference count will be incremented by 1 for holding the 3383 * ndlp and the reference to ndlp will be stored into the context1 field of 3384 * the IOCB for the completion callback function to the RSCN ELS command. 3385 * 3386 * Return code 3387 * 0 - Successfully issued RSCN command 3388 * 1 - Failed to issue RSCN command 3389 **/ 3390 int 3391 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3392 { 3393 int rc = 0; 3394 struct lpfc_hba *phba = vport->phba; 3395 struct lpfc_iocbq *elsiocb; 3396 struct lpfc_nodelist *ndlp; 3397 struct { 3398 struct fc_els_rscn rscn; 3399 struct fc_els_rscn_page portid; 3400 } *event; 3401 uint32_t nportid; 3402 uint16_t cmdsize = sizeof(*event); 3403 3404 /* Not supported for private loop */ 3405 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3406 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3407 return 1; 3408 3409 if (vport->fc_flag & FC_PT2PT) { 3410 /* find any mapped nport - that would be the other nport */ 3411 ndlp = lpfc_findnode_mapped(vport); 3412 if (!ndlp) 3413 return 1; 3414 } else { 3415 nportid = FC_FID_FCTRL; 3416 /* find the fabric controller node */ 3417 ndlp = lpfc_findnode_did(vport, nportid); 3418 if (!ndlp) { 3419 /* if one didn't exist, make one */ 3420 ndlp = lpfc_nlp_init(vport, nportid); 3421 if (!ndlp) 3422 return 1; 3423 lpfc_enqueue_node(vport, ndlp); 3424 } 3425 } 3426 3427 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3428 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3429 3430 if (!elsiocb) 3431 return 1; 3432 3433 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 3434 3435 event->rscn.rscn_cmd = ELS_RSCN; 3436 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3437 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3438 3439 nportid = vport->fc_myDID; 3440 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3441 event->portid.rscn_page_flags = 0; 3442 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3443 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3444 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3445 3446 phba->fc_stat.elsXmitRSCN++; 3447 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3448 elsiocb->context1 = lpfc_nlp_get(ndlp); 3449 if (!elsiocb->context1) { 3450 lpfc_els_free_iocb(phba, elsiocb); 3451 return 1; 3452 } 3453 3454 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3455 "Issue RSCN: did:x%x", 3456 ndlp->nlp_DID, 0, 0); 3457 3458 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3459 if (rc == IOCB_ERROR) { 3460 lpfc_els_free_iocb(phba, elsiocb); 3461 lpfc_nlp_put(ndlp); 3462 return 1; 3463 } 3464 3465 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3466 * trigger the release of node. 
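 * In pt2pt mode the reference on the remote nport is retained.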
3467 */ 3468 if (!(vport->fc_flag & FC_PT2PT)) 3469 lpfc_nlp_put(ndlp); 3470 return 0; 3471 } 3472 3473 /** 3474 * lpfc_issue_els_farpr - Issue a farpr to a node on a vport 3475 * @vport: pointer to a host virtual N_Port data structure. 3476 * @nportid: N_Port identifier to the remote node. 3477 * @retry: number of retries to the command IOCB. 3478 * 3479 * This routine issues a Fibre Channel Address Resolution Response 3480 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3481 * is passed into the function. It first searches the @vport node list to find 3482 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3483 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3484 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3485 * 3486 * Note that the ndlp reference count will be incremented by 1 for holding the 3487 * ndlp and the reference to ndlp will be stored into the context1 field of 3488 * the IOCB for the completion callback function to the FARPR ELS command. 3489 * 3490 * Return code 3491 * 0 - Successfully issued farpr command 3492 * 1 - Failed to issue farpr command 3493 **/ 3494 static int 3495 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3496 { 3497 int rc = 0; 3498 struct lpfc_hba *phba = vport->phba; 3499 struct lpfc_iocbq *elsiocb; 3500 FARP *fp; 3501 uint8_t *pcmd; 3502 uint32_t *lp; 3503 uint16_t cmdsize; 3504 struct lpfc_nodelist *ondlp; 3505 struct lpfc_nodelist *ndlp; 3506 3507 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3508 3509 ndlp = lpfc_findnode_did(vport, nportid); 3510 if (!ndlp) { 3511 ndlp = lpfc_nlp_init(vport, nportid); 3512 if (!ndlp) 3513 return 1; 3514 lpfc_enqueue_node(vport, ndlp); 3515 } 3516 3517 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3518 ndlp->nlp_DID, ELS_CMD_RNID); 3519 if (!elsiocb) 3520 return 1; 3521 3522 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3523 3524 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3525 pcmd += sizeof(uint32_t); 3526 3527 /* Fill in FARPR payload */ 3528 fp = (FARP *) (pcmd); 3529 memset(fp, 0, sizeof(FARP)); 3530 lp = (uint32_t *) pcmd; 3531 *lp++ = be32_to_cpu(nportid); 3532 *lp++ = be32_to_cpu(vport->fc_myDID); 3533 fp->Rflags = 0; 3534 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3535 3536 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3537 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3538 ondlp = lpfc_findnode_did(vport, nportid); 3539 if (ondlp) { 3540 memcpy(&fp->OportName, &ondlp->nlp_portname, 3541 sizeof(struct lpfc_name)); 3542 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3543 sizeof(struct lpfc_name)); 3544 } 3545 3546 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3547 "Issue FARPR: did:x%x", 3548 ndlp->nlp_DID, 0, 0); 3549 3550 phba->fc_stat.elsXmitFARPR++; 3551 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3552 elsiocb->context1 = lpfc_nlp_get(ndlp); 3553 if (!elsiocb->context1) { 3554 lpfc_els_free_iocb(phba, elsiocb); 3555 return 1; 3556 } 3557 3558 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3559 if (rc == IOCB_ERROR) { 3560 /* The additional lpfc_nlp_put will cause the following 3561 * lpfc_els_free_iocb routine to trigger the release of 3562 * the node. 3563 */ 3564 lpfc_els_free_iocb(phba, elsiocb); 3565 lpfc_nlp_put(ndlp); 3566 return 1; 3567 } 3568 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3569 * trigger the release of the node.
3570 */ 3571 /* Don't release reference count as RDF is likely outstanding */ 3572 return 0; 3573 } 3574 3575 /** 3576 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3577 * @vport: pointer to a host virtual N_Port data structure. 3578 * @retry: retry counter for the command IOCB. 3579 * 3580 * This routine issues an ELS RDF to the Fabric Controller to register 3581 * for diagnostic functions. 3582 * 3583 * Note that the ndlp reference count will be incremented by 1 for holding the 3584 * ndlp and the reference to ndlp will be stored into the context1 field of 3585 * the IOCB for the completion callback function to the RDF ELS command. 3586 * 3587 * Return code 3588 * 0 - Successfully issued rdf command 3589 * 1 - Failed to issue rdf command 3590 **/ 3591 int 3592 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3593 { 3594 struct lpfc_hba *phba = vport->phba; 3595 struct lpfc_iocbq *elsiocb; 3596 struct lpfc_els_rdf_req *prdf; 3597 struct lpfc_nodelist *ndlp; 3598 uint16_t cmdsize; 3599 int rc; 3600 3601 cmdsize = sizeof(*prdf); 3602 3603 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3604 if (!ndlp) { 3605 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3606 if (!ndlp) 3607 return -ENODEV; 3608 lpfc_enqueue_node(vport, ndlp); 3609 } 3610 3611 /* RDF ELS is not required on an NPIV VN_Port. */ 3612 if (vport->port_type == LPFC_NPIV_PORT) { 3613 lpfc_nlp_put(ndlp); 3614 return -EACCES; 3615 } 3616 3617 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3618 ndlp->nlp_DID, ELS_CMD_RDF); 3619 if (!elsiocb) 3620 return -ENOMEM; 3621 3622 if (phba->sli_rev == LPFC_SLI_REV4 && 3623 !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { 3624 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3625 "0939 %s: FC_NODE x%x RPI x%x flag x%x " 3626 "ste x%x type x%x Not registered\n", 3627 __func__, ndlp->nlp_DID, ndlp->nlp_rpi, 3628 ndlp->nlp_flag, ndlp->nlp_state, 3629 ndlp->nlp_type); 3630 return -ENODEV; 3631 } 3632 3633 /* Configure the payload for the supported FPIN events. 
*/ 3634 prdf = (struct lpfc_els_rdf_req *) 3635 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 3636 memset(prdf, 0, cmdsize); 3637 prdf->rdf.fpin_cmd = ELS_RDF; 3638 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3639 sizeof(struct fc_els_rdf)); 3640 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3641 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3642 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3643 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3644 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3645 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3646 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3647 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3648 3649 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3650 "6444 Xmit RDF to remote NPORT x%x\n", 3651 ndlp->nlp_DID); 3652 3653 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3654 elsiocb->context1 = lpfc_nlp_get(ndlp); 3655 if (!elsiocb->context1) { 3656 lpfc_els_free_iocb(phba, elsiocb); 3657 return -EIO; 3658 } 3659 3660 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3661 "Issue RDF: did:x%x refcnt %d", 3662 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3663 3664 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3665 if (rc == IOCB_ERROR) { 3666 lpfc_els_free_iocb(phba, elsiocb); 3667 lpfc_nlp_put(ndlp); 3668 return -EIO; 3669 } 3670 return 0; 3671 } 3672 3673 /** 3674 * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. 3675 * @vport: pointer to a host virtual N_Port data structure. 3676 * @cmdiocb: pointer to lpfc command iocb data structure. 3677 * @ndlp: pointer to a node-list data structure. 3678 * 3679 * A received RDF implies a possible change to fabric supported diagnostic 3680 * functions. This routine sends LS_ACC and then has the Nx_Port issue a new 3681 * RDF request to reregister for supported diagnostic functions. 3682 * 3683 * Return code 3684 * 0 - Success 3685 * -EIO - Failed to process received RDF 3686 **/ 3687 static int 3688 lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 3689 struct lpfc_nodelist *ndlp) 3690 { 3691 /* Send LS_ACC */ 3692 if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { 3693 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3694 "1623 Failed to RDF_ACC from x%x for x%x\n", 3695 ndlp->nlp_DID, vport->fc_myDID); 3696 return -EIO; 3697 } 3698 3699 /* Issue new RDF for reregistering */ 3700 if (lpfc_issue_els_rdf(vport, 0)) { 3701 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3702 "2623 Failed to re register RDF for x%x\n", 3703 vport->fc_myDID); 3704 return -EIO; 3705 } 3706 3707 return 0; 3708 } 3709 3710 /** 3711 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 3712 * @vport: pointer to a host virtual N_Port data structure. 3713 * @nlp: pointer to a node-list data structure. 3714 * 3715 * This routine cancels the timer with a delayed IOCB-command retry for 3716 * a @vport's @nlp. It stops the timer for the delayed function retry and 3717 * removes the ELS retry event if it is present. In addition, if the 3718 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 3719 * commands are sent for the @vport's nodes that require issuing discovery 3720 * ADISC.
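 * The node lock and the host lock are taken internally, so the caller must not hold them.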
3721 **/ 3722 void 3723 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 3724 { 3725 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3726 struct lpfc_work_evt *evtp; 3727 3728 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 3729 return; 3730 spin_lock_irq(&nlp->lock); 3731 nlp->nlp_flag &= ~NLP_DELAY_TMO; 3732 spin_unlock_irq(&nlp->lock); 3733 del_timer_sync(&nlp->nlp_delayfunc); 3734 nlp->nlp_last_elscmd = 0; 3735 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 3736 list_del_init(&nlp->els_retry_evt.evt_listp); 3737 /* Decrement nlp reference count held for the delayed retry */ 3738 evtp = &nlp->els_retry_evt; 3739 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 3740 } 3741 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 3742 spin_lock_irq(&nlp->lock); 3743 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3744 spin_unlock_irq(&nlp->lock); 3745 if (vport->num_disc_nodes) { 3746 if (vport->port_state < LPFC_VPORT_READY) { 3747 /* Check if there are more ADISCs to be sent */ 3748 lpfc_more_adisc(vport); 3749 } else { 3750 /* Check if there are more PLOGIs to be sent */ 3751 lpfc_more_plogi(vport); 3752 if (vport->num_disc_nodes == 0) { 3753 spin_lock_irq(shost->host_lock); 3754 vport->fc_flag &= ~FC_NDISC_ACTIVE; 3755 spin_unlock_irq(shost->host_lock); 3756 lpfc_can_disctmo(vport); 3757 lpfc_end_rscn(vport); 3758 } 3759 } 3760 } 3761 } 3762 return; 3763 } 3764 3765 /** 3766 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 3767 * @t: pointer to the timer function associated data (ndlp). 3768 * 3769 * This routine is invoked by the ndlp delayed-function timer to check 3770 * whether there is any pending ELS retry event(s) with the node. If not, it 3771 * simply returns. Otherwise, if there is at least one ELS delayed event, it 3772 * adds the delayed events to the HBA work list and invokes the 3773 * lpfc_worker_wake_up() routine to wake up worker thread to process the 3774 * event. Note that lpfc_nlp_get() is called before posting the event to 3775 * the work list to hold reference count of ndlp so that it guarantees the 3776 * reference to ndlp will still be available when the worker thread gets 3777 * to the event associated with the ndlp. 3778 **/ 3779 void 3780 lpfc_els_retry_delay(struct timer_list *t) 3781 { 3782 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 3783 struct lpfc_vport *vport = ndlp->vport; 3784 struct lpfc_hba *phba = vport->phba; 3785 unsigned long flags; 3786 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 3787 3788 spin_lock_irqsave(&phba->hbalock, flags); 3789 if (!list_empty(&evtp->evt_listp)) { 3790 spin_unlock_irqrestore(&phba->hbalock, flags); 3791 return; 3792 } 3793 3794 /* We need to hold the node by incrementing the reference 3795 * count until the queued work is done 3796 */ 3797 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 3798 if (evtp->evt_arg1) { 3799 evtp->evt = LPFC_EVT_ELS_RETRY; 3800 list_add_tail(&evtp->evt_listp, &phba->work_list); 3801 lpfc_worker_wake_up(phba); 3802 } 3803 spin_unlock_irqrestore(&phba->hbalock, flags); 3804 return; 3805 } 3806 3807 /** 3808 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 3809 * @ndlp: pointer to a node-list data structure. 3810 * 3811 * This routine is the worker-thread handler for processing the @ndlp delayed 3812 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 3813 * the last ELS command from the associated ndlp and invokes the proper ELS 3814 * function according to the delayed ELS command to retry the command. 3815 **/ 3816 void 3817 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 3818 { 3819 struct lpfc_vport *vport = ndlp->vport; 3820 uint32_t cmd, retry; 3821 3822 spin_lock_irq(&ndlp->lock); 3823 cmd = ndlp->nlp_last_elscmd; 3824 ndlp->nlp_last_elscmd = 0; 3825 3826 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 3827 spin_unlock_irq(&ndlp->lock); 3828 return; 3829 } 3830 3831 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 3832 spin_unlock_irq(&ndlp->lock); 3833 /* 3834 * If a discovery event readded nlp_delayfunc after timer 3835 * firing and before processing the timer, cancel the 3836 * nlp_delayfunc. 3837 */ 3838 del_timer_sync(&ndlp->nlp_delayfunc); 3839 retry = ndlp->nlp_retry; 3840 ndlp->nlp_retry = 0; 3841 3842 switch (cmd) { 3843 case ELS_CMD_FLOGI: 3844 lpfc_issue_els_flogi(vport, ndlp, retry); 3845 break; 3846 case ELS_CMD_PLOGI: 3847 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 3848 ndlp->nlp_prev_state = ndlp->nlp_state; 3849 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3850 } 3851 break; 3852 case ELS_CMD_ADISC: 3853 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 3854 ndlp->nlp_prev_state = ndlp->nlp_state; 3855 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3856 } 3857 break; 3858 case ELS_CMD_PRLI: 3859 case ELS_CMD_NVMEPRLI: 3860 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 3861 ndlp->nlp_prev_state = ndlp->nlp_state; 3862 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3863 } 3864 break; 3865 case ELS_CMD_LOGO: 3866 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 3867 ndlp->nlp_prev_state = ndlp->nlp_state; 3868 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3869 } 3870 break; 3871 case ELS_CMD_FDISC: 3872 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 3873 lpfc_issue_els_fdisc(vport, ndlp, retry); 3874 break; 3875 } 3876 return; 3877 } 3878 3879 /** 3880 * lpfc_link_reset - Issue link reset 3881 * @vport: pointer to a virtual N_Port data structure. 3882 * 3883 * This routine performs link reset by sending INIT_LINK mailbox command. 3884 * For SLI-3 adapter, link attention interrupt is enabled before issuing 3885 * INIT_LINK mailbox command. 
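 * The INIT_LINK mailbox is posted with MBX_NOWAIT and its completion is handled by lpfc_sli_def_mbox_cmpl().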
3886 * 3887 * Return code 3888 * 0 - Link reset initiated successfully 3889 * 1 - Failed to initiate link reset 3890 **/ 3891 int 3892 lpfc_link_reset(struct lpfc_vport *vport) 3893 { 3894 struct lpfc_hba *phba = vport->phba; 3895 LPFC_MBOXQ_t *mbox; 3896 uint32_t control; 3897 int rc; 3898 3899 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3900 "2851 Attempt link reset\n"); 3901 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3902 if (!mbox) { 3903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3904 "2852 Failed to allocate mbox memory"); 3905 return 1; 3906 } 3907 3908 /* Enable Link attention interrupts */ 3909 if (phba->sli_rev <= LPFC_SLI_REV3) { 3910 spin_lock_irq(&phba->hbalock); 3911 phba->sli.sli_flag |= LPFC_PROCESS_LA; 3912 control = readl(phba->HCregaddr); 3913 control |= HC_LAINT_ENA; 3914 writel(control, phba->HCregaddr); 3915 readl(phba->HCregaddr); /* flush */ 3916 spin_unlock_irq(&phba->hbalock); 3917 } 3918 3919 lpfc_init_link(phba, mbox, phba->cfg_topology, 3920 phba->cfg_link_speed); 3921 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3922 mbox->vport = vport; 3923 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3924 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 3925 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3926 "2853 Failed to issue INIT_LINK " 3927 "mbox command, rc:x%x\n", rc); 3928 mempool_free(mbox, phba->mbox_mem_pool); 3929 return 1; 3930 } 3931 3932 return 0; 3933 } 3934 3935 /** 3936 * lpfc_els_retry - Make retry decision on an els command iocb 3937 * @phba: pointer to lpfc hba data structure. 3938 * @cmdiocb: pointer to lpfc command iocb data structure. 3939 * @rspiocb: pointer to lpfc response iocb data structure. 3940 * 3941 * This routine makes a retry decision on an ELS command IOCB, which has 3942 * failed. The following ELS IOCBs use this function for retrying the command 3943 * when a previously issued command responded with error status: FLOGI, PLOGI, 3944 * PRLI, ADISC and FDISC. Based on the ELS command type and the 3945 * returned error status, it makes the decision whether a retry shall be 3946 * issued for the command, and whether a retry shall be made immediately or 3947 * delayed. In the former case, the corresponding ELS command issuing-function 3948 * is called to retry the command. In the latter case, the ELS command shall 3949 * be posted to the ndlp delayed event and the delayed function timer set on the 3950 * ndlp for the delayed command issuing. 3951 * 3952 * Return code 3953 * 0 - No retry of els command is made 3954 * 1 - Immediate or delayed retry of els command is made 3955 **/ 3956 static int 3957 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3958 struct lpfc_iocbq *rspiocb) 3959 { 3960 struct lpfc_vport *vport = cmdiocb->vport; 3961 IOCB_t *irsp = &rspiocb->iocb; 3962 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3963 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3964 uint32_t *elscmd; 3965 struct ls_rjt stat; 3966 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 3967 int logerr = 0; 3968 uint32_t cmd = 0; 3969 uint32_t did; 3970 int link_reset = 0, rc; 3971 3972 3973 /* Note: context2 may be 0 for internal driver abort 3974 * of a delayed ELS command.
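 * In that case cmd remains 0 and the retry decision is based on the IOCB status alone.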
3975 */ 3976 3977 if (pcmd && pcmd->virt) { 3978 elscmd = (uint32_t *) (pcmd->virt); 3979 cmd = *elscmd++; 3980 } 3981 3982 if (ndlp) 3983 did = ndlp->nlp_DID; 3984 else { 3985 /* We should only hit this case for retrying PLOGI */ 3986 did = irsp->un.elsreq64.remoteID; 3987 ndlp = lpfc_findnode_did(vport, did); 3988 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 3989 return 0; 3990 } 3991 3992 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3993 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 3994 *(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did); 3995 3996 switch (irsp->ulpStatus) { 3997 case IOSTAT_FCP_RSP_ERROR: 3998 break; 3999 case IOSTAT_REMOTE_STOP: 4000 if (phba->sli_rev == LPFC_SLI_REV4) { 4001 /* This IO was aborted by the target. We don't 4002 * know the rxid, and because we did not send the 4003 * ABTS we cannot generate an RRQ. 4004 */ 4005 lpfc_set_rrq_active(phba, ndlp, 4006 cmdiocb->sli4_lxritag, 0, 0); 4007 } 4008 break; 4009 case IOSTAT_LOCAL_REJECT: 4010 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 4011 case IOERR_LOOP_OPEN_FAILURE: 4012 if (cmd == ELS_CMD_FLOGI) { 4013 if (PCI_DEVICE_ID_HORNET == 4014 phba->pcidev->device) { 4015 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 4016 phba->pport->fc_myDID = 0; 4017 phba->alpa_map[0] = 0; 4018 phba->alpa_map[1] = 0; 4019 } 4020 } 4021 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 4022 delay = 1000; 4023 retry = 1; 4024 break; 4025 4026 case IOERR_ILLEGAL_COMMAND: 4027 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4028 "0124 Retry illegal cmd x%x " 4029 "retry:x%x delay:x%x\n", 4030 cmd, cmdiocb->retry, delay); 4031 retry = 1; 4032 /* All command's retry policy */ 4033 maxretry = 8; 4034 if (cmdiocb->retry > 2) 4035 delay = 1000; 4036 break; 4037 4038 case IOERR_NO_RESOURCES: 4039 logerr = 1; /* HBA out of resources */ 4040 retry = 1; 4041 if (cmdiocb->retry > 100) 4042 delay = 100; 4043 maxretry = 250; 4044 break; 4045 4046 case IOERR_ILLEGAL_FRAME: 4047 delay = 100; 4048 retry = 1; 4049 break; 4050 4051 case IOERR_INVALID_RPI: 4052 if (cmd == ELS_CMD_PLOGI && 4053 did == NameServer_DID) { 4054 /* Continue forever if plogi to */ 4055 /* the nameserver fails */ 4056 maxretry = 0; 4057 delay = 100; 4058 } 4059 retry = 1; 4060 break; 4061 4062 case IOERR_SEQUENCE_TIMEOUT: 4063 if (cmd == ELS_CMD_PLOGI && 4064 did == NameServer_DID && 4065 (cmdiocb->retry + 1) == maxretry) { 4066 /* Reset the Link */ 4067 link_reset = 1; 4068 break; 4069 } 4070 retry = 1; 4071 delay = 100; 4072 break; 4073 } 4074 break; 4075 4076 case IOSTAT_NPORT_RJT: 4077 case IOSTAT_FABRIC_RJT: 4078 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 4079 retry = 1; 4080 break; 4081 } 4082 break; 4083 4084 case IOSTAT_NPORT_BSY: 4085 case IOSTAT_FABRIC_BSY: 4086 logerr = 1; /* Fabric / Remote NPort out of resources */ 4087 retry = 1; 4088 break; 4089 4090 case IOSTAT_LS_RJT: 4091 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 4092 /* Added for Vendor specific support 4093 * Just keep retrying for these Rsn / Exp codes 4094 */ 4095 switch (stat.un.b.lsRjtRsnCode) { 4096 case LSRJT_UNABLE_TPC: 4097 /* The driver has a VALID PLOGI but the rport has 4098 * rejected the PRLI - can't do it now. Delay 4099 * for 1 second and try again. 4100 * 4101 * However, if explanation is REQ_UNSUPPORTED there's 4102 * no point in retrying PRLI.
4103 */ 4104 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 4105 stat.un.b.lsRjtRsnCodeExp != 4106 LSEXP_REQ_UNSUPPORTED) { 4107 delay = 1000; 4108 maxretry = lpfc_max_els_tries + 1; 4109 retry = 1; 4110 break; 4111 } 4112 4113 /* Legacy bug fix code for targets with PLOGI delays. */ 4114 if (stat.un.b.lsRjtRsnCodeExp == 4115 LSEXP_CMD_IN_PROGRESS) { 4116 if (cmd == ELS_CMD_PLOGI) { 4117 delay = 1000; 4118 maxretry = 48; 4119 } 4120 retry = 1; 4121 break; 4122 } 4123 if (stat.un.b.lsRjtRsnCodeExp == 4124 LSEXP_CANT_GIVE_DATA) { 4125 if (cmd == ELS_CMD_PLOGI) { 4126 delay = 1000; 4127 maxretry = 48; 4128 } 4129 retry = 1; 4130 break; 4131 } 4132 if (cmd == ELS_CMD_PLOGI) { 4133 delay = 1000; 4134 maxretry = lpfc_max_els_tries + 1; 4135 retry = 1; 4136 break; 4137 } 4138 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4139 (cmd == ELS_CMD_FDISC) && 4140 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 4141 lpfc_printf_vlog(vport, KERN_ERR, 4142 LOG_TRACE_EVENT, 4143 "0125 FDISC Failed (x%x). " 4144 "Fabric out of resources\n", 4145 stat.un.lsRjtError); 4146 lpfc_vport_set_state(vport, 4147 FC_VPORT_NO_FABRIC_RSCS); 4148 } 4149 break; 4150 4151 case LSRJT_LOGICAL_BSY: 4152 if ((cmd == ELS_CMD_PLOGI) || 4153 (cmd == ELS_CMD_PRLI) || 4154 (cmd == ELS_CMD_NVMEPRLI)) { 4155 delay = 1000; 4156 maxretry = 48; 4157 } else if (cmd == ELS_CMD_FDISC) { 4158 /* FDISC retry policy */ 4159 maxretry = 48; 4160 if (cmdiocb->retry >= 32) 4161 delay = 1000; 4162 } 4163 retry = 1; 4164 break; 4165 4166 case LSRJT_LOGICAL_ERR: 4167 /* There are some cases where switches return this 4168 * error when they are not ready and should be returning 4169 * Logical Busy. We should delay every time. 4170 */ 4171 if (cmd == ELS_CMD_FDISC && 4172 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4173 maxretry = 3; 4174 delay = 1000; 4175 retry = 1; 4176 } else if (cmd == ELS_CMD_FLOGI && 4177 stat.un.b.lsRjtRsnCodeExp == 4178 LSEXP_NOTHING_MORE) { 4179 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4180 retry = 1; 4181 lpfc_printf_vlog(vport, KERN_ERR, 4182 LOG_TRACE_EVENT, 4183 "0820 FLOGI Failed (x%x). " 4184 "BBCredit Not Supported\n", 4185 stat.un.lsRjtError); 4186 } 4187 break; 4188 4189 case LSRJT_PROTOCOL_ERR: 4190 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4191 (cmd == ELS_CMD_FDISC) && 4192 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4193 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4194 ) { 4195 lpfc_printf_vlog(vport, KERN_ERR, 4196 LOG_TRACE_EVENT, 4197 "0122 FDISC Failed (x%x). " 4198 "Fabric Detected Bad WWN\n", 4199 stat.un.lsRjtError); 4200 lpfc_vport_set_state(vport, 4201 FC_VPORT_FABRIC_REJ_WWN); 4202 } 4203 break; 4204 case LSRJT_VENDOR_UNIQUE: 4205 if ((stat.un.b.vendorUnique == 0x45) && 4206 (cmd == ELS_CMD_FLOGI)) { 4207 goto out_retry; 4208 } 4209 break; 4210 case LSRJT_CMD_UNSUPPORTED: 4211 /* lpfc nvmet returns this type of LS_RJT when it 4212 * receives an FCP PRLI because lpfc nvmet only 4213 * support NVME. ELS request is terminated for FCP4 4214 * on this rport. 
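 * The NLP_FCP_PRLI_RJT flag records the rejection so no further FCP PRLI is attempted on this node.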
4215 */ 4216 if (stat.un.b.lsRjtRsnCodeExp == 4217 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 4218 spin_lock_irq(&ndlp->lock); 4219 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4220 spin_unlock_irq(&ndlp->lock); 4221 retry = 0; 4222 goto out_retry; 4223 } 4224 break; 4225 } 4226 break; 4227 4228 case IOSTAT_INTERMED_RSP: 4229 case IOSTAT_BA_RJT: 4230 break; 4231 4232 default: 4233 break; 4234 } 4235 4236 if (link_reset) { 4237 rc = lpfc_link_reset(vport); 4238 if (rc) { 4239 /* Do not give up. Retry PLOGI one more time and attempt 4240 * link reset if PLOGI fails again. 4241 */ 4242 retry = 1; 4243 delay = 100; 4244 goto out_retry; 4245 } 4246 return 1; 4247 } 4248 4249 if (did == FDMI_DID) 4250 retry = 1; 4251 4252 if ((cmd == ELS_CMD_FLOGI) && 4253 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4254 !lpfc_error_lost_link(irsp)) { 4255 /* FLOGI retry policy */ 4256 retry = 1; 4257 /* retry FLOGI forever */ 4258 if (phba->link_flag != LS_LOOPBACK_MODE) 4259 maxretry = 0; 4260 else 4261 maxretry = 2; 4262 4263 if (cmdiocb->retry >= 100) 4264 delay = 5000; 4265 else if (cmdiocb->retry >= 32) 4266 delay = 1000; 4267 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 4268 /* retry FDISCs every second up to devloss */ 4269 retry = 1; 4270 maxretry = vport->cfg_devloss_tmo; 4271 delay = 1000; 4272 } 4273 4274 cmdiocb->retry++; 4275 if (maxretry && (cmdiocb->retry >= maxretry)) { 4276 phba->fc_stat.elsRetryExceeded++; 4277 retry = 0; 4278 } 4279 4280 if ((vport->load_flag & FC_UNLOADING) != 0) 4281 retry = 0; 4282 4283 out_retry: 4284 if (retry) { 4285 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4286 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4287 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4288 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4289 "2849 Stop retry ELS command " 4290 "x%x to remote NPORT x%x, " 4291 "Data: x%x x%x\n", cmd, did, 4292 cmdiocb->retry, delay); 4293 return 0; 4294 } 4295 } 4296 4297 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4298 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4299 "0107 Retry ELS command x%x to remote " 4300 "NPORT x%x Data: x%x x%x\n", 4301 cmd, did, cmdiocb->retry, delay); 4302 4303 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4304 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 4305 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 4306 IOERR_NO_RESOURCES))) { 4307 /* Don't reset timer for no resources */ 4308 4309 /* If discovery / RSCN timer is running, reset it */ 4310 if (timer_pending(&vport->fc_disctmo) || 4311 (vport->fc_flag & FC_RSCN_MODE)) 4312 lpfc_set_disctmo(vport); 4313 } 4314 4315 phba->fc_stat.elsXmitRetry++; 4316 if (ndlp && delay) { 4317 phba->fc_stat.elsDelayRetry++; 4318 ndlp->nlp_retry = cmdiocb->retry; 4319 4320 /* delay is specified in milliseconds */ 4321 mod_timer(&ndlp->nlp_delayfunc, 4322 jiffies + msecs_to_jiffies(delay)); 4323 spin_lock_irq(&ndlp->lock); 4324 ndlp->nlp_flag |= NLP_DELAY_TMO; 4325 spin_unlock_irq(&ndlp->lock); 4326 4327 ndlp->nlp_prev_state = ndlp->nlp_state; 4328 if ((cmd == ELS_CMD_PRLI) || 4329 (cmd == ELS_CMD_NVMEPRLI)) 4330 lpfc_nlp_set_state(vport, ndlp, 4331 NLP_STE_PRLI_ISSUE); 4332 else 4333 lpfc_nlp_set_state(vport, ndlp, 4334 NLP_STE_NPR_NODE); 4335 ndlp->nlp_last_elscmd = cmd; 4336 4337 return 1; 4338 } 4339 switch (cmd) { 4340 case ELS_CMD_FLOGI: 4341 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4342 return 1; 4343 case ELS_CMD_FDISC: 4344 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4345 return 1; 4346 case ELS_CMD_PLOGI: 4347 if (ndlp) 
{ 4348 ndlp->nlp_prev_state = ndlp->nlp_state; 4349 lpfc_nlp_set_state(vport, ndlp, 4350 NLP_STE_PLOGI_ISSUE); 4351 } 4352 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4353 return 1; 4354 case ELS_CMD_ADISC: 4355 ndlp->nlp_prev_state = ndlp->nlp_state; 4356 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4357 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4358 return 1; 4359 case ELS_CMD_PRLI: 4360 case ELS_CMD_NVMEPRLI: 4361 ndlp->nlp_prev_state = ndlp->nlp_state; 4362 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4363 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4364 return 1; 4365 case ELS_CMD_LOGO: 4366 ndlp->nlp_prev_state = ndlp->nlp_state; 4367 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4368 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4369 return 1; 4370 } 4371 } 4372 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4373 if (logerr) { 4374 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4375 "0137 No retry ELS command x%x to remote " 4376 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4377 cmd, did, irsp->ulpStatus, 4378 irsp->un.ulpWord[4]); 4379 } 4380 else { 4381 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4382 "0108 No retry ELS command x%x to remote " 4383 "NPORT x%x Retried:%d Error:x%x/%x\n", 4384 cmd, did, cmdiocb->retry, irsp->ulpStatus, 4385 irsp->un.ulpWord[4]); 4386 } 4387 return 0; 4388 } 4389 4390 /** 4391 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4392 * @phba: pointer to lpfc hba data structure. 4393 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4394 * 4395 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4396 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4397 * checks to see whether there is a lpfc DMA buffer associated with the 4398 * response of the command IOCB. If so, it will be released before releasing 4399 * the lpfc DMA buffer associated with the IOCB itself. 4400 * 4401 * Return code 4402 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4403 **/ 4404 static int 4405 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 4406 { 4407 struct lpfc_dmabuf *buf_ptr; 4408 4409 /* Free the response before processing the command. */ 4410 if (!list_empty(&buf_ptr1->list)) { 4411 list_remove_head(&buf_ptr1->list, buf_ptr, 4412 struct lpfc_dmabuf, 4413 list); 4414 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4415 kfree(buf_ptr); 4416 } 4417 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 4418 kfree(buf_ptr1); 4419 return 0; 4420 } 4421 4422 /** 4423 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 4424 * @phba: pointer to lpfc hba data structure. 4425 * @buf_ptr: pointer to the lpfc dma buffer data structure. 4426 * 4427 * This routine releases the lpfc Direct Memory Access (DMA) buffer 4428 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 4429 * pool. 4430 * 4431 * Return code 4432 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4433 **/ 4434 static int 4435 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 4436 { 4437 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4438 kfree(buf_ptr); 4439 return 0; 4440 } 4441 4442 /** 4443 * lpfc_els_free_iocb - Free a command iocb and its associated resources 4444 * @phba: pointer to lpfc hba data structure. 4445 * @elsiocb: pointer to lpfc els command iocb data structure. 
4446 * 4447 * This routine frees a command IOCB and its associated resources. The 4448 * command IOCB data structure contains references to various associated 4449 * resources; these fields must be set to NULL if the associated reference 4450 * is not present: 4451 * context1 - reference to ndlp 4452 * context2 - reference to cmd 4453 * context2->next - reference to rsp 4454 * context3 - reference to bpl 4455 * 4456 * It first clears the ndlp reference held in the IOCB's context1 field for the 4457 * completion callback function. If LPFC_DELAY_MEM_FREE flag is not 4458 * set, it invokes the lpfc_els_free_data() routine to release the Direct 4459 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 4460 * adds the DMA buffer to the @phba data structure for the delayed release. 4461 * If reference to the Buffer Pointer List (BPL) is present, the 4462 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 4463 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 4464 * invoked to release the IOCB data structure back to @phba IOCBQ list. 4465 * 4466 * Return code 4467 * 0 - Success (currently, always returns 0) 4468 **/ 4469 int 4470 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 4471 { 4472 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 4473 4474 /* The I/O job is complete. Clear the context1 data. */ 4475 elsiocb->context1 = NULL; 4476 4477 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 4478 if (elsiocb->context2) { 4479 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 4480 /* Firmware could still be in progress of DMAing 4481 * payload, so don't free data buffer till after 4482 * a hbeat. 4483 */ 4484 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 4485 buf_ptr = elsiocb->context2; 4486 elsiocb->context2 = NULL; 4487 if (buf_ptr) { 4488 buf_ptr1 = NULL; 4489 spin_lock_irq(&phba->hbalock); 4490 if (!list_empty(&buf_ptr->list)) { 4491 list_remove_head(&buf_ptr->list, 4492 buf_ptr1, struct lpfc_dmabuf, 4493 list); 4494 INIT_LIST_HEAD(&buf_ptr1->list); 4495 list_add_tail(&buf_ptr1->list, 4496 &phba->elsbuf); 4497 phba->elsbuf_cnt++; 4498 } 4499 INIT_LIST_HEAD(&buf_ptr->list); 4500 list_add_tail(&buf_ptr->list, &phba->elsbuf); 4501 phba->elsbuf_cnt++; 4502 spin_unlock_irq(&phba->hbalock); 4503 } 4504 } else { 4505 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 4506 lpfc_els_free_data(phba, buf_ptr1); 4507 elsiocb->context2 = NULL; 4508 } 4509 } 4510 4511 if (elsiocb->context3) { 4512 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 4513 lpfc_els_free_bpl(phba, buf_ptr); 4514 elsiocb->context3 = NULL; 4515 } 4516 lpfc_sli_release_iocbq(phba, elsiocb); 4517 return 0; 4518 } 4519 4520 /** 4521 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 4522 * @phba: pointer to lpfc hba data structure. 4523 * @cmdiocb: pointer to lpfc command iocb data structure. 4524 * @rspiocb: pointer to lpfc response iocb data structure. 4525 * 4526 * This routine is the completion callback function to the Logout (LOGO) 4527 * Accept (ACC) Response ELS command. This routine is invoked to indicate 4528 * the completion of the LOGO process. It invokes lpfc_nlp_not_used() to 4529 * release the ndlp if it has the last reference remaining (reference count 4530 * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB context1 4531 * field to NULL to inform the following lpfc_els_free_iocb() routine that no 4532 * ndlp reference count needs to be decremented.
Otherwise, the ndlp 4533 * reference count is dropped by the lpfc_nlp_put() call made after 4534 * lpfc_els_free_iocb(). Finally, lpfc_els_free_iocb() is invoked to release the 4535 * IOCB data structure. 4536 **/ 4537 static void 4538 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4539 struct lpfc_iocbq *rspiocb) 4540 { 4541 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4542 struct lpfc_vport *vport = cmdiocb->vport; 4543 IOCB_t *irsp; 4544 4545 irsp = &rspiocb->iocb; 4546 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4547 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 4548 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 4549 /* ACC to LOGO completes to NPort <nlp_DID> */ 4550 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4551 "0109 ACC to LOGO completes to NPort x%x refcnt %d " 4552 "Data: x%x x%x x%x\n", 4553 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 4554 ndlp->nlp_state, ndlp->nlp_rpi); 4555 4556 /* This clause allows the LOGO ACC to complete and free resources 4557 * for the Fabric Domain Controller. It deliberately skips 4558 * the unreg_rpi and rpi release because some fabrics send RDP 4559 * requests after logging out from the initiator. 4560 */ 4561 if (ndlp->nlp_type & NLP_FABRIC && 4562 ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) 4563 goto out; 4564 4565 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 4566 /* NPort Recovery mode or node is just allocated */ 4567 if (!lpfc_nlp_not_used(ndlp)) { 4568 /* A LOGO is completing and the node is in NPR state. 4569 * If this is a fabric node that cleared its transport 4570 * registration, release the rpi. 4571 */ 4572 spin_lock_irq(&ndlp->lock); 4573 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4574 if (phba->sli_rev == LPFC_SLI_REV4) 4575 ndlp->nlp_flag |= NLP_RELEASE_RPI; 4576 spin_unlock_irq(&ndlp->lock); 4577 lpfc_unreg_rpi(vport, ndlp); 4578 } else { 4579 /* Indicate the node has already been released; it should 4580 * not be referenced from within lpfc_els_free_iocb. 4581 */ 4582 cmdiocb->context1 = NULL; 4583 } 4584 } 4585 out: 4586 /* 4587 * The driver received a LOGO from the rport and has ACK'd it. 4588 * At this point, the driver is done so release the IOCB 4589 */ 4590 lpfc_els_free_iocb(phba, cmdiocb); 4591 lpfc_nlp_put(ndlp); 4592 } 4593 4594 /** 4595 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 4596 * @phba: pointer to lpfc hba data structure. 4597 * @pmb: pointer to the driver internal queue element for mailbox command. 4598 * 4599 * This routine is the completion callback function for unregister default 4600 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 4601 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 4602 * decrements the ndlp reference count held for this completion callback 4603 * function. After that, it releases the node itself via lpfc_drop_node(), 4604 * which ends the default/temporary RPI cleanup 4605 * logic for this ndlp.
4606 **/ 4607 void 4608 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4609 { 4610 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 4611 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 4612 u32 mbx_flag = pmb->mbox_flag; 4613 u32 mbx_cmd = pmb->u.mb.mbxCommand; 4614 4615 pmb->ctx_buf = NULL; 4616 pmb->ctx_ndlp = NULL; 4617 4618 if (ndlp) { 4619 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4620 "0006 rpi x%x DID:%x flg:%x %d x%px " 4621 "mbx_cmd x%x mbx_flag x%x x%px\n", 4622 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4623 kref_read(&ndlp->kref), ndlp, mbx_cmd, 4624 mbx_flag, pmb); 4625 4626 /* This ends the default/temporary RPI cleanup logic for this 4627 * ndlp; the node and rpi need to be released. Free the rpi 4628 * first on an UNREG_LOGIN and then release the final 4629 * references. 4630 */ 4631 spin_lock_irq(&ndlp->lock); 4632 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4633 if (mbx_cmd == MBX_UNREG_LOGIN) 4634 ndlp->nlp_flag &= ~NLP_UNREG_INP; 4635 spin_unlock_irq(&ndlp->lock); 4636 lpfc_nlp_put(ndlp); 4637 lpfc_drop_node(ndlp->vport, ndlp); 4638 } 4639 4640 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4641 kfree(mp); 4642 mempool_free(pmb, phba->mbox_mem_pool); 4643 return; 4644 } 4645 4646 /** 4647 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 4648 * @phba: pointer to lpfc hba data structure. 4649 * @cmdiocb: pointer to lpfc command iocb data structure. 4650 * @rspiocb: pointer to lpfc response iocb data structure. 4651 * 4652 * This routine is the completion callback function for ELS Response IOCB 4653 * command. In the normal case, this callback function just properly sets the 4654 * nlp_flag bitmap in the ndlp data structure. If the mbox command reference 4655 * field in the command IOCB is not NULL, the referred mailbox command will 4656 * be sent out. It then invokes the lpfc_els_free_iocb() routine to release 4657 * the IOCB. 4658 **/ 4659 static void 4660 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4661 struct lpfc_iocbq *rspiocb) 4662 { 4663 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4664 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 4665 struct Scsi_Host *shost = vport ?
lpfc_shost_from_vport(vport) : NULL; 4666 IOCB_t *irsp; 4667 LPFC_MBOXQ_t *mbox = NULL; 4668 struct lpfc_dmabuf *mp = NULL; 4669 4670 irsp = &rspiocb->iocb; 4671 4672 if (!vport) { 4673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4674 "3177 ELS response failed\n"); 4675 goto out; 4676 } 4677 if (cmdiocb->context_un.mbox) 4678 mbox = cmdiocb->context_un.mbox; 4679 4680 /* Check to see if link went down during discovery */ 4681 if (!ndlp || lpfc_els_chk_latt(vport)) { 4682 if (mbox) { 4683 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 4684 if (mp) { 4685 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4686 kfree(mp); 4687 } 4688 mempool_free(mbox, phba->mbox_mem_pool); 4689 } 4690 goto out; 4691 } 4692 4693 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4694 "ELS rsp cmpl: status:x%x/x%x did:x%x", 4695 irsp->ulpStatus, irsp->un.ulpWord[4], 4696 cmdiocb->iocb.un.elsreq64.remoteID); 4697 /* ELS response tag <ulpIoTag> completes */ 4698 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4699 "0110 ELS response tag x%x completes " 4700 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n", 4701 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 4702 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 4703 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4704 ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox); 4705 if (mbox) { 4706 if ((rspiocb->iocb.ulpStatus == 0) && 4707 (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 4708 if (!lpfc_unreg_rpi(vport, ndlp) && 4709 (!(vport->fc_flag & FC_PT2PT))) { 4710 if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 4711 lpfc_printf_vlog(vport, KERN_INFO, 4712 LOG_DISCOVERY, 4713 "0314 PLOGI recov " 4714 "DID x%x " 4715 "Data: x%x x%x x%x\n", 4716 ndlp->nlp_DID, 4717 ndlp->nlp_state, 4718 ndlp->nlp_rpi, 4719 ndlp->nlp_flag); 4720 mp = mbox->ctx_buf; 4721 if (mp) { 4722 lpfc_mbuf_free(phba, mp->virt, 4723 mp->phys); 4724 kfree(mp); 4725 } 4726 mempool_free(mbox, phba->mbox_mem_pool); 4727 goto out; 4728 } 4729 } 4730 4731 /* Increment reference count to ndlp to hold the 4732 * reference to ndlp for the callback function. 4733 */ 4734 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 4735 if (!mbox->ctx_ndlp) 4736 goto out; 4737 4738 mbox->vport = vport; 4739 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 4740 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 4741 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 4742 } 4743 else { 4744 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 4745 ndlp->nlp_prev_state = ndlp->nlp_state; 4746 lpfc_nlp_set_state(vport, ndlp, 4747 NLP_STE_REG_LOGIN_ISSUE); 4748 } 4749 4750 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 4751 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 4752 != MBX_NOT_FINISHED) 4753 goto out; 4754 4755 /* Decrement the ndlp reference count we 4756 * set for this failed mailbox command. 
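 * and clear NLP_REG_LOGIN_SEND since no REG_LOGIN is now outstanding.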
4757 */ 4758 lpfc_nlp_put(ndlp); 4759 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4760 4761 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 4762 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4763 "0138 ELS rsp: Cannot issue reg_login for x%x " 4764 "Data: x%x x%x x%x\n", 4765 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4766 ndlp->nlp_rpi); 4767 } 4768 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 4769 if (mp) { 4770 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4771 kfree(mp); 4772 } 4773 mempool_free(mbox, phba->mbox_mem_pool); 4774 } 4775 out: 4776 if (ndlp && shost) { 4777 spin_lock_irq(&ndlp->lock); 4778 if (mbox) 4779 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 4780 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 4781 spin_unlock_irq(&ndlp->lock); 4782 } 4783 4784 /* An SLI4 NPIV instance wants to drop the node at this point under 4785 * these conditions and release the RPI. 4786 */ 4787 if (phba->sli_rev == LPFC_SLI_REV4 && 4788 (vport && vport->port_type == LPFC_NPIV_PORT) && 4789 ndlp->nlp_flag & NLP_RELEASE_RPI) { 4790 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 4791 spin_lock_irq(&ndlp->lock); 4792 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 4793 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 4794 spin_unlock_irq(&ndlp->lock); 4795 lpfc_drop_node(vport, ndlp); 4796 } 4797 4798 /* Release the originating I/O reference. */ 4799 lpfc_els_free_iocb(phba, cmdiocb); 4800 lpfc_nlp_put(ndlp); 4801 return; 4802 } 4803 4804 /** 4805 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 4806 * @vport: pointer to a host virtual N_Port data structure. 4807 * @flag: the els command code to be accepted. 4808 * @oldiocb: pointer to the original lpfc command iocb data structure. 4809 * @ndlp: pointer to a node-list data structure. 4810 * @mbox: pointer to the driver internal queue element for mailbox command. 4811 * 4812 * This routine prepares and issues an Accept (ACC) response IOCB 4813 * command. It uses the @flag to properly set up the IOCB field for the 4814 * specific ACC response command to be issued and invokes the 4815 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 4816 * @mbox pointer is passed in, it will be put into the context_un.mbox 4817 * field of the IOCB for the completion callback function to issue the 4818 * mailbox command to the HBA later when callback is invoked. 4819 * 4820 * Note that the ndlp reference count will be incremented by 1 for holding the 4821 * ndlp and the reference to ndlp will be stored into the context1 field of 4822 * the IOCB for the completion callback function to the corresponding 4823 * response ELS IOCB command. 
4824 * 4825 * Return code 4826 * 0 - Successfully issued acc response 4827 * 1 - Failed to issue acc response 4828 **/ 4829 int 4830 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 4831 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4832 LPFC_MBOXQ_t *mbox) 4833 { 4834 struct lpfc_hba *phba = vport->phba; 4835 IOCB_t *icmd; 4836 IOCB_t *oldcmd; 4837 struct lpfc_iocbq *elsiocb; 4838 uint8_t *pcmd; 4839 struct serv_parm *sp; 4840 uint16_t cmdsize; 4841 int rc; 4842 ELS_PKT *els_pkt_ptr; 4843 struct fc_els_rdf_resp *rdf_resp; 4844 4845 oldcmd = &oldiocb->iocb; 4846 4847 switch (flag) { 4848 case ELS_CMD_ACC: 4849 cmdsize = sizeof(uint32_t); 4850 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4851 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4852 if (!elsiocb) { 4853 spin_lock_irq(&ndlp->lock); 4854 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4855 spin_unlock_irq(&ndlp->lock); 4856 return 1; 4857 } 4858 4859 icmd = &elsiocb->iocb; 4860 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4861 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4862 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4863 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4864 pcmd += sizeof(uint32_t); 4865 4866 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4867 "Issue ACC: did:x%x flg:x%x", 4868 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4869 break; 4870 case ELS_CMD_FLOGI: 4871 case ELS_CMD_PLOGI: 4872 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 4873 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4874 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4875 if (!elsiocb) 4876 return 1; 4877 4878 icmd = &elsiocb->iocb; 4879 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4880 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4881 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4882 4883 if (mbox) 4884 elsiocb->context_un.mbox = mbox; 4885 4886 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4887 pcmd += sizeof(uint32_t); 4888 sp = (struct serv_parm *)pcmd; 4889 4890 if (flag == ELS_CMD_FLOGI) { 4891 /* Copy the received service parameters back */ 4892 memcpy(sp, &phba->fc_fabparam, 4893 sizeof(struct serv_parm)); 4894 4895 /* Clear the F_Port bit */ 4896 sp->cmn.fPort = 0; 4897 4898 /* Mark all class service parameters as invalid */ 4899 sp->cls1.classValid = 0; 4900 sp->cls2.classValid = 0; 4901 sp->cls3.classValid = 0; 4902 sp->cls4.classValid = 0; 4903 4904 /* Copy our worldwide names */ 4905 memcpy(&sp->portName, &vport->fc_sparam.portName, 4906 sizeof(struct lpfc_name)); 4907 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 4908 sizeof(struct lpfc_name)); 4909 } else { 4910 memcpy(pcmd, &vport->fc_sparam, 4911 sizeof(struct serv_parm)); 4912 4913 sp->cmn.valid_vendor_ver_level = 0; 4914 memset(sp->un.vendorVersion, 0, 4915 sizeof(sp->un.vendorVersion)); 4916 sp->cmn.bbRcvSizeMsb &= 0xF; 4917 4918 /* If our firmware supports this feature, convey that 4919 * info to the target using the vendor specific field. 
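 * The vendor version words carry the Emulex vendor ID and the LPFC_VV_SUPPRESS_RSP flag.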
4920 */ 4921 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 4922 sp->cmn.valid_vendor_ver_level = 1; 4923 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 4924 sp->un.vv.flags = 4925 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 4926 } 4927 } 4928 4929 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4930 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 4931 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4932 break; 4933 case ELS_CMD_PRLO: 4934 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 4935 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4936 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 4937 if (!elsiocb) 4938 return 1; 4939 4940 icmd = &elsiocb->iocb; 4941 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4942 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4943 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4944 4945 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 4946 sizeof(uint32_t) + sizeof(PRLO)); 4947 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 4948 els_pkt_ptr = (ELS_PKT *) pcmd; 4949 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 4950 4951 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4952 "Issue ACC PRLO: did:x%x flg:x%x", 4953 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4954 break; 4955 case ELS_CMD_RDF: 4956 cmdsize = sizeof(*rdf_resp); 4957 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4958 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4959 if (!elsiocb) 4960 return 1; 4961 4962 icmd = &elsiocb->iocb; 4963 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4964 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4965 pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 4966 rdf_resp = (struct fc_els_rdf_resp *)pcmd; 4967 memset(rdf_resp, 0, sizeof(*rdf_resp)); 4968 rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; 4969 4970 /* FC-LS-5 specifies desc_list_len shall be set to 12 */ 4971 rdf_resp->desc_list_len = cpu_to_be32(12); 4972 4973 /* FC-LS-5 specifies LS REQ Information descriptor */ 4974 rdf_resp->lsri.desc_tag = cpu_to_be32(1); 4975 rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); 4976 rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; 4977 break; 4978 default: 4979 return 1; 4980 } 4981 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 4982 spin_lock_irq(&ndlp->lock); 4983 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 4984 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 4985 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4986 spin_unlock_irq(&ndlp->lock); 4987 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 4988 } else { 4989 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4990 } 4991 4992 phba->fc_stat.elsXmitACC++; 4993 elsiocb->context1 = lpfc_nlp_get(ndlp); 4994 if (!elsiocb->context1) { 4995 lpfc_els_free_iocb(phba, elsiocb); 4996 return 1; 4997 } 4998 4999 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5000 if (rc == IOCB_ERROR) { 5001 lpfc_els_free_iocb(phba, elsiocb); 5002 lpfc_nlp_put(ndlp); 5003 return 1; 5004 } 5005 5006 /* Xmit ELS ACC response tag <ulpIoTag> */ 5007 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5008 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5009 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5010 "RPI: x%x, fc_flag x%x refcnt %d\n", 5011 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5012 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5013 ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); 5014 return 0; 5015 } 5016 5017 /** 5018 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command 5019 * @vport: pointer to a virtual N_Port data structure. 
5020 * @rejectError: reject response to issue 5021 * @oldiocb: pointer to the original lpfc command iocb data structure. 5022 * @ndlp: pointer to a node-list data structure. 5023 * @mbox: pointer to the driver internal queue element for mailbox command. 5024 * 5025 * This routine prepares and issues a Reject (RJT) response IOCB 5026 * command. If a @mbox pointer is passed in, it will be put into the 5027 * context_un.mbox field of the IOCB for the completion callback function 5028 * to issue to the HBA later. 5029 * 5030 * Note that the ndlp reference count will be incremented by 1 for holding the 5031 * ndlp and the reference to ndlp will be stored into the context1 field of 5032 * the IOCB for the completion callback function to the reject response 5033 * ELS IOCB command. 5034 * 5035 * Return code 5036 * 0 - Successfully issued reject response 5037 * 1 - Failed to issue reject response 5038 **/ 5039 int 5040 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 5041 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 5042 LPFC_MBOXQ_t *mbox) 5043 { 5044 int rc; 5045 struct lpfc_hba *phba = vport->phba; 5046 IOCB_t *icmd; 5047 IOCB_t *oldcmd; 5048 struct lpfc_iocbq *elsiocb; 5049 uint8_t *pcmd; 5050 uint16_t cmdsize; 5051 5052 cmdsize = 2 * sizeof(uint32_t); 5053 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5054 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5055 if (!elsiocb) 5056 return 1; 5057 5058 icmd = &elsiocb->iocb; 5059 oldcmd = &oldiocb->iocb; 5060 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5061 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5062 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5063 5064 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5065 pcmd += sizeof(uint32_t); 5066 *((uint32_t *) (pcmd)) = rejectError; 5067 5068 if (mbox) 5069 elsiocb->context_un.mbox = mbox; 5070 5071 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 5072 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5073 "0129 Xmit ELS RJT x%x response tag x%x " 5074 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 5075 "rpi x%x\n", 5076 rejectError, elsiocb->iotag, 5077 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 5078 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 5079 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5080 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 5081 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 5082 5083 phba->fc_stat.elsXmitLSRJT++; 5084 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5085 elsiocb->context1 = lpfc_nlp_get(ndlp); 5086 if (!elsiocb->context1) { 5087 lpfc_els_free_iocb(phba, elsiocb); 5088 return 1; 5089 } 5090 5091 /* The NPIV instance is rejecting this unsolicited ELS. Flag the 5092 * node's assigned RPI to be released, as this node will get 5093 * freed. 5094 */ 5095 if (phba->sli_rev == LPFC_SLI_REV4 && 5096 vport->port_type == LPFC_NPIV_PORT) { 5097 spin_lock_irq(&ndlp->lock); 5098 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5099 spin_unlock_irq(&ndlp->lock); 5100 } 5101 5102 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5103 if (rc == IOCB_ERROR) { 5104 lpfc_els_free_iocb(phba, elsiocb); 5105 lpfc_nlp_put(ndlp); 5106 return 1; 5107 } 5108 5109 return 0; 5110 } 5111 5112 /** 5113 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 5114 * @vport: pointer to a virtual N_Port data structure. 5115 * @oldiocb: pointer to the original lpfc command iocb data structure. 5116 * @ndlp: pointer to a node-list data structure.
5117 * 5118 * This routine prepares and issues an Accept (ACC) response to Address 5119 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 5120 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5121 * 5122 * Note that the ndlp reference count will be incremented by 1 for holding the 5123 * ndlp and the reference to ndlp will be stored into the context1 field of 5124 * the IOCB for the completion callback function to the ADISC Accept response 5125 * ELS IOCB command. 5126 * 5127 * Return code 5128 * 0 - Successfully issued acc adisc response 5129 * 1 - Failed to issue adisc acc response 5130 **/ 5131 int 5132 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5133 struct lpfc_nodelist *ndlp) 5134 { 5135 struct lpfc_hba *phba = vport->phba; 5136 ADISC *ap; 5137 IOCB_t *icmd, *oldcmd; 5138 struct lpfc_iocbq *elsiocb; 5139 uint8_t *pcmd; 5140 uint16_t cmdsize; 5141 int rc; 5142 5143 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 5144 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5145 ndlp->nlp_DID, ELS_CMD_ACC); 5146 if (!elsiocb) 5147 return 1; 5148 5149 icmd = &elsiocb->iocb; 5150 oldcmd = &oldiocb->iocb; 5151 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5152 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5153 5154 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5155 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5156 "0130 Xmit ADISC ACC response iotag x%x xri: " 5157 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5158 elsiocb->iotag, elsiocb->iocb.ulpContext, 5159 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5160 ndlp->nlp_rpi); 5161 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5162 5163 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5164 pcmd += sizeof(uint32_t); 5165 5166 ap = (ADISC *) (pcmd); 5167 ap->hardAL_PA = phba->fc_pref_ALPA; 5168 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5169 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5170 ap->DID = be32_to_cpu(vport->fc_myDID); 5171 5172 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5173 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5174 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5175 5176 phba->fc_stat.elsXmitACC++; 5177 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5178 elsiocb->context1 = lpfc_nlp_get(ndlp); 5179 if (!elsiocb->context1) { 5180 lpfc_els_free_iocb(phba, elsiocb); 5181 return 1; 5182 } 5183 5184 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5185 if (rc == IOCB_ERROR) { 5186 lpfc_els_free_iocb(phba, elsiocb); 5187 lpfc_nlp_put(ndlp); 5188 return 1; 5189 } 5190 5191 /* Xmit ELS ACC response tag <ulpIoTag> */ 5192 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5193 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5194 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5195 "RPI: x%x, fc_flag x%x\n", 5196 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5197 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5198 ndlp->nlp_rpi, vport->fc_flag); 5199 return 0; 5200 } 5201 5202 /** 5203 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 5204 * @vport: pointer to a virtual N_Port data structure. 5205 * @oldiocb: pointer to the original lpfc command iocb data structure. 5206 * @ndlp: pointer to a node-list data structure. 5207 * 5208 * This routine prepares and issues an Accept (ACC) response to Process 5209 * Login (PRLI) ELS command. 
It simply prepares the payload of the IOCB 5210 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5211 * 5212 * Note that the ndlp reference count will be incremented by 1 for holding the 5213 * ndlp and the reference to ndlp will be stored into the context1 field of 5214 * the IOCB for the completion callback function to the PRLI Accept response 5215 * ELS IOCB command. 5216 * 5217 * Return code 5218 * 0 - Successfully issued acc prli response 5219 * 1 - Failed to issue acc prli response 5220 **/ 5221 int 5222 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5223 struct lpfc_nodelist *ndlp) 5224 { 5225 struct lpfc_hba *phba = vport->phba; 5226 PRLI *npr; 5227 struct lpfc_nvme_prli *npr_nvme; 5228 lpfc_vpd_t *vpd; 5229 IOCB_t *icmd; 5230 IOCB_t *oldcmd; 5231 struct lpfc_iocbq *elsiocb; 5232 uint8_t *pcmd; 5233 uint16_t cmdsize; 5234 uint32_t prli_fc4_req, *req_payload; 5235 struct lpfc_dmabuf *req_buf; 5236 int rc; 5237 u32 elsrspcmd; 5238 5239 /* Need the incoming PRLI payload to determine if the ACC is for an 5240 * FC4 or NVME PRLI type. The PRLI type is at word 1. 5241 */ 5242 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 5243 req_payload = (((uint32_t *)req_buf->virt) + 1); 5244 5245 /* PRLI type payload is at byte 3 for FCP or NVME. */ 5246 prli_fc4_req = be32_to_cpu(*req_payload); 5247 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 5248 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5249 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 5250 prli_fc4_req, *((uint32_t *)req_payload)); 5251 5252 if (prli_fc4_req == PRLI_FCP_TYPE) { 5253 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 5254 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 5255 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5256 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 5257 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 5258 } else { 5259 return 1; 5260 } 5261 5262 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5263 ndlp->nlp_DID, elsrspcmd); 5264 if (!elsiocb) 5265 return 1; 5266 5267 icmd = &elsiocb->iocb; 5268 oldcmd = &oldiocb->iocb; 5269 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5270 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5271 5272 /* Xmit PRLI ACC response tag <ulpIoTag> */ 5273 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5274 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 5275 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5276 elsiocb->iotag, elsiocb->iocb.ulpContext, 5277 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5278 ndlp->nlp_rpi); 5279 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5280 memset(pcmd, 0, cmdsize); 5281 5282 *((uint32_t *)(pcmd)) = elsrspcmd; 5283 pcmd += sizeof(uint32_t); 5284 5285 /* For PRLI, remainder of payload is PRLI parameter page */ 5286 vpd = &phba->vpd; 5287 5288 if (prli_fc4_req == PRLI_FCP_TYPE) { 5289 /* 5290 * If the remote port is a target and our firmware version 5291 * is 3.20 or later, set the following bits for FC-TAPE 5292 * support. 
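 * (The firmware-level check below is expressed as vpd->rev.feaLevelHigh
 * >= 0x02; the ConfmComplAllowed, Retry and TaskRetryIdReq bits that get
 * set are what advertise FC-TAPE support in the PRLI accept parameter
 * page.)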
5293 */ 5294 npr = (PRLI *) pcmd; 5295 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5296 (vpd->rev.feaLevelHigh >= 0x02)) { 5297 npr->ConfmComplAllowed = 1; 5298 npr->Retry = 1; 5299 npr->TaskRetryIdReq = 1; 5300 } 5301 npr->acceptRspCode = PRLI_REQ_EXECUTED; 5302 npr->estabImagePair = 1; 5303 npr->readXferRdyDis = 1; 5304 npr->ConfmComplAllowed = 1; 5305 npr->prliType = PRLI_FCP_TYPE; 5306 npr->initiatorFunc = 1; 5307 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5308 /* Respond with an NVME PRLI Type */ 5309 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 5310 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 5311 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 5312 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 5313 if (phba->nvmet_support) { 5314 bf_set(prli_tgt, npr_nvme, 1); 5315 bf_set(prli_disc, npr_nvme, 1); 5316 if (phba->cfg_nvme_enable_fb) { 5317 bf_set(prli_fba, npr_nvme, 1); 5318 5319 /* TBD. Target mode needs to post buffers 5320 * that support the configured first burst 5321 * byte size. 5322 */ 5323 bf_set(prli_fb_sz, npr_nvme, 5324 phba->cfg_nvmet_fb_size); 5325 } 5326 } else { 5327 bf_set(prli_init, npr_nvme, 1); 5328 } 5329 5330 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 5331 "6015 NVME issue PRLI ACC word1 x%08x " 5332 "word4 x%08x word5 x%08x flag x%x, " 5333 "fcp_info x%x nlp_type x%x\n", 5334 npr_nvme->word1, npr_nvme->word4, 5335 npr_nvme->word5, ndlp->nlp_flag, 5336 ndlp->nlp_fcp_info, ndlp->nlp_type); 5337 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 5338 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 5339 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 5340 } else 5341 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5342 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 5343 prli_fc4_req, ndlp->nlp_fc4_type, 5344 ndlp->nlp_DID); 5345 5346 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5347 "Issue ACC PRLI: did:x%x flg:x%x", 5348 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5349 5350 phba->fc_stat.elsXmitACC++; 5351 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5352 elsiocb->context1 = lpfc_nlp_get(ndlp); 5353 if (!elsiocb->context1) { 5354 lpfc_els_free_iocb(phba, elsiocb); 5355 return 1; 5356 } 5357 5358 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5359 if (rc == IOCB_ERROR) { 5360 lpfc_els_free_iocb(phba, elsiocb); 5361 lpfc_nlp_put(ndlp); 5362 return 1; 5363 } 5364 5365 return 0; 5366 } 5367 5368 /** 5369 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 5370 * @vport: pointer to a virtual N_Port data structure. 5371 * @format: rnid command format. 5372 * @oldiocb: pointer to the original lpfc command iocb data structure. 5373 * @ndlp: pointer to a node-list data structure. 5374 * 5375 * This routine issues a Request Node Identification Data (RNID) Accept 5376 * (ACC) response. It constructs the RNID ACC response command according to 5377 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 5378 * issue the response. 5379 * 5380 * Note that the ndlp reference count will be incremented by 1 for holding the 5381 * ndlp and the reference to ndlp will be stored into the context1 field of 5382 * the IOCB for the completion callback function. 
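 *
 * The ACC payload carries the common identification data (the port and
 * node names). When @format requests topology discovery
 * (RNID_TOPOLOGY_DISC), an RNID_TOP_DISC block is appended as the
 * specific identification data; for format 0 the specific-data length is
 * reported as zero.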
5383 * 5384 * Return code 5385 * 0 - Successfully issued acc rnid response 5386 * 1 - Failed to issue acc rnid response 5387 **/ 5388 static int 5389 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 5390 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5391 { 5392 struct lpfc_hba *phba = vport->phba; 5393 RNID *rn; 5394 IOCB_t *icmd, *oldcmd; 5395 struct lpfc_iocbq *elsiocb; 5396 uint8_t *pcmd; 5397 uint16_t cmdsize; 5398 int rc; 5399 5400 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 5401 + (2 * sizeof(struct lpfc_name)); 5402 if (format) 5403 cmdsize += sizeof(RNID_TOP_DISC); 5404 5405 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5406 ndlp->nlp_DID, ELS_CMD_ACC); 5407 if (!elsiocb) 5408 return 1; 5409 5410 icmd = &elsiocb->iocb; 5411 oldcmd = &oldiocb->iocb; 5412 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5413 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5414 5415 /* Xmit RNID ACC response tag <ulpIoTag> */ 5416 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5417 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 5418 elsiocb->iotag, elsiocb->iocb.ulpContext); 5419 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5420 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5421 pcmd += sizeof(uint32_t); 5422 5423 memset(pcmd, 0, sizeof(RNID)); 5424 rn = (RNID *) (pcmd); 5425 rn->Format = format; 5426 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 5427 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5428 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5429 switch (format) { 5430 case 0: 5431 rn->SpecificLen = 0; 5432 break; 5433 case RNID_TOPOLOGY_DISC: 5434 rn->SpecificLen = sizeof(RNID_TOP_DISC); 5435 memcpy(&rn->un.topologyDisc.portName, 5436 &vport->fc_portname, sizeof(struct lpfc_name)); 5437 rn->un.topologyDisc.unitType = RNID_HBA; 5438 rn->un.topologyDisc.physPort = 0; 5439 rn->un.topologyDisc.attachedNodes = 0; 5440 break; 5441 default: 5442 rn->CommonLen = 0; 5443 rn->SpecificLen = 0; 5444 break; 5445 } 5446 5447 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5448 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 5449 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5450 5451 phba->fc_stat.elsXmitACC++; 5452 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5453 elsiocb->context1 = lpfc_nlp_get(ndlp); 5454 if (!elsiocb->context1) { 5455 lpfc_els_free_iocb(phba, elsiocb); 5456 return 1; 5457 } 5458 5459 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5460 if (rc == IOCB_ERROR) { 5461 lpfc_els_free_iocb(phba, elsiocb); 5462 lpfc_nlp_put(ndlp); 5463 return 1; 5464 } 5465 5466 return 0; 5467 } 5468 5469 /** 5470 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 5471 * @vport: pointer to a virtual N_Port data structure. 5472 * @iocb: pointer to the lpfc command iocb data structure. 5473 * @ndlp: pointer to a node-list data structure. 
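 *
 * This routine parses the RRQ payload of the unsolicited @iocb, selects
 * the OX_ID or the RX_ID as the exchange to clear depending on whether
 * this port originated the exchange, and clears the matching active RRQ
 * (if any) tracked for @ndlp.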
5474 * 5475 * Return 5476 **/ 5477 static void 5478 lpfc_els_clear_rrq(struct lpfc_vport *vport, 5479 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 5480 { 5481 struct lpfc_hba *phba = vport->phba; 5482 uint8_t *pcmd; 5483 struct RRQ *rrq; 5484 uint16_t rxid; 5485 uint16_t xri; 5486 struct lpfc_node_rrq *prrq; 5487 5488 5489 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 5490 pcmd += sizeof(uint32_t); 5491 rrq = (struct RRQ *)pcmd; 5492 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 5493 rxid = bf_get(rrq_rxid, rrq); 5494 5495 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5496 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 5497 " x%x x%x\n", 5498 be32_to_cpu(bf_get(rrq_did, rrq)), 5499 bf_get(rrq_oxid, rrq), 5500 rxid, 5501 iocb->iotag, iocb->iocb.ulpContext); 5502 5503 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5504 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 5505 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 5506 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 5507 xri = bf_get(rrq_oxid, rrq); 5508 else 5509 xri = rxid; 5510 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 5511 if (prrq) 5512 lpfc_clr_rrq_active(phba, xri, prrq); 5513 return; 5514 } 5515 5516 /** 5517 * lpfc_els_rsp_echo_acc - Issue echo acc response 5518 * @vport: pointer to a virtual N_Port data structure. 5519 * @data: pointer to echo data to return in the accept. 5520 * @oldiocb: pointer to the original lpfc command iocb data structure. 5521 * @ndlp: pointer to a node-list data structure. 5522 * 5523 * Return code 5524 * 0 - Successfully issued acc echo response 5525 * 1 - Failed to issue acc echo response 5526 **/ 5527 static int 5528 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 5529 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5530 { 5531 struct lpfc_hba *phba = vport->phba; 5532 struct lpfc_iocbq *elsiocb; 5533 uint8_t *pcmd; 5534 uint16_t cmdsize; 5535 int rc; 5536 5537 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 5538 5539 /* The accumulated length can exceed the BPL_SIZE. 
For
5540 * now, use this as the limit
5541 */
5542 if (cmdsize > LPFC_BPL_SIZE)
5543 cmdsize = LPFC_BPL_SIZE;
5544 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5545 ndlp->nlp_DID, ELS_CMD_ACC);
5546 if (!elsiocb)
5547 return 1;
5548
5549 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
5550 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
5551
5552 /* Xmit ECHO ACC response tag <ulpIoTag> */
5553 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5554 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
5555 elsiocb->iotag, elsiocb->iocb.ulpContext);
5556 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5557 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5558 pcmd += sizeof(uint32_t);
5559 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
5560
5561 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5562 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
5563 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
5564
5565 phba->fc_stat.elsXmitACC++;
5566 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5567 elsiocb->context1 = lpfc_nlp_get(ndlp);
5568 if (!elsiocb->context1) {
5569 lpfc_els_free_iocb(phba, elsiocb);
5570 return 1;
5571 }
5572
5573 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5574 if (rc == IOCB_ERROR) {
5575 lpfc_els_free_iocb(phba, elsiocb);
5576 lpfc_nlp_put(ndlp);
5577 return 1;
5578 }
5579
5580 return 0;
5581 }
5582
5583 /**
5584 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
5585 * @vport: pointer to a host virtual N_Port data structure.
5586 *
5587 * This routine issues Address Discover (ADISC) ELS commands to those
5588 * N_Ports of the @vport which are in node port recovery state and for which
5589 * an ADISC has not yet been issued. Each time an ADISC ELS IOCB is issued by
5590 * invoking the lpfc_issue_els_adisc() routine, the @vport's discovery count
5591 * (num_disc_nodes) is incremented. If num_disc_nodes reaches the
5592 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
5593 * set in the @vport's fc_flag and issuing of the remaining ADISC IOCBs is
5594 * deferred to a later pass. Otherwise, after walking through all the ndlps
5595 * of the @vport without issuing any ADISC, the FC_NLP_MORE bit is cleared
5596 * from the @vport's fc_flag to indicate that no more ADISCs need to be
5597 * sent.
5598 *
5599 * Return code
5600 * The number of N_Ports with adisc issued.
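 *
 * If FC_NLP_MORE remains set in the @vport's fc_flag, a later discovery
 * pass (for example, from the ADISC completion path) is expected to call
 * this routine again to issue the ADISCs that were deferred.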
5601 **/
5602 int
5603 lpfc_els_disc_adisc(struct lpfc_vport *vport)
5604 {
5605 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5606 struct lpfc_nodelist *ndlp, *next_ndlp;
5607 int sentadisc = 0;
5608
5609 /* go thru NPR nodes and issue any remaining ELS ADISCs */
5610 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5611 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5612 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5613 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
5614 spin_lock_irq(&ndlp->lock);
5615 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5616 spin_unlock_irq(&ndlp->lock);
5617 ndlp->nlp_prev_state = ndlp->nlp_state;
5618 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5619 lpfc_issue_els_adisc(vport, ndlp, 0);
5620 sentadisc++;
5621 vport->num_disc_nodes++;
5622 if (vport->num_disc_nodes >=
5623 vport->cfg_discovery_threads) {
5624 spin_lock_irq(shost->host_lock);
5625 vport->fc_flag |= FC_NLP_MORE;
5626 spin_unlock_irq(shost->host_lock);
5627 break;
5628 }
5629 }
5630 }
5631 if (sentadisc == 0) {
5632 spin_lock_irq(shost->host_lock);
5633 vport->fc_flag &= ~FC_NLP_MORE;
5634 spin_unlock_irq(shost->host_lock);
5635 }
5636 return sentadisc;
5637 }
5638
5639 /**
5640 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
5641 * @vport: pointer to a host virtual N_Port data structure.
5642 *
5643 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
5644 * of a @vport which are in node port recovery state. Each time an ELS
5645 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
5646 * @vport's discovery count (num_disc_nodes) is incremented. If
5647 * num_disc_nodes reaches the pre-configured threshold
5648 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport's
5649 * fc_flag and issuing of the remaining PLOGI IOCBs is deferred to a
5650 * later pass. On the other hand, after walking through all the ndlps of
5651 * the @vport without issuing any PLOGI, the FC_NLP_MORE bit is cleared
5652 * from the @vport's fc_flag to indicate that no more PLOGIs
5653 * need to be sent.
5654 *
5655 * Return code
5656 * The number of N_Ports with plogi issued.
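 *
 * If FC_NLP_MORE remains set in the @vport's fc_flag, a later discovery
 * pass is expected to call this routine again for the deferred PLOGIs;
 * the discovery timeout (lpfc_set_disctmo()) is re-armed here whenever at
 * least one PLOGI was actually sent.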
5657 **/ 5658 int 5659 lpfc_els_disc_plogi(struct lpfc_vport *vport) 5660 { 5661 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5662 struct lpfc_nodelist *ndlp, *next_ndlp; 5663 int sentplogi = 0; 5664 5665 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 5666 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5667 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 5668 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 5669 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 5670 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 5671 ndlp->nlp_prev_state = ndlp->nlp_state; 5672 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 5673 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5674 sentplogi++; 5675 vport->num_disc_nodes++; 5676 if (vport->num_disc_nodes >= 5677 vport->cfg_discovery_threads) { 5678 spin_lock_irq(shost->host_lock); 5679 vport->fc_flag |= FC_NLP_MORE; 5680 spin_unlock_irq(shost->host_lock); 5681 break; 5682 } 5683 } 5684 } 5685 5686 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5687 "6452 Discover PLOGI %d flag x%x\n", 5688 sentplogi, vport->fc_flag); 5689 5690 if (sentplogi) { 5691 lpfc_set_disctmo(vport); 5692 } 5693 else { 5694 spin_lock_irq(shost->host_lock); 5695 vport->fc_flag &= ~FC_NLP_MORE; 5696 spin_unlock_irq(shost->host_lock); 5697 } 5698 return sentplogi; 5699 } 5700 5701 static uint32_t 5702 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 5703 uint32_t word0) 5704 { 5705 5706 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 5707 desc->payload.els_req = word0; 5708 desc->length = cpu_to_be32(sizeof(desc->payload)); 5709 5710 return sizeof(struct fc_rdp_link_service_desc); 5711 } 5712 5713 static uint32_t 5714 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 5715 uint8_t *page_a0, uint8_t *page_a2) 5716 { 5717 uint16_t wavelength; 5718 uint16_t temperature; 5719 uint16_t rx_power; 5720 uint16_t tx_bias; 5721 uint16_t tx_power; 5722 uint16_t vcc; 5723 uint16_t flag = 0; 5724 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 5725 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 5726 5727 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 5728 5729 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 5730 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 5731 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 5732 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 5733 5734 if ((trasn_code_byte4->fc_sw_laser) || 5735 (trasn_code_byte5->fc_sw_laser_sl) || 5736 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 5737 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 5738 } else if (trasn_code_byte4->fc_lw_laser) { 5739 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 5740 page_a0[SSF_WAVELENGTH_B0]; 5741 if (wavelength == SFP_WAVELENGTH_LC1310) 5742 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 5743 if (wavelength == SFP_WAVELENGTH_LL1550) 5744 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 5745 } 5746 /* check if its SFP+ */ 5747 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 5748 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 5749 << SFP_FLAG_CT_SHIFT; 5750 5751 /* check if its OPTICAL */ 5752 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
5753 SFP_FLAG_IS_OPTICAL_PORT : 0) 5754 << SFP_FLAG_IS_OPTICAL_SHIFT; 5755 5756 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 5757 page_a2[SFF_TEMPERATURE_B0]); 5758 vcc = (page_a2[SFF_VCC_B1] << 8 | 5759 page_a2[SFF_VCC_B0]); 5760 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 5761 page_a2[SFF_TXPOWER_B0]); 5762 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 5763 page_a2[SFF_TX_BIAS_CURRENT_B0]); 5764 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 5765 page_a2[SFF_RXPOWER_B0]); 5766 desc->sfp_info.temperature = cpu_to_be16(temperature); 5767 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 5768 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 5769 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 5770 desc->sfp_info.vcc = cpu_to_be16(vcc); 5771 5772 desc->sfp_info.flags = cpu_to_be16(flag); 5773 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 5774 5775 return sizeof(struct fc_rdp_sfp_desc); 5776 } 5777 5778 static uint32_t 5779 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 5780 READ_LNK_VAR *stat) 5781 { 5782 uint32_t type; 5783 5784 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 5785 5786 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 5787 5788 desc->info.port_type = cpu_to_be32(type); 5789 5790 desc->info.link_status.link_failure_cnt = 5791 cpu_to_be32(stat->linkFailureCnt); 5792 desc->info.link_status.loss_of_synch_cnt = 5793 cpu_to_be32(stat->lossSyncCnt); 5794 desc->info.link_status.loss_of_signal_cnt = 5795 cpu_to_be32(stat->lossSignalCnt); 5796 desc->info.link_status.primitive_seq_proto_err = 5797 cpu_to_be32(stat->primSeqErrCnt); 5798 desc->info.link_status.invalid_trans_word = 5799 cpu_to_be32(stat->invalidXmitWord); 5800 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 5801 5802 desc->length = cpu_to_be32(sizeof(desc->info)); 5803 5804 return sizeof(struct fc_rdp_link_error_status_desc); 5805 } 5806 5807 static uint32_t 5808 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 5809 struct lpfc_vport *vport) 5810 { 5811 uint32_t bbCredit; 5812 5813 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 5814 5815 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 5816 (vport->fc_sparam.cmn.bbCreditMsb << 8); 5817 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 5818 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 5819 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 5820 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 5821 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 5822 } else { 5823 desc->bbc_info.attached_port_bbc = 0; 5824 } 5825 5826 desc->bbc_info.rtt = 0; 5827 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 5828 5829 return sizeof(struct fc_rdp_bbc_desc); 5830 } 5831 5832 static uint32_t 5833 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 5834 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 5835 { 5836 uint32_t flags = 0; 5837 5838 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5839 5840 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 5841 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 5842 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 5843 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 5844 5845 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5846 flags |= RDP_OET_HIGH_ALARM; 5847 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5848 flags |= RDP_OET_LOW_ALARM; 5849 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5850 flags |= RDP_OET_HIGH_WARNING; 5851 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5852 
flags |= RDP_OET_LOW_WARNING; 5853 5854 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 5855 desc->oed_info.function_flags = cpu_to_be32(flags); 5856 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5857 return sizeof(struct fc_rdp_oed_sfp_desc); 5858 } 5859 5860 static uint32_t 5861 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 5862 struct fc_rdp_oed_sfp_desc *desc, 5863 uint8_t *page_a2) 5864 { 5865 uint32_t flags = 0; 5866 5867 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5868 5869 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 5870 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 5871 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 5872 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 5873 5874 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5875 flags |= RDP_OET_HIGH_ALARM; 5876 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5877 flags |= RDP_OET_LOW_ALARM; 5878 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5879 flags |= RDP_OET_HIGH_WARNING; 5880 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5881 flags |= RDP_OET_LOW_WARNING; 5882 5883 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 5884 desc->oed_info.function_flags = cpu_to_be32(flags); 5885 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5886 return sizeof(struct fc_rdp_oed_sfp_desc); 5887 } 5888 5889 static uint32_t 5890 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 5891 struct fc_rdp_oed_sfp_desc *desc, 5892 uint8_t *page_a2) 5893 { 5894 uint32_t flags = 0; 5895 5896 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5897 5898 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 5899 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 5900 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 5901 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 5902 5903 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5904 flags |= RDP_OET_HIGH_ALARM; 5905 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 5906 flags |= RDP_OET_LOW_ALARM; 5907 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5908 flags |= RDP_OET_HIGH_WARNING; 5909 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 5910 flags |= RDP_OET_LOW_WARNING; 5911 5912 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 5913 desc->oed_info.function_flags = cpu_to_be32(flags); 5914 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5915 return sizeof(struct fc_rdp_oed_sfp_desc); 5916 } 5917 5918 static uint32_t 5919 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 5920 struct fc_rdp_oed_sfp_desc *desc, 5921 uint8_t *page_a2) 5922 { 5923 uint32_t flags = 0; 5924 5925 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5926 5927 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 5928 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 5929 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 5930 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 5931 5932 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5933 flags |= RDP_OET_HIGH_ALARM; 5934 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 5935 flags |= RDP_OET_LOW_ALARM; 5936 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5937 flags |= RDP_OET_HIGH_WARNING; 5938 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 5939 flags |= RDP_OET_LOW_WARNING; 5940 5941 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 5942 desc->oed_info.function_flags = cpu_to_be32(flags); 5943 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 5944 return sizeof(struct fc_rdp_oed_sfp_desc); 5945 } 5946 5947 5948 static uint32_t 5949 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 5950 struct fc_rdp_oed_sfp_desc *desc, 5951 uint8_t *page_a2) 5952 { 5953 uint32_t flags = 0; 5954 5955 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5956 5957 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 5958 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 5959 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 5960 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 5961 5962 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5963 flags |= RDP_OET_HIGH_ALARM; 5964 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 5965 flags |= RDP_OET_LOW_ALARM; 5966 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5967 flags |= RDP_OET_HIGH_WARNING; 5968 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 5969 flags |= RDP_OET_LOW_WARNING; 5970 5971 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 5972 desc->oed_info.function_flags = cpu_to_be32(flags); 5973 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5974 return sizeof(struct fc_rdp_oed_sfp_desc); 5975 } 5976 5977 static uint32_t 5978 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 5979 uint8_t *page_a0, struct lpfc_vport *vport) 5980 { 5981 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 5982 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 5983 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 5984 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 5985 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 5986 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 5987 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 5988 return sizeof(struct fc_rdp_opd_sfp_desc); 5989 } 5990 5991 static uint32_t 5992 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 5993 { 5994 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 5995 return 0; 5996 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 5997 5998 desc->info.CorrectedBlocks = 5999 cpu_to_be32(stat->fecCorrBlkCount); 6000 desc->info.UncorrectableBlocks = 6001 cpu_to_be32(stat->fecUncorrBlkCount); 6002 6003 desc->length = cpu_to_be32(sizeof(desc->info)); 6004 6005 return sizeof(struct fc_fec_rdp_desc); 6006 } 6007 6008 static uint32_t 6009 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 6010 { 6011 uint16_t rdp_cap = 0; 6012 uint16_t rdp_speed; 6013 6014 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 6015 6016 switch (phba->fc_linkspeed) { 6017 case LPFC_LINK_SPEED_1GHZ: 6018 rdp_speed = RDP_PS_1GB; 6019 break; 6020 case LPFC_LINK_SPEED_2GHZ: 6021 rdp_speed = RDP_PS_2GB; 6022 break; 6023 case LPFC_LINK_SPEED_4GHZ: 6024 rdp_speed = RDP_PS_4GB; 6025 break; 6026 case LPFC_LINK_SPEED_8GHZ: 6027 rdp_speed = RDP_PS_8GB; 6028 break; 6029 case LPFC_LINK_SPEED_10GHZ: 6030 rdp_speed = RDP_PS_10GB; 6031 break; 6032 case LPFC_LINK_SPEED_16GHZ: 6033 rdp_speed = RDP_PS_16GB; 6034 break; 6035 case LPFC_LINK_SPEED_32GHZ: 6036 rdp_speed = RDP_PS_32GB; 6037 break; 6038 case LPFC_LINK_SPEED_64GHZ: 6039 rdp_speed = RDP_PS_64GB; 6040 break; 6041 default: 6042 rdp_speed = RDP_PS_UNKNOWN; 6043 break; 6044 } 6045 6046 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 6047 6048 if (phba->lmt & LMT_128Gb) 6049 rdp_cap |= RDP_PS_128GB; 6050 if (phba->lmt & LMT_64Gb) 6051 rdp_cap |= RDP_PS_64GB; 6052 if (phba->lmt & LMT_32Gb) 6053 rdp_cap |= RDP_PS_32GB; 6054 if 
(phba->lmt & LMT_16Gb) 6055 rdp_cap |= RDP_PS_16GB; 6056 if (phba->lmt & LMT_10Gb) 6057 rdp_cap |= RDP_PS_10GB; 6058 if (phba->lmt & LMT_8Gb) 6059 rdp_cap |= RDP_PS_8GB; 6060 if (phba->lmt & LMT_4Gb) 6061 rdp_cap |= RDP_PS_4GB; 6062 if (phba->lmt & LMT_2Gb) 6063 rdp_cap |= RDP_PS_2GB; 6064 if (phba->lmt & LMT_1Gb) 6065 rdp_cap |= RDP_PS_1GB; 6066 6067 if (rdp_cap == 0) 6068 rdp_cap = RDP_CAP_UNKNOWN; 6069 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 6070 rdp_cap |= RDP_CAP_USER_CONFIGURED; 6071 6072 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 6073 desc->length = cpu_to_be32(sizeof(desc->info)); 6074 return sizeof(struct fc_rdp_port_speed_desc); 6075 } 6076 6077 static uint32_t 6078 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 6079 struct lpfc_vport *vport) 6080 { 6081 6082 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6083 6084 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 6085 sizeof(desc->port_names.wwnn)); 6086 6087 memcpy(desc->port_names.wwpn, &vport->fc_portname, 6088 sizeof(desc->port_names.wwpn)); 6089 6090 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6091 return sizeof(struct fc_rdp_port_name_desc); 6092 } 6093 6094 static uint32_t 6095 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 6096 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 6097 { 6098 6099 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 6100 if (vport->fc_flag & FC_FABRIC) { 6101 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 6102 sizeof(desc->port_names.wwnn)); 6103 6104 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 6105 sizeof(desc->port_names.wwpn)); 6106 } else { /* Point to Point */ 6107 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 6108 sizeof(desc->port_names.wwnn)); 6109 6110 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 6111 sizeof(desc->port_names.wwpn)); 6112 } 6113 6114 desc->length = cpu_to_be32(sizeof(desc->port_names)); 6115 return sizeof(struct fc_rdp_port_name_desc); 6116 } 6117 6118 static void 6119 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 6120 int status) 6121 { 6122 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 6123 struct lpfc_vport *vport = ndlp->vport; 6124 struct lpfc_iocbq *elsiocb; 6125 struct ulp_bde64 *bpl; 6126 IOCB_t *icmd; 6127 uint8_t *pcmd; 6128 struct ls_rjt *stat; 6129 struct fc_rdp_res_frame *rdp_res; 6130 uint32_t cmdsize, len; 6131 uint16_t *flag_ptr; 6132 int rc; 6133 6134 if (status != SUCCESS) 6135 goto error; 6136 6137 /* This will change once we know the true size of the RDP payload */ 6138 cmdsize = sizeof(struct fc_rdp_res_frame); 6139 6140 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 6141 lpfc_max_els_tries, rdp_context->ndlp, 6142 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 6143 if (!elsiocb) 6144 goto free_rdp_context; 6145 6146 icmd = &elsiocb->iocb; 6147 icmd->ulpContext = rdp_context->rx_id; 6148 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6149 6150 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6151 "2171 Xmit RDP response tag x%x xri x%x, " 6152 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 6153 elsiocb->iotag, elsiocb->iocb.ulpContext, 6154 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6155 ndlp->nlp_rpi); 6156 rdp_res = (struct fc_rdp_res_frame *) 6157 (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6158 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6159 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 6160 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6161 6162 /* Update Alarm 
and Warning */ 6163 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 6164 phba->sfp_alarm |= *flag_ptr; 6165 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 6166 phba->sfp_warning |= *flag_ptr; 6167 6168 /* For RDP payload */ 6169 len = 8; 6170 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 6171 (len + pcmd), ELS_CMD_RDP); 6172 6173 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 6174 rdp_context->page_a0, rdp_context->page_a2); 6175 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 6176 phba); 6177 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 6178 (len + pcmd), &rdp_context->link_stat); 6179 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 6180 (len + pcmd), vport); 6181 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 6182 (len + pcmd), vport, ndlp); 6183 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 6184 &rdp_context->link_stat); 6185 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 6186 &rdp_context->link_stat, vport); 6187 len += lpfc_rdp_res_oed_temp_desc(phba, 6188 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6189 rdp_context->page_a2); 6190 len += lpfc_rdp_res_oed_voltage_desc(phba, 6191 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6192 rdp_context->page_a2); 6193 len += lpfc_rdp_res_oed_txbias_desc(phba, 6194 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6195 rdp_context->page_a2); 6196 len += lpfc_rdp_res_oed_txpower_desc(phba, 6197 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6198 rdp_context->page_a2); 6199 len += lpfc_rdp_res_oed_rxpower_desc(phba, 6200 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6201 rdp_context->page_a2); 6202 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 6203 rdp_context->page_a0, vport); 6204 6205 rdp_res->length = cpu_to_be32(len - 8); 6206 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6207 6208 /* Now that we know the true size of the payload, update the BPL */ 6209 bpl = (struct ulp_bde64 *) 6210 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 6211 bpl->tus.f.bdeSize = len; 6212 bpl->tus.f.bdeFlags = 0; 6213 bpl->tus.w = le32_to_cpu(bpl->tus.w); 6214 6215 phba->fc_stat.elsXmitACC++; 6216 elsiocb->context1 = lpfc_nlp_get(ndlp); 6217 if (!elsiocb->context1) { 6218 lpfc_els_free_iocb(phba, elsiocb); 6219 goto free_rdp_context; 6220 } 6221 6222 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6223 if (rc == IOCB_ERROR) { 6224 lpfc_els_free_iocb(phba, elsiocb); 6225 lpfc_nlp_put(ndlp); 6226 } 6227 6228 goto free_rdp_context; 6229 6230 error: 6231 cmdsize = 2 * sizeof(uint32_t); 6232 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 6233 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 6234 if (!elsiocb) 6235 goto free_rdp_context; 6236 6237 icmd = &elsiocb->iocb; 6238 icmd->ulpContext = rdp_context->rx_id; 6239 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6240 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6241 6242 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 6243 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6244 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6245 6246 phba->fc_stat.elsXmitLSRJT++; 6247 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6248 elsiocb->context1 = lpfc_nlp_get(ndlp); 6249 if (!elsiocb->context1) { 6250 lpfc_els_free_iocb(phba, elsiocb); 6251 goto free_rdp_context; 6252 } 6253 6254 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6255 if (rc == IOCB_ERROR) 
{ 6256 lpfc_els_free_iocb(phba, elsiocb); 6257 lpfc_nlp_put(ndlp); 6258 } 6259 6260 free_rdp_context: 6261 /* This reference put is for the original unsolicited RDP. If the 6262 * iocb prep failed, there is no reference to remove. 6263 */ 6264 lpfc_nlp_put(ndlp); 6265 kfree(rdp_context); 6266 } 6267 6268 static int 6269 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 6270 { 6271 LPFC_MBOXQ_t *mbox = NULL; 6272 int rc; 6273 6274 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6275 if (!mbox) { 6276 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 6277 "7105 failed to allocate mailbox memory"); 6278 return 1; 6279 } 6280 6281 if (lpfc_sli4_dump_page_a0(phba, mbox)) 6282 goto prep_mbox_fail; 6283 mbox->vport = rdp_context->ndlp->vport; 6284 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 6285 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 6286 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6287 if (rc == MBX_NOT_FINISHED) 6288 goto issue_mbox_fail; 6289 6290 return 0; 6291 6292 prep_mbox_fail: 6293 issue_mbox_fail: 6294 mempool_free(mbox, phba->mbox_mem_pool); 6295 return 1; 6296 } 6297 6298 /* 6299 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 6300 * @vport: pointer to a host virtual N_Port data structure. 6301 * @cmdiocb: pointer to lpfc command iocb data structure. 6302 * @ndlp: pointer to a node-list data structure. 6303 * 6304 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 6305 * IOCB. First, the payload of the unsolicited RDP is checked. 6306 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 6307 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 6308 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 6309 * gather all data and send RDP response. 6310 * 6311 * Return code 6312 * 0 - Sent the acc response 6313 * 1 - Sent the reject response. 
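 *
 * The ACC built by lpfc_els_rdp_cmpl() is a sequence of RDP descriptors
 * (link service, SFP, port speed, link error status, port name, FEC,
 * buffer credit, the optical element (OED) descriptors and the optical
 * product data); the descriptor list length is written into the response
 * header and the BPL size is updated only after all descriptors have been
 * appended, since the final payload length is not known up front.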
6314 */ 6315 static int 6316 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6317 struct lpfc_nodelist *ndlp) 6318 { 6319 struct lpfc_hba *phba = vport->phba; 6320 struct lpfc_dmabuf *pcmd; 6321 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 6322 struct fc_rdp_req_frame *rdp_req; 6323 struct lpfc_rdp_context *rdp_context; 6324 IOCB_t *cmd = NULL; 6325 struct ls_rjt stat; 6326 6327 if (phba->sli_rev < LPFC_SLI_REV4 || 6328 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6329 LPFC_SLI_INTF_IF_TYPE_2) { 6330 rjt_err = LSRJT_UNABLE_TPC; 6331 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6332 goto error; 6333 } 6334 6335 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 6336 rjt_err = LSRJT_UNABLE_TPC; 6337 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6338 goto error; 6339 } 6340 6341 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6342 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 6343 6344 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6345 "2422 ELS RDP Request " 6346 "dec len %d tag x%x port_id %d len %d\n", 6347 be32_to_cpu(rdp_req->rdp_des_length), 6348 be32_to_cpu(rdp_req->nport_id_desc.tag), 6349 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 6350 be32_to_cpu(rdp_req->nport_id_desc.length)); 6351 6352 if (sizeof(struct fc_rdp_nport_desc) != 6353 be32_to_cpu(rdp_req->rdp_des_length)) 6354 goto rjt_logerr; 6355 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 6356 goto rjt_logerr; 6357 if (RDP_NPORT_ID_SIZE != 6358 be32_to_cpu(rdp_req->nport_id_desc.length)) 6359 goto rjt_logerr; 6360 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 6361 if (!rdp_context) { 6362 rjt_err = LSRJT_UNABLE_TPC; 6363 goto error; 6364 } 6365 6366 cmd = &cmdiocb->iocb; 6367 rdp_context->ndlp = lpfc_nlp_get(ndlp); 6368 if (!rdp_context->ndlp) { 6369 kfree(rdp_context); 6370 rjt_err = LSRJT_UNABLE_TPC; 6371 goto error; 6372 } 6373 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 6374 rdp_context->rx_id = cmd->ulpContext; 6375 rdp_context->cmpl = lpfc_els_rdp_cmpl; 6376 if (lpfc_get_rdp_info(phba, rdp_context)) { 6377 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 6378 "2423 Unable to send mailbox"); 6379 kfree(rdp_context); 6380 rjt_err = LSRJT_UNABLE_TPC; 6381 lpfc_nlp_put(ndlp); 6382 goto error; 6383 } 6384 6385 return 0; 6386 6387 rjt_logerr: 6388 rjt_err = LSRJT_LOGICAL_ERR; 6389 6390 error: 6391 memset(&stat, 0, sizeof(stat)); 6392 stat.un.b.lsRjtRsnCode = rjt_err; 6393 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 6394 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6395 return 1; 6396 } 6397 6398 6399 static void 6400 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6401 { 6402 MAILBOX_t *mb; 6403 IOCB_t *icmd; 6404 uint8_t *pcmd; 6405 struct lpfc_iocbq *elsiocb; 6406 struct lpfc_nodelist *ndlp; 6407 struct ls_rjt *stat; 6408 union lpfc_sli4_cfg_shdr *shdr; 6409 struct lpfc_lcb_context *lcb_context; 6410 struct fc_lcb_res_frame *lcb_res; 6411 uint32_t cmdsize, shdr_status, shdr_add_status; 6412 int rc; 6413 6414 mb = &pmb->u.mb; 6415 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 6416 ndlp = lcb_context->ndlp; 6417 pmb->ctx_ndlp = NULL; 6418 pmb->ctx_buf = NULL; 6419 6420 shdr = (union lpfc_sli4_cfg_shdr *) 6421 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 6422 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6423 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6424 6425 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 6426 "0194 SET_BEACON_CONFIG mailbox " 6427 "completed 
with status x%x add_status x%x," 6428 " mbx status x%x\n", 6429 shdr_status, shdr_add_status, mb->mbxStatus); 6430 6431 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 6432 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 6433 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 6434 mempool_free(pmb, phba->mbox_mem_pool); 6435 goto error; 6436 } 6437 6438 mempool_free(pmb, phba->mbox_mem_pool); 6439 cmdsize = sizeof(struct fc_lcb_res_frame); 6440 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6441 lpfc_max_els_tries, ndlp, 6442 ndlp->nlp_DID, ELS_CMD_ACC); 6443 6444 /* Decrement the ndlp reference count from previous mbox command */ 6445 lpfc_nlp_put(ndlp); 6446 6447 if (!elsiocb) 6448 goto free_lcb_context; 6449 6450 lcb_res = (struct fc_lcb_res_frame *) 6451 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6452 6453 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 6454 icmd = &elsiocb->iocb; 6455 icmd->ulpContext = lcb_context->rx_id; 6456 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 6457 6458 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6459 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 6460 lcb_res->lcb_sub_command = lcb_context->sub_command; 6461 lcb_res->lcb_type = lcb_context->type; 6462 lcb_res->capability = lcb_context->capability; 6463 lcb_res->lcb_frequency = lcb_context->frequency; 6464 lcb_res->lcb_duration = lcb_context->duration; 6465 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6466 phba->fc_stat.elsXmitACC++; 6467 6468 elsiocb->context1 = lpfc_nlp_get(ndlp); 6469 if (!elsiocb->context1) { 6470 lpfc_els_free_iocb(phba, elsiocb); 6471 goto out; 6472 } 6473 6474 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6475 if (rc == IOCB_ERROR) { 6476 lpfc_els_free_iocb(phba, elsiocb); 6477 lpfc_nlp_put(ndlp); 6478 } 6479 out: 6480 kfree(lcb_context); 6481 return; 6482 6483 error: 6484 cmdsize = sizeof(struct fc_lcb_res_frame); 6485 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6486 lpfc_max_els_tries, ndlp, 6487 ndlp->nlp_DID, ELS_CMD_LS_RJT); 6488 lpfc_nlp_put(ndlp); 6489 if (!elsiocb) 6490 goto free_lcb_context; 6491 6492 icmd = &elsiocb->iocb; 6493 icmd->ulpContext = lcb_context->rx_id; 6494 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 6495 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6496 6497 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 6498 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6499 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6500 6501 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 6502 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 6503 6504 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6505 phba->fc_stat.elsXmitLSRJT++; 6506 elsiocb->context1 = lpfc_nlp_get(ndlp); 6507 if (!elsiocb->context1) { 6508 lpfc_els_free_iocb(phba, elsiocb); 6509 goto free_lcb_context; 6510 } 6511 6512 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6513 if (rc == IOCB_ERROR) { 6514 lpfc_els_free_iocb(phba, elsiocb); 6515 lpfc_nlp_put(ndlp); 6516 } 6517 free_lcb_context: 6518 kfree(lcb_context); 6519 } 6520 6521 static int 6522 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 6523 struct lpfc_lcb_context *lcb_context, 6524 uint32_t beacon_state) 6525 { 6526 struct lpfc_hba *phba = vport->phba; 6527 union lpfc_sli4_cfg_shdr *cfg_shdr; 6528 LPFC_MBOXQ_t *mbox = NULL; 6529 uint32_t len; 6530 int rc; 6531 6532 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6533 if (!mbox) 6534 return 1; 6535 6536 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 6537 len = sizeof(struct 
lpfc_mbx_set_beacon_config) - 6538 sizeof(struct lpfc_sli4_cfg_mhdr); 6539 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6540 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 6541 LPFC_SLI4_MBX_EMBED); 6542 mbox->ctx_ndlp = (void *)lcb_context; 6543 mbox->vport = phba->pport; 6544 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 6545 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 6546 phba->sli4_hba.physical_port); 6547 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 6548 beacon_state); 6549 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 6550 6551 /* 6552 * Check bv1s bit before issuing the mailbox 6553 * if bv1s == 1, LCB V1 supported 6554 * else, LCB V0 supported 6555 */ 6556 6557 if (phba->sli4_hba.pc_sli4_params.bv1s) { 6558 /* COMMON_SET_BEACON_CONFIG_V1 */ 6559 cfg_shdr->request.word9 = BEACON_VERSION_V1; 6560 lcb_context->capability |= LCB_CAPABILITY_DURATION; 6561 bf_set(lpfc_mbx_set_beacon_port_type, 6562 &mbox->u.mqe.un.beacon_config, 0); 6563 bf_set(lpfc_mbx_set_beacon_duration_v1, 6564 &mbox->u.mqe.un.beacon_config, 6565 be16_to_cpu(lcb_context->duration)); 6566 } else { 6567 /* COMMON_SET_BEACON_CONFIG_V0 */ 6568 if (be16_to_cpu(lcb_context->duration) != 0) { 6569 mempool_free(mbox, phba->mbox_mem_pool); 6570 return 1; 6571 } 6572 cfg_shdr->request.word9 = BEACON_VERSION_V0; 6573 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 6574 bf_set(lpfc_mbx_set_beacon_state, 6575 &mbox->u.mqe.un.beacon_config, beacon_state); 6576 bf_set(lpfc_mbx_set_beacon_port_type, 6577 &mbox->u.mqe.un.beacon_config, 1); 6578 bf_set(lpfc_mbx_set_beacon_duration, 6579 &mbox->u.mqe.un.beacon_config, 6580 be16_to_cpu(lcb_context->duration)); 6581 } 6582 6583 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6584 if (rc == MBX_NOT_FINISHED) { 6585 mempool_free(mbox, phba->mbox_mem_pool); 6586 return 1; 6587 } 6588 6589 return 0; 6590 } 6591 6592 6593 /** 6594 * lpfc_els_rcv_lcb - Process an unsolicited LCB 6595 * @vport: pointer to a host virtual N_Port data structure. 6596 * @cmdiocb: pointer to lpfc command iocb data structure. 6597 * @ndlp: pointer to a node-list data structure. 6598 * 6599 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 6600 * First, the payload of the unsolicited LCB is checked. 6601 * Then based on Subcommand beacon will either turn on or off. 6602 * 6603 * Return code 6604 * 0 - Sent the acc response 6605 * 1 - Sent the reject response. 
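 *
 * The beacon state itself is changed through the COMMON_SET_BEACON_CONFIG
 * mailbox command issued by lpfc_sli4_set_beacon(); ports that only
 * support the V0 form of that command cannot honor a non-zero beacon
 * duration, in which case the request is answered with an LS_RJT.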
6606 **/ 6607 static int 6608 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6609 struct lpfc_nodelist *ndlp) 6610 { 6611 struct lpfc_hba *phba = vport->phba; 6612 struct lpfc_dmabuf *pcmd; 6613 uint8_t *lp; 6614 struct fc_lcb_request_frame *beacon; 6615 struct lpfc_lcb_context *lcb_context; 6616 u8 state, rjt_err = 0; 6617 struct ls_rjt stat; 6618 6619 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 6620 lp = (uint8_t *)pcmd->virt; 6621 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 6622 6623 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6624 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 6625 "type x%x frequency %x duration x%x\n", 6626 lp[0], lp[1], lp[2], 6627 beacon->lcb_command, 6628 beacon->lcb_sub_command, 6629 beacon->lcb_type, 6630 beacon->lcb_frequency, 6631 be16_to_cpu(beacon->lcb_duration)); 6632 6633 if (beacon->lcb_sub_command != LPFC_LCB_ON && 6634 beacon->lcb_sub_command != LPFC_LCB_OFF) { 6635 rjt_err = LSRJT_CMD_UNSUPPORTED; 6636 goto rjt; 6637 } 6638 6639 if (phba->sli_rev < LPFC_SLI_REV4 || 6640 phba->hba_flag & HBA_FCOE_MODE || 6641 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6642 LPFC_SLI_INTF_IF_TYPE_2)) { 6643 rjt_err = LSRJT_CMD_UNSUPPORTED; 6644 goto rjt; 6645 } 6646 6647 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 6648 if (!lcb_context) { 6649 rjt_err = LSRJT_UNABLE_TPC; 6650 goto rjt; 6651 } 6652 6653 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 6654 lcb_context->sub_command = beacon->lcb_sub_command; 6655 lcb_context->capability = 0; 6656 lcb_context->type = beacon->lcb_type; 6657 lcb_context->frequency = beacon->lcb_frequency; 6658 lcb_context->duration = beacon->lcb_duration; 6659 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 6660 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 6661 lcb_context->ndlp = lpfc_nlp_get(ndlp); 6662 if (!lcb_context->ndlp) { 6663 rjt_err = LSRJT_UNABLE_TPC; 6664 goto rjt_free; 6665 } 6666 6667 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 6668 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 6669 "0193 failed to send mail box"); 6670 lpfc_nlp_put(ndlp); 6671 rjt_err = LSRJT_UNABLE_TPC; 6672 goto rjt_free; 6673 } 6674 return 0; 6675 6676 rjt_free: 6677 kfree(lcb_context); 6678 rjt: 6679 memset(&stat, 0, sizeof(stat)); 6680 stat.un.b.lsRjtRsnCode = rjt_err; 6681 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6682 return 1; 6683 } 6684 6685 6686 /** 6687 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 6688 * @vport: pointer to a host virtual N_Port data structure. 6689 * 6690 * This routine cleans up any Registration State Change Notification 6691 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 6692 * @vport together with the host_lock is used to prevent multiple thread 6693 * trying to access the RSCN array on a same @vport at the same time. 
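 * The buffers queued on the @vport's fc_rscn_id_list are returned through
 * lpfc_in_buf_free(), the list count is reset, and the FC_RSCN_MODE and
 * FC_RSCN_DISCOVERY flags are cleared before the discovery timer is
 * re-evaluated via lpfc_can_disctmo().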
6694 **/
6695 void
6696 lpfc_els_flush_rscn(struct lpfc_vport *vport)
6697 {
6698 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6699 struct lpfc_hba *phba = vport->phba;
6700 int i;
6701
6702 spin_lock_irq(shost->host_lock);
6703 if (vport->fc_rscn_flush) {
6704 /* Another thread is walking fc_rscn_id_list on this vport */
6705 spin_unlock_irq(shost->host_lock);
6706 return;
6707 }
6708 /* Indicate we are walking lpfc_els_flush_rscn on this vport */
6709 vport->fc_rscn_flush = 1;
6710 spin_unlock_irq(shost->host_lock);
6711
6712 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6713 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
6714 vport->fc_rscn_id_list[i] = NULL;
6715 }
6716 spin_lock_irq(shost->host_lock);
6717 vport->fc_rscn_id_cnt = 0;
6718 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
6719 spin_unlock_irq(shost->host_lock);
6720 lpfc_can_disctmo(vport);
6721 /* Indicate we are done walking this fc_rscn_id_list */
6722 vport->fc_rscn_flush = 0;
6723 }
6724
6725 /**
6726 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
6727 * @vport: pointer to a host virtual N_Port data structure.
6728 * @did: remote destination port identifier.
6729 *
6730 * This routine checks whether there is any pending Registration State
6731 * Change Notification (RSCN) to a @did on @vport.
6732 *
6733 * Return code
6734 * Non-zero - The @did matched a pending rscn
6735 * 0 - not able to match @did with a pending rscn
6736 **/
6737 int
6738 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
6739 {
6740 D_ID ns_did;
6741 D_ID rscn_did;
6742 uint32_t *lp;
6743 uint32_t payload_len, i;
6744 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6745
6746 ns_did.un.word = did;
6747
6748 /* Never match fabric nodes for RSCNs */
6749 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6750 return 0;
6751
6752 /* If we are doing a FULL RSCN rediscovery, match everything */
6753 if (vport->fc_flag & FC_RSCN_DISCOVERY)
6754 return did;
6755
6756 spin_lock_irq(shost->host_lock);
6757 if (vport->fc_rscn_flush) {
6758 /* Another thread is walking fc_rscn_id_list on this vport */
6759 spin_unlock_irq(shost->host_lock);
6760 return 0;
6761 }
6762 /* Indicate we are walking fc_rscn_id_list on this vport */
6763 vport->fc_rscn_flush = 1;
6764 spin_unlock_irq(shost->host_lock);
6765 for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6766 lp = vport->fc_rscn_id_list[i]->virt;
6767 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
6768 payload_len -= sizeof(uint32_t); /* take off word 0 */
6769 while (payload_len) {
6770 rscn_did.un.word = be32_to_cpu(*lp++);
6771 payload_len -= sizeof(uint32_t);
6772 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
6773 case RSCN_ADDRESS_FORMAT_PORT:
6774 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6775 && (ns_did.un.b.area == rscn_did.un.b.area)
6776 && (ns_did.un.b.id == rscn_did.un.b.id))
6777 goto return_did_out;
6778 break;
6779 case RSCN_ADDRESS_FORMAT_AREA:
6780 if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6781 && (ns_did.un.b.area == rscn_did.un.b.area))
6782 goto return_did_out;
6783 break;
6784 case RSCN_ADDRESS_FORMAT_DOMAIN:
6785 if (ns_did.un.b.domain == rscn_did.un.b.domain)
6786 goto return_did_out;
6787 break;
6788 case RSCN_ADDRESS_FORMAT_FABRIC:
6789 goto return_did_out;
6790 }
6791 }
6792 }
6793 /* Indicate we are done with walking fc_rscn_id_list on this vport */
6794 vport->fc_rscn_flush = 0;
6795 return 0;
6796 return_did_out:
6797 /* Indicate we are done with walking fc_rscn_id_list on this
vport */ 6798 vport->fc_rscn_flush = 0; 6799 return did; 6800 } 6801 6802 /** 6803 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 6804 * @vport: pointer to a host virtual N_Port data structure. 6805 * 6806 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 6807 * state machine for a @vport's nodes that are with pending RSCN (Registration 6808 * State Change Notification). 6809 * 6810 * Return code 6811 * 0 - Successful (currently alway return 0) 6812 **/ 6813 static int 6814 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 6815 { 6816 struct lpfc_nodelist *ndlp = NULL; 6817 6818 /* Move all affected nodes by pending RSCNs to NPR state. */ 6819 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6820 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 6821 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 6822 continue; 6823 6824 /* NVME Target mode does not do RSCN Recovery. */ 6825 if (vport->phba->nvmet_support) 6826 continue; 6827 6828 /* If we are in the process of doing discovery on this 6829 * NPort, let it continue on its own. 6830 */ 6831 switch (ndlp->nlp_state) { 6832 case NLP_STE_PLOGI_ISSUE: 6833 case NLP_STE_ADISC_ISSUE: 6834 case NLP_STE_REG_LOGIN_ISSUE: 6835 case NLP_STE_PRLI_ISSUE: 6836 case NLP_STE_LOGO_ISSUE: 6837 continue; 6838 } 6839 6840 /* Check to see if we need to NVME rescan this target 6841 * remoteport. 6842 */ 6843 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 6844 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 6845 lpfc_nvme_rescan_port(vport, ndlp); 6846 6847 lpfc_disc_state_machine(vport, ndlp, NULL, 6848 NLP_EVT_DEVICE_RECOVERY); 6849 lpfc_cancel_retry_delay_tmo(vport, ndlp); 6850 } 6851 return 0; 6852 } 6853 6854 /** 6855 * lpfc_send_rscn_event - Send an RSCN event to management application 6856 * @vport: pointer to a host virtual N_Port data structure. 6857 * @cmdiocb: pointer to lpfc command iocb data structure. 6858 * 6859 * lpfc_send_rscn_event sends an RSCN netlink event to management 6860 * applications. 6861 */ 6862 static void 6863 lpfc_send_rscn_event(struct lpfc_vport *vport, 6864 struct lpfc_iocbq *cmdiocb) 6865 { 6866 struct lpfc_dmabuf *pcmd; 6867 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6868 uint32_t *payload_ptr; 6869 uint32_t payload_len; 6870 struct lpfc_rscn_event_header *rscn_event_data; 6871 6872 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6873 payload_ptr = (uint32_t *) pcmd->virt; 6874 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 6875 6876 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 6877 payload_len, GFP_KERNEL); 6878 if (!rscn_event_data) { 6879 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6880 "0147 Failed to allocate memory for RSCN event\n"); 6881 return; 6882 } 6883 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 6884 rscn_event_data->payload_length = payload_len; 6885 memcpy(rscn_event_data->rscn_payload, payload_ptr, 6886 payload_len); 6887 6888 fc_host_post_vendor_event(shost, 6889 fc_get_event_number(), 6890 sizeof(struct lpfc_rscn_event_header) + payload_len, 6891 (char *)rscn_event_data, 6892 LPFC_NL_VENDOR_ID); 6893 6894 kfree(rscn_event_data); 6895 } 6896 6897 /** 6898 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 6899 * @vport: pointer to a host virtual N_Port data structure. 6900 * @cmdiocb: pointer to lpfc command iocb data structure. 6901 * @ndlp: pointer to a node-list data structure. 6902 * 6903 * This routine processes an unsolicited RSCN (Registration State Change 6904 * Notification) IOCB. 
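 * Throughout RSCN handling, a D_ID is tested against a saved RSCN entry
 * by lpfc_rscn_payload_check() above, which honors the address-format
 * scope carried in each entry. Reduced to a mask, the test is equivalent
 * to the byte-wise comparison in that routine (scope_mask() here is
 * illustrative only, not a driver helper):
 *
 *   uint32_t scope_mask(uint8_t fmt)
 *   {
 *           switch (fmt & RSCN_ADDRESS_FORMAT_MASK) {
 *           case RSCN_ADDRESS_FORMAT_PORT:   return 0xFFFFFF;
 *           case RSCN_ADDRESS_FORMAT_AREA:   return 0xFFFF00;
 *           case RSCN_ADDRESS_FORMAT_DOMAIN: return 0xFF0000;
 *           default:                         return 0x000000;
 *           }
 *   }
 *   a D_ID matches when ((did ^ rscn_did) & scope_mask(fmt)) == 0
 *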
First, the payload of the unsolicited RSCN is walked 6905 * to invoke fc_host_post_event() routine to the FC transport layer. If the 6906 * discover state machine is about to begin discovery, it just accepts the 6907 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 6908 * contains N_Port IDs for other vports on this HBA, it just accepts the 6909 * RSCN and ignore processing it. If the state machine is in the recovery 6910 * state, the fc_rscn_id_list of this @vport is walked and the 6911 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 6912 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 6913 * routine is invoked to handle the RSCN event. 6914 * 6915 * Return code 6916 * 0 - Just sent the acc response 6917 * 1 - Sent the acc response and waited for name server completion 6918 **/ 6919 static int 6920 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6921 struct lpfc_nodelist *ndlp) 6922 { 6923 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6924 struct lpfc_hba *phba = vport->phba; 6925 struct lpfc_dmabuf *pcmd; 6926 uint32_t *lp, *datap; 6927 uint32_t payload_len, length, nportid, *cmd; 6928 int rscn_cnt; 6929 int rscn_id = 0, hba_id = 0; 6930 int i, tmo; 6931 6932 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6933 lp = (uint32_t *) pcmd->virt; 6934 6935 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6936 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6937 /* RSCN received */ 6938 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6939 "0214 RSCN received Data: x%x x%x x%x x%x\n", 6940 vport->fc_flag, payload_len, *lp, 6941 vport->fc_rscn_id_cnt); 6942 6943 /* Send an RSCN event to the management application */ 6944 lpfc_send_rscn_event(vport, cmdiocb); 6945 6946 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 6947 fc_host_post_event(shost, fc_get_event_number(), 6948 FCH_EVT_RSCN, lp[i]); 6949 6950 /* Check if RSCN is coming from a direct-connected remote NPort */ 6951 if (vport->fc_flag & FC_PT2PT) { 6952 /* If so, just ACC it, no other action needed for now */ 6953 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6954 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 6955 *lp, vport->fc_flag, payload_len); 6956 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6957 6958 /* Check to see if we need to NVME rescan this target 6959 * remoteport. 6960 */ 6961 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 6962 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 6963 lpfc_nvme_rescan_port(vport, ndlp); 6964 return 0; 6965 } 6966 6967 /* If we are about to begin discovery, just ACC the RSCN. 6968 * Discovery processing will satisfy it. 6969 */ 6970 if (vport->port_state <= LPFC_NS_QRY) { 6971 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6972 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 6973 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6974 6975 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6976 return 0; 6977 } 6978 6979 /* If this RSCN just contains NPortIDs for other vports on this HBA, 6980 * just ACC and ignore it. 
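 *
 * The check below, like the event posting above, walks the payload one
 * word at a time: word 0 carries the ELS command in its top byte and the
 * payload length in its low bytes, and every following word is one
 * affected D_ID. In sketch form, with handle_affected_did() standing in
 * for the per-entry work (event posting, vport lookup, and so on):
 *
 *   lp = (uint32_t *)pcmd->virt;
 *   payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
 *   payload_len -= sizeof(uint32_t);
 *   for (i = 0; i < payload_len / sizeof(uint32_t); i++)
 *           handle_affected_did(be32_to_cpu(lp[i]) & Mask_DID);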
6981 */ 6982 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6983 !(vport->cfg_peer_port_login)) { 6984 i = payload_len; 6985 datap = lp; 6986 while (i > 0) { 6987 nportid = *datap++; 6988 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 6989 i -= sizeof(uint32_t); 6990 rscn_id++; 6991 if (lpfc_find_vport_by_did(phba, nportid)) 6992 hba_id++; 6993 } 6994 if (rscn_id == hba_id) { 6995 /* ALL NPortIDs in RSCN are on HBA */ 6996 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6997 "0219 Ignore RSCN " 6998 "Data: x%x x%x x%x x%x\n", 6999 vport->fc_flag, payload_len, 7000 *lp, vport->fc_rscn_id_cnt); 7001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7002 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 7003 ndlp->nlp_DID, vport->port_state, 7004 ndlp->nlp_flag); 7005 7006 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 7007 ndlp, NULL); 7008 return 0; 7009 } 7010 } 7011 7012 spin_lock_irq(shost->host_lock); 7013 if (vport->fc_rscn_flush) { 7014 /* Another thread is walking fc_rscn_id_list on this vport */ 7015 vport->fc_flag |= FC_RSCN_DISCOVERY; 7016 spin_unlock_irq(shost->host_lock); 7017 /* Send back ACC */ 7018 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7019 return 0; 7020 } 7021 /* Indicate we are walking fc_rscn_id_list on this vport */ 7022 vport->fc_rscn_flush = 1; 7023 spin_unlock_irq(shost->host_lock); 7024 /* Get the array count after successfully have the token */ 7025 rscn_cnt = vport->fc_rscn_id_cnt; 7026 /* If we are already processing an RSCN, save the received 7027 * RSCN payload buffer, cmdiocb->context2 to process later. 7028 */ 7029 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 7030 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7031 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 7032 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7033 7034 spin_lock_irq(shost->host_lock); 7035 vport->fc_flag |= FC_RSCN_DEFERRED; 7036 7037 /* Restart disctmo if its already running */ 7038 if (vport->fc_flag & FC_DISC_TMO) { 7039 tmo = ((phba->fc_ratov * 3) + 3); 7040 mod_timer(&vport->fc_disctmo, 7041 jiffies + msecs_to_jiffies(1000 * tmo)); 7042 } 7043 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 7044 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 7045 vport->fc_flag |= FC_RSCN_MODE; 7046 spin_unlock_irq(shost->host_lock); 7047 if (rscn_cnt) { 7048 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 7049 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 7050 } 7051 if ((rscn_cnt) && 7052 (payload_len + length <= LPFC_BPL_SIZE)) { 7053 *cmd &= ELS_CMD_MASK; 7054 *cmd |= cpu_to_be32(payload_len + length); 7055 memcpy(((uint8_t *)cmd) + length, lp, 7056 payload_len); 7057 } else { 7058 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 7059 vport->fc_rscn_id_cnt++; 7060 /* If we zero, cmdiocb->context2, the calling 7061 * routine will not try to free it. 
7062 */ 7063 cmdiocb->context2 = NULL; 7064 } 7065 /* Deferred RSCN */ 7066 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7067 "0235 Deferred RSCN " 7068 "Data: x%x x%x x%x\n", 7069 vport->fc_rscn_id_cnt, vport->fc_flag, 7070 vport->port_state); 7071 } else { 7072 vport->fc_flag |= FC_RSCN_DISCOVERY; 7073 spin_unlock_irq(shost->host_lock); 7074 /* ReDiscovery RSCN */ 7075 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7076 "0234 ReDiscovery RSCN " 7077 "Data: x%x x%x x%x\n", 7078 vport->fc_rscn_id_cnt, vport->fc_flag, 7079 vport->port_state); 7080 } 7081 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7082 vport->fc_rscn_flush = 0; 7083 /* Send back ACC */ 7084 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7085 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7086 lpfc_rscn_recovery_check(vport); 7087 return 0; 7088 } 7089 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 7090 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 7091 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 7092 7093 spin_lock_irq(shost->host_lock); 7094 vport->fc_flag |= FC_RSCN_MODE; 7095 spin_unlock_irq(shost->host_lock); 7096 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 7097 /* Indicate we are done walking fc_rscn_id_list on this vport */ 7098 vport->fc_rscn_flush = 0; 7099 /* 7100 * If we zero, cmdiocb->context2, the calling routine will 7101 * not try to free it. 7102 */ 7103 cmdiocb->context2 = NULL; 7104 lpfc_set_disctmo(vport); 7105 /* Send back ACC */ 7106 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7107 /* send RECOVERY event for ALL nodes that match RSCN payload */ 7108 lpfc_rscn_recovery_check(vport); 7109 return lpfc_els_handle_rscn(vport); 7110 } 7111 7112 /** 7113 * lpfc_els_handle_rscn - Handle rscn for a vport 7114 * @vport: pointer to a host virtual N_Port data structure. 7115 * 7116 * This routine handles the Registration State Configuration Notification 7117 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 7118 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 7119 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 7120 * NameServer shall be issued. If CT command to the NameServer fails to be 7121 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 7122 * RSCN activities with the @vport. 7123 * 7124 * Return code 7125 * 0 - Cleaned up rscn on the @vport 7126 * 1 - Wait for plogi to name server before proceed 7127 **/ 7128 int 7129 lpfc_els_handle_rscn(struct lpfc_vport *vport) 7130 { 7131 struct lpfc_nodelist *ndlp; 7132 struct lpfc_hba *phba = vport->phba; 7133 7134 /* Ignore RSCN if the port is being torn down. */ 7135 if (vport->load_flag & FC_UNLOADING) { 7136 lpfc_els_flush_rscn(vport); 7137 return 0; 7138 } 7139 7140 /* Start timer for RSCN processing */ 7141 lpfc_set_disctmo(vport); 7142 7143 /* RSCN processed */ 7144 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 7145 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 7146 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 7147 vport->port_state, vport->num_disc_nodes, 7148 vport->gidft_inp); 7149 7150 /* To process RSCN, first compare RSCN data with NameServer */ 7151 vport->fc_ns_retry = 0; 7152 vport->num_disc_nodes = 0; 7153 7154 ndlp = lpfc_findnode_did(vport, NameServer_DID); 7155 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 7156 /* Good ndlp, issue CT Request to NameServer. Need to 7157 * know how many gidfts were issued. 
If none, then just 7158 * flush the RSCN. Otherwise, the outstanding requests 7159 * need to complete. 7160 */ 7161 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 7162 if (lpfc_issue_gidft(vport) > 0) 7163 return 1; 7164 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 7165 if (lpfc_issue_gidpt(vport) > 0) 7166 return 1; 7167 } else { 7168 return 1; 7169 } 7170 } else { 7171 /* Nameserver login in question. Revalidate. */ 7172 if (ndlp) { 7173 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 7174 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 7175 } else { 7176 ndlp = lpfc_nlp_init(vport, NameServer_DID); 7177 if (!ndlp) { 7178 lpfc_els_flush_rscn(vport); 7179 return 0; 7180 } 7181 ndlp->nlp_prev_state = ndlp->nlp_state; 7182 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 7183 } 7184 ndlp->nlp_type |= NLP_FABRIC; 7185 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 7186 /* Wait for NameServer login cmpl before we can 7187 * continue 7188 */ 7189 return 1; 7190 } 7191 7192 lpfc_els_flush_rscn(vport); 7193 return 0; 7194 } 7195 7196 /** 7197 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 7198 * @vport: pointer to a host virtual N_Port data structure. 7199 * @cmdiocb: pointer to lpfc command iocb data structure. 7200 * @ndlp: pointer to a node-list data structure. 7201 * 7202 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS 7203 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 7204 * point topology. As an unsolicited FLOGI should not be received in loop 7205 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 7206 * lpfc_check_sparm() routine is invoked to check the parameters in the 7207 * unsolicited FLOGI. If parameter validation fails, the routine 7208 * lpfc_els_rsp_reject() shall be called with reject reason code set to 7209 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 7210 * FLOGI shall be compared with the Port WWN of the @vport to determine who 7211 * will initiate PLOGI. The party with the higher lexicographical Port WWN 7212 * value shall have higher priority (as the winning port) and will initiate 7213 * PLOGI and communicate Port_IDs (Addresses) for both nodes in PLOGI. The 7214 * result of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 7215 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
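 *
 * The comparison itself is a plain memcmp() of the two 8-byte WWPNs, so
 * "higher" means numerically greater when the bytes are read big-endian.
 * In outline, mirroring the code below:
 *
 *   rc = memcmp(&vport->fc_portname, &sp->portName,
 *               sizeof(struct lpfc_name));
 *   if (rc > 0)
 *           vport->fc_myDID = PT2PT_LocalID;    (we win and send PLOGI)
 *   else if (rc < 0)
 *           vport->fc_myDID = PT2PT_RemoteID;   (wait for the remote PLOGI)
 *   else
 *           reinitialize the link or abort the FLOGI, since an equal
 *           WWPN means this is our own FLOGI looped back externally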
7216 * 7217 * Return code 7218 * 0 - Successfully processed the unsolicited flogi 7219 * 1 - Failed to process the unsolicited flogi 7220 **/ 7221 static int 7222 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7223 struct lpfc_nodelist *ndlp) 7224 { 7225 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7226 struct lpfc_hba *phba = vport->phba; 7227 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7228 uint32_t *lp = (uint32_t *) pcmd->virt; 7229 IOCB_t *icmd = &cmdiocb->iocb; 7230 struct serv_parm *sp; 7231 LPFC_MBOXQ_t *mbox; 7232 uint32_t cmd, did; 7233 int rc; 7234 uint32_t fc_flag = 0; 7235 uint32_t port_state = 0; 7236 7237 cmd = *lp++; 7238 sp = (struct serv_parm *) lp; 7239 7240 /* FLOGI received */ 7241 7242 lpfc_set_disctmo(vport); 7243 7244 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7245 /* We should never receive a FLOGI in loop mode, ignore it */ 7246 did = icmd->un.elsreq64.remoteID; 7247 7248 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 7249 Loop Mode */ 7250 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7251 "0113 An FLOGI ELS command x%x was " 7252 "received from DID x%x in Loop Mode\n", 7253 cmd, did); 7254 return 1; 7255 } 7256 7257 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 7258 7259 /* 7260 * If our portname is greater than the remote portname, 7261 * then we initiate Nport login. 7262 */ 7263 7264 rc = memcmp(&vport->fc_portname, &sp->portName, 7265 sizeof(struct lpfc_name)); 7266 7267 if (!rc) { 7268 if (phba->sli_rev < LPFC_SLI_REV4) { 7269 mbox = mempool_alloc(phba->mbox_mem_pool, 7270 GFP_KERNEL); 7271 if (!mbox) 7272 return 1; 7273 lpfc_linkdown(phba); 7274 lpfc_init_link(phba, mbox, 7275 phba->cfg_topology, 7276 phba->cfg_link_speed); 7277 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 7278 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7279 mbox->vport = vport; 7280 rc = lpfc_sli_issue_mbox(phba, mbox, 7281 MBX_NOWAIT); 7282 lpfc_set_loopback_flag(phba); 7283 if (rc == MBX_NOT_FINISHED) 7284 mempool_free(mbox, phba->mbox_mem_pool); 7285 return 1; 7286 } 7287 7288 /* abort the flogi coming back to ourselves 7289 * due to external loopback on the port. 7290 */ 7291 lpfc_els_abort_flogi(phba); 7292 return 0; 7293 7294 } else if (rc > 0) { /* greater than */ 7295 spin_lock_irq(shost->host_lock); 7296 vport->fc_flag |= FC_PT2PT_PLOGI; 7297 spin_unlock_irq(shost->host_lock); 7298 7299 /* If we have the high WWPN we can assign our own 7300 * myDID; otherwise, we have to WAIT for a PLOGI 7301 * from the remote NPort to find out what it 7302 * will be. 7303 */ 7304 vport->fc_myDID = PT2PT_LocalID; 7305 } else { 7306 vport->fc_myDID = PT2PT_RemoteID; 7307 } 7308 7309 /* 7310 * The vport state should go to LPFC_FLOGI only 7311 * AFTER we issue a FLOGI, not receive one. 7312 */ 7313 spin_lock_irq(shost->host_lock); 7314 fc_flag = vport->fc_flag; 7315 port_state = vport->port_state; 7316 vport->fc_flag |= FC_PT2PT; 7317 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7318 7319 /* Acking an unsol FLOGI. Count 1 for link bounce 7320 * work-around. 7321 */ 7322 vport->rcv_flogi_cnt++; 7323 spin_unlock_irq(shost->host_lock); 7324 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7325 "3311 Rcv Flogi PS x%x new PS x%x " 7326 "fc_flag x%x new fc_flag x%x\n", 7327 port_state, vport->port_state, 7328 fc_flag, vport->fc_flag); 7329 7330 /* 7331 * We temporarily set fc_myDID to make it look like we are 7332 * a Fabric. This is done just so we end up with the right 7333 * did / sid on the FLOGI ACC rsp. 
7334 */ 7335 did = vport->fc_myDID; 7336 vport->fc_myDID = Fabric_DID; 7337 7338 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 7339 7340 /* Defer ACC response until AFTER we issue a FLOGI */ 7341 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 7342 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext; 7343 phba->defer_flogi_acc_ox_id = 7344 cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7345 7346 vport->fc_myDID = did; 7347 7348 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7349 "3344 Deferring FLOGI ACC: rx_id: x%x," 7350 " ox_id: x%x, hba_flag x%x\n", 7351 phba->defer_flogi_acc_rx_id, 7352 phba->defer_flogi_acc_ox_id, phba->hba_flag); 7353 7354 phba->defer_flogi_acc_flag = true; 7355 7356 return 0; 7357 } 7358 7359 /* Send back ACC */ 7360 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 7361 7362 /* Now lets put fc_myDID back to what its supposed to be */ 7363 vport->fc_myDID = did; 7364 7365 return 0; 7366 } 7367 7368 /** 7369 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 7370 * @vport: pointer to a host virtual N_Port data structure. 7371 * @cmdiocb: pointer to lpfc command iocb data structure. 7372 * @ndlp: pointer to a node-list data structure. 7373 * 7374 * This routine processes Request Node Identification Data (RNID) IOCB 7375 * received as an ELS unsolicited event. Only when the RNID specified format 7376 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 7377 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 7378 * Accept (ACC) the RNID ELS command. All the other RNID formats are 7379 * rejected by invoking the lpfc_els_rsp_reject() routine. 7380 * 7381 * Return code 7382 * 0 - Successfully processed rnid iocb (currently always return 0) 7383 **/ 7384 static int 7385 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7386 struct lpfc_nodelist *ndlp) 7387 { 7388 struct lpfc_dmabuf *pcmd; 7389 uint32_t *lp; 7390 RNID *rn; 7391 struct ls_rjt stat; 7392 7393 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7394 lp = (uint32_t *) pcmd->virt; 7395 7396 lp++; 7397 rn = (RNID *) lp; 7398 7399 /* RNID received */ 7400 7401 switch (rn->Format) { 7402 case 0: 7403 case RNID_TOPOLOGY_DISC: 7404 /* Send back ACC */ 7405 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 7406 break; 7407 default: 7408 /* Reject this request because format not supported */ 7409 stat.un.b.lsRjtRsvd0 = 0; 7410 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7411 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7412 stat.un.b.vendorUnique = 0; 7413 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7414 NULL); 7415 } 7416 return 0; 7417 } 7418 7419 /** 7420 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 7421 * @vport: pointer to a host virtual N_Port data structure. 7422 * @cmdiocb: pointer to lpfc command iocb data structure. 7423 * @ndlp: pointer to a node-list data structure. 
7424 * 7425 * Return code 7426 * 0 - Successfully processed echo iocb (currently always return 0) 7427 **/ 7428 static int 7429 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7430 struct lpfc_nodelist *ndlp) 7431 { 7432 uint8_t *pcmd; 7433 7434 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 7435 7436 /* skip over first word of echo command to find echo data */ 7437 pcmd += sizeof(uint32_t); 7438 7439 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 7440 return 0; 7441 } 7442 7443 /** 7444 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 7445 * @vport: pointer to a host virtual N_Port data structure. 7446 * @cmdiocb: pointer to lpfc command iocb data structure. 7447 * @ndlp: pointer to a node-list data structure. 7448 * 7449 * This routine processes a Link Incident Report Registration(LIRR) IOCB 7450 * received as an ELS unsolicited event. Currently, this function just invokes 7451 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 7452 * 7453 * Return code 7454 * 0 - Successfully processed lirr iocb (currently always return 0) 7455 **/ 7456 static int 7457 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7458 struct lpfc_nodelist *ndlp) 7459 { 7460 struct ls_rjt stat; 7461 7462 /* For now, unconditionally reject this command */ 7463 stat.un.b.lsRjtRsvd0 = 0; 7464 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7465 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7466 stat.un.b.vendorUnique = 0; 7467 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7468 return 0; 7469 } 7470 7471 /** 7472 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 7473 * @vport: pointer to a host virtual N_Port data structure. 7474 * @cmdiocb: pointer to lpfc command iocb data structure. 7475 * @ndlp: pointer to a node-list data structure. 7476 * 7477 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 7478 * received as an ELS unsolicited event. A request to RRQ shall only 7479 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 7480 * Nx_Port N_Port_ID of the target Exchange is the same as the 7481 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 7482 * not accepted, an LS_RJT with reason code "Unable to perform 7483 * command request" and reason code explanation "Invalid Originator 7484 * S_ID" shall be returned. For now, we just unconditionally accept 7485 * RRQ from the target. 7486 **/ 7487 static void 7488 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7489 struct lpfc_nodelist *ndlp) 7490 { 7491 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7492 if (vport->phba->sli_rev == LPFC_SLI_REV4) 7493 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 7494 } 7495 7496 /** 7497 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 7498 * @phba: pointer to lpfc hba data structure. 7499 * @pmb: pointer to the driver internal queue element for mailbox command. 7500 * 7501 * This routine is the completion callback function for the MBX_READ_LNK_STAT 7502 * mailbox command. This callback function is to actually send the Accept 7503 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 7504 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 7505 * mailbox command, constructs the RLS response with the link statistics 7506 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 7507 * response to the RLS. 
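 *
 * When a request cannot be serviced, the receive handlers in this area
 * (RNID and LIRR above; RLS, RTV and RPL below) all reject it with the
 * same LS_RJT tuple; the recurring pattern is:
 *
 *   struct ls_rjt stat;
 *
 *   stat.un.b.lsRjtRsvd0 = 0;
 *   stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *   stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 *   stat.un.b.vendorUnique = 0;
 *   lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);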
7508 * 7509 * Note that the ndlp reference count will be incremented by 1 for holding the 7510 * ndlp and the reference to ndlp will be stored into the context1 field of 7511 * the IOCB for the completion callback function to the RLS Accept Response 7512 * ELS IOCB command. 7513 * 7514 **/ 7515 static void 7516 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7517 { 7518 int rc = 0; 7519 MAILBOX_t *mb; 7520 IOCB_t *icmd; 7521 struct RLS_RSP *rls_rsp; 7522 uint8_t *pcmd; 7523 struct lpfc_iocbq *elsiocb; 7524 struct lpfc_nodelist *ndlp; 7525 uint16_t oxid; 7526 uint16_t rxid; 7527 uint32_t cmdsize; 7528 7529 mb = &pmb->u.mb; 7530 7531 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 7532 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 7533 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 7534 pmb->ctx_buf = NULL; 7535 pmb->ctx_ndlp = NULL; 7536 7537 if (mb->mbxStatus) { 7538 mempool_free(pmb, phba->mbox_mem_pool); 7539 return; 7540 } 7541 7542 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 7543 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7544 lpfc_max_els_tries, ndlp, 7545 ndlp->nlp_DID, ELS_CMD_ACC); 7546 7547 /* Decrement the ndlp reference count from previous mbox command */ 7548 lpfc_nlp_put(ndlp); 7549 7550 if (!elsiocb) { 7551 mempool_free(pmb, phba->mbox_mem_pool); 7552 return; 7553 } 7554 7555 icmd = &elsiocb->iocb; 7556 icmd->ulpContext = rxid; 7557 icmd->unsli3.rcvsli3.ox_id = oxid; 7558 7559 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7560 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7561 pcmd += sizeof(uint32_t); /* Skip past command */ 7562 rls_rsp = (struct RLS_RSP *)pcmd; 7563 7564 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 7565 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 7566 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 7567 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 7568 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 7569 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 7570 mempool_free(pmb, phba->mbox_mem_pool); 7571 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 7572 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7573 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 7574 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 7575 elsiocb->iotag, elsiocb->iocb.ulpContext, 7576 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7577 ndlp->nlp_rpi); 7578 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7579 phba->fc_stat.elsXmitACC++; 7580 elsiocb->context1 = lpfc_nlp_get(ndlp); 7581 if (!elsiocb->context1) { 7582 lpfc_els_free_iocb(phba, elsiocb); 7583 return; 7584 } 7585 7586 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7587 if (rc == IOCB_ERROR) { 7588 lpfc_els_free_iocb(phba, elsiocb); 7589 lpfc_nlp_put(ndlp); 7590 } 7591 return; 7592 } 7593 7594 /** 7595 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 7596 * @vport: pointer to a host virtual N_Port data structure. 7597 * @cmdiocb: pointer to lpfc command iocb data structure. 7598 * @ndlp: pointer to a node-list data structure. 7599 * 7600 * This routine processes Read Link Status (RLS) IOCB received as an 7601 * ELS unsolicited event. It first checks the remote port state. If the 7602 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7603 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 7604 * response. 
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 7605 * for reading the HBA link statistics. The callback function, 7606 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command, 7607 * then actually sends out the RLS Accept (ACC) response. 7608 * 7609 * Return codes 7610 * 0 - Successfully processed rls iocb (currently always returns 0) 7611 **/ 7612 static int 7613 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7614 struct lpfc_nodelist *ndlp) 7615 { 7616 struct lpfc_hba *phba = vport->phba; 7617 LPFC_MBOXQ_t *mbox; 7618 struct ls_rjt stat; 7619 7620 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7621 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 7622 /* reject the unsolicited RLS request and done with it */ 7623 goto reject_out; 7624 7625 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 7626 if (mbox) { 7627 lpfc_read_lnk_stat(phba, mbox); 7628 mbox->ctx_buf = (void *)((unsigned long) 7629 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 7630 cmdiocb->iocb.ulpContext)); /* rx_id */ 7631 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 7632 if (!mbox->ctx_ndlp) 7633 goto node_err; 7634 mbox->vport = vport; 7635 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 7636 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 7637 != MBX_NOT_FINISHED) 7638 /* Mbox completion will send ELS Response */ 7639 return 0; 7640 /* Decrement reference count used for the failed mbox 7641 * command. 7642 */ 7643 lpfc_nlp_put(ndlp); 7644 node_err: 7645 mempool_free(mbox, phba->mbox_mem_pool); 7646 } 7647 reject_out: 7648 /* issue rejection response */ 7649 stat.un.b.lsRjtRsvd0 = 0; 7650 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7651 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7652 stat.un.b.vendorUnique = 0; 7653 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7654 return 0; 7655 } 7656 7657 /** 7658 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 7659 * @vport: pointer to a host virtual N_Port data structure. 7660 * @cmdiocb: pointer to lpfc command iocb data structure. 7661 * @ndlp: pointer to a node-list data structure. 7662 * 7663 * This routine processes a Read Timeout Value (RTV) IOCB received as an 7664 * ELS unsolicited event. It first checks the remote port state. If the 7665 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7666 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 7667 * response. Otherwise, it sends the Accept (ACC) response to the Read Timeout 7668 * Value (RTV) unsolicited IOCB event. 7669 * 7670 * Note that the ndlp reference count will be incremented by 1 for holding the 7671 * ndlp and the reference to ndlp will be stored into the context1 field of 7672 * the IOCB for the completion callback function to the RTV Accept Response 7673 * ELS IOCB command.
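 *
 * Because the mailbox round trip in the RLS path above carries no private
 * per-command context, lpfc_els_rcv_rls() packs both exchange IDs of the
 * unsolicited command into the pointer-sized ctx_buf and
 * lpfc_els_rsp_rls_acc() unpacks them; in outline (ox_id/rx_id are
 * shorthand for the received command's exchange identifiers):
 *
 *   issue side:
 *     mbox->ctx_buf = (void *)(unsigned long)((ox_id << 16) | rx_id);
 *   completion side:
 *     rxid = (uint16_t)((unsigned long)pmb->ctx_buf & 0xffff);
 *     oxid = (uint16_t)(((unsigned long)pmb->ctx_buf >> 16) & 0xffff);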
7674 * 7675 * Return codes 7676 * 0 - Successfully processed rtv iocb (currently always returns 0) 7677 **/ 7678 static int 7679 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7680 struct lpfc_nodelist *ndlp) 7681 { 7682 int rc = 0; 7683 struct lpfc_hba *phba = vport->phba; 7684 struct ls_rjt stat; 7685 struct RTV_RSP *rtv_rsp; 7686 uint8_t *pcmd; 7687 struct lpfc_iocbq *elsiocb; 7688 uint32_t cmdsize; 7689 7690 7691 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7692 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 7693 /* reject the unsolicited RTV request and done with it */ 7694 goto reject_out; 7695 7696 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 7697 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7698 lpfc_max_els_tries, ndlp, 7699 ndlp->nlp_DID, ELS_CMD_ACC); 7700 7701 if (!elsiocb) 7702 return 1; 7703 7704 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7705 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7706 pcmd += sizeof(uint32_t); /* Skip past command */ 7707 7708 /* use the command's xri in the response */ 7709 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 7710 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7711 7712 rtv_rsp = (struct RTV_RSP *)pcmd; 7713 7714 /* populate RTV payload */ 7715 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 7716 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 7717 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 7718 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 7719 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 7720 7721 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 7722 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7723 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 7724 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 7725 "Data: x%x x%x x%x\n", 7726 elsiocb->iotag, elsiocb->iocb.ulpContext, 7727 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7728 ndlp->nlp_rpi, 7729 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 7730 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7731 phba->fc_stat.elsXmitACC++; 7732 elsiocb->context1 = lpfc_nlp_get(ndlp); 7733 if (!elsiocb->context1) { 7734 lpfc_els_free_iocb(phba, elsiocb); 7735 return 0; 7736 } 7737 7738 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7739 if (rc == IOCB_ERROR) { 7740 lpfc_els_free_iocb(phba, elsiocb); 7741 lpfc_nlp_put(ndlp); 7742 } 7743 return 0; 7744 7745 reject_out: 7746 /* issue rejection response */ 7747 stat.un.b.lsRjtRsvd0 = 0; 7748 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7749 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7750 stat.un.b.vendorUnique = 0; 7751 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7752 return 0; 7753 } 7754 7755 /* lpfc_issue_els_rrq - Issue an ELS RRQ (Reinstate Recovery Qualifier) command 7756 * @vport: pointer to a host virtual N_Port data structure. 7757 * @ndlp: pointer to a node-list data structure. 7758 * @did: DID of the target. 7759 * @rrq: Pointer to the rrq struct. 7760 * 7761 * Build an ELS RRQ command and send it to the target. If the issue_iocb is 7762 * successful, the completion handler will clear the RRQ. 7763 * 7764 * Return codes 7765 * 0 - Successfully sent rrq els iocb. 7766 * 1 - Failed to send rrq els iocb.
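 *
 * The RRQ payload built below is a small bit-packed structure naming the
 * requesting N_Port_ID and the OX_ID/RX_ID of the exchange to be
 * reinstated; each word is byte-swapped once all fields are set:
 *
 *   bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]);
 *   bf_set(rrq_rxid, els_rrq, rrq->rxid);
 *   bf_set(rrq_did,  els_rrq, vport->fc_myDID);
 *   els_rrq->rrq = cpu_to_be32(els_rrq->rrq);
 *   els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg);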
7767 **/ 7768 static int 7769 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7770 uint32_t did, struct lpfc_node_rrq *rrq) 7771 { 7772 struct lpfc_hba *phba = vport->phba; 7773 struct RRQ *els_rrq; 7774 struct lpfc_iocbq *elsiocb; 7775 uint8_t *pcmd; 7776 uint16_t cmdsize; 7777 int ret; 7778 7779 if (!ndlp) 7780 return 1; 7781 7782 /* If ndlp is not NULL, we will bump the reference count on it */ 7783 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 7784 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 7785 ELS_CMD_RRQ); 7786 if (!elsiocb) 7787 return 1; 7788 7789 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7790 7791 /* For RRQ request, remainder of payload is Exchange IDs */ 7792 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 7793 pcmd += sizeof(uint32_t); 7794 els_rrq = (struct RRQ *) pcmd; 7795 7796 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 7797 bf_set(rrq_rxid, els_rrq, rrq->rxid); 7798 bf_set(rrq_did, els_rrq, vport->fc_myDID); 7799 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 7800 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 7801 7802 7803 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7804 "Issue RRQ: did:x%x", 7805 did, rrq->xritag, rrq->rxid); 7806 elsiocb->context_un.rrq = rrq; 7807 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 7808 7809 lpfc_nlp_get(ndlp); 7810 elsiocb->context1 = ndlp; 7811 7812 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7813 if (ret == IOCB_ERROR) 7814 goto io_err; 7815 return 0; 7816 7817 io_err: 7818 lpfc_els_free_iocb(phba, elsiocb); 7819 lpfc_nlp_put(ndlp); 7820 return 1; 7821 } 7822 7823 /** 7824 * lpfc_send_rrq - Sends ELS RRQ if needed. 7825 * @phba: pointer to lpfc hba data structure. 7826 * @rrq: pointer to the active rrq. 7827 * 7828 * This routine will call the lpfc_issue_els_rrq if the rrq is 7829 * still active for the xri. If this function returns a failure then 7830 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 7831 * 7832 * Returns 0 Success. 7833 * 1 Failure. 7834 **/ 7835 int 7836 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 7837 { 7838 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 7839 rrq->nlp_DID); 7840 if (!ndlp) 7841 return 1; 7842 7843 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 7844 return lpfc_issue_els_rrq(rrq->vport, ndlp, 7845 rrq->nlp_DID, rrq); 7846 else 7847 return 1; 7848 } 7849 7850 /** 7851 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 7852 * @vport: pointer to a host virtual N_Port data structure. 7853 * @cmdsize: size of the ELS command. 7854 * @oldiocb: pointer to the original lpfc command iocb data structure. 7855 * @ndlp: pointer to a node-list data structure. 7856 * 7857 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 7858 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 7859 * 7860 * Note that the ndlp reference count will be incremented by 1 for holding the 7861 * ndlp and the reference to ndlp will be stored into the context1 field of 7862 * the IOCB for the completion callback function to the RPL Accept Response 7863 * ELS command. 
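 *
 * That reference convention is the same one the other response transmit
 * paths in this area follow; stripped to its essentials:
 *
 *   elsiocb->context1 = lpfc_nlp_get(ndlp);
 *   if (!elsiocb->context1) {
 *           lpfc_els_free_iocb(phba, elsiocb);
 *           return 1;
 *   }
 *   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *   if (rc == IOCB_ERROR) {
 *           lpfc_els_free_iocb(phba, elsiocb);
 *           lpfc_nlp_put(ndlp);              (drop the reference taken above)
 *           return 1;
 *   }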
7864 * 7865 * Return code 7866 * 0 - Successfully issued ACC RPL ELS command 7867 * 1 - Failed to issue ACC RPL ELS command 7868 **/ 7869 static int 7870 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 7871 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 7872 { 7873 int rc = 0; 7874 struct lpfc_hba *phba = vport->phba; 7875 IOCB_t *icmd, *oldcmd; 7876 RPL_RSP rpl_rsp; 7877 struct lpfc_iocbq *elsiocb; 7878 uint8_t *pcmd; 7879 7880 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 7881 ndlp->nlp_DID, ELS_CMD_ACC); 7882 7883 if (!elsiocb) 7884 return 1; 7885 7886 icmd = &elsiocb->iocb; 7887 oldcmd = &oldiocb->iocb; 7888 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 7889 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 7890 7891 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7892 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7893 pcmd += sizeof(uint16_t); 7894 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 7895 pcmd += sizeof(uint16_t); 7896 7897 /* Setup the RPL ACC payload */ 7898 rpl_rsp.listLen = be32_to_cpu(1); 7899 rpl_rsp.index = 0; 7900 rpl_rsp.port_num_blk.portNum = 0; 7901 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 7902 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 7903 sizeof(struct lpfc_name)); 7904 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 7905 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 7906 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7907 "0120 Xmit ELS RPL ACC response tag x%x " 7908 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 7909 "rpi x%x\n", 7910 elsiocb->iotag, elsiocb->iocb.ulpContext, 7911 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7912 ndlp->nlp_rpi); 7913 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7914 phba->fc_stat.elsXmitACC++; 7915 elsiocb->context1 = lpfc_nlp_get(ndlp); 7916 if (!elsiocb->context1) { 7917 lpfc_els_free_iocb(phba, elsiocb); 7918 return 1; 7919 } 7920 7921 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7922 if (rc == IOCB_ERROR) { 7923 lpfc_els_free_iocb(phba, elsiocb); 7924 lpfc_nlp_put(ndlp); 7925 return 1; 7926 } 7927 7928 return 0; 7929 } 7930 7931 /** 7932 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 7933 * @vport: pointer to a host virtual N_Port data structure. 7934 * @cmdiocb: pointer to lpfc command iocb data structure. 7935 * @ndlp: pointer to a node-list data structure. 7936 * 7937 * This routine processes Read Port List (RPL) IOCB received as an ELS 7938 * unsolicited event. It first checks the remote port state. If the remote 7939 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 7940 * invokes the lpfc_els_rsp_reject() routine to send reject response. 7941 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 7942 * to accept the RPL. 
7943 * 7944 * Return code 7945 * 0 - Successfully processed rpl iocb (currently always return 0) 7946 **/ 7947 static int 7948 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7949 struct lpfc_nodelist *ndlp) 7950 { 7951 struct lpfc_dmabuf *pcmd; 7952 uint32_t *lp; 7953 uint32_t maxsize; 7954 uint16_t cmdsize; 7955 RPL *rpl; 7956 struct ls_rjt stat; 7957 7958 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7959 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 7960 /* issue rejection response */ 7961 stat.un.b.lsRjtRsvd0 = 0; 7962 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7963 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7964 stat.un.b.vendorUnique = 0; 7965 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7966 NULL); 7967 /* rejected the unsolicited RPL request and done with it */ 7968 return 0; 7969 } 7970 7971 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7972 lp = (uint32_t *) pcmd->virt; 7973 rpl = (RPL *) (lp + 1); 7974 maxsize = be32_to_cpu(rpl->maxsize); 7975 7976 /* We support only one port */ 7977 if ((rpl->index == 0) && 7978 ((maxsize == 0) || 7979 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 7980 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 7981 } else { 7982 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 7983 } 7984 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 7985 7986 return 0; 7987 } 7988 7989 /** 7990 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 7991 * @vport: pointer to a virtual N_Port data structure. 7992 * @cmdiocb: pointer to lpfc command iocb data structure. 7993 * @ndlp: pointer to a node-list data structure. 7994 * 7995 * This routine processes Fibre Channel Address Resolution Protocol 7996 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 7997 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 7998 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 7999 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 8000 * remote PortName is compared against the FC PortName stored in the @vport 8001 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 8002 * compared against the FC NodeName stored in the @vport data structure. 8003 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 8004 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 8005 * invoked to send out FARP Response to the remote node. Before sending the 8006 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 8007 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 8008 * routine is invoked to log into the remote port first. 
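 *
 * Reduced to a single expression, the match test performed below is
 * (a sketch of the same checks; requests asking for any other match
 * mode are simply ignored):
 *
 *   match = ((fp->Mflags & FARP_MATCH_PORT) &&
 *            !memcmp(&fp->RportName, &vport->fc_portname,
 *                    sizeof(struct lpfc_name))) ||
 *           ((fp->Mflags & FARP_MATCH_NODE) &&
 *            !memcmp(&fp->RnodeName, &vport->fc_nodename,
 *                    sizeof(struct lpfc_name)));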
8009 * 8010 * Return code 8011 * 0 - Either the FARP Match Mode not supported or successfully processed 8012 **/ 8013 static int 8014 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8015 struct lpfc_nodelist *ndlp) 8016 { 8017 struct lpfc_dmabuf *pcmd; 8018 uint32_t *lp; 8019 IOCB_t *icmd; 8020 FARP *fp; 8021 uint32_t cnt, did; 8022 8023 icmd = &cmdiocb->iocb; 8024 did = icmd->un.elsreq64.remoteID; 8025 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8026 lp = (uint32_t *) pcmd->virt; 8027 8028 lp++; 8029 fp = (FARP *) lp; 8030 /* FARP-REQ received from DID <did> */ 8031 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8032 "0601 FARP-REQ received from DID x%x\n", did); 8033 /* We will only support match on WWPN or WWNN */ 8034 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 8035 return 0; 8036 } 8037 8038 cnt = 0; 8039 /* If this FARP command is searching for my portname */ 8040 if (fp->Mflags & FARP_MATCH_PORT) { 8041 if (memcmp(&fp->RportName, &vport->fc_portname, 8042 sizeof(struct lpfc_name)) == 0) 8043 cnt = 1; 8044 } 8045 8046 /* If this FARP command is searching for my nodename */ 8047 if (fp->Mflags & FARP_MATCH_NODE) { 8048 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 8049 sizeof(struct lpfc_name)) == 0) 8050 cnt = 1; 8051 } 8052 8053 if (cnt) { 8054 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 8055 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 8056 /* Log back into the node before sending the FARP. */ 8057 if (fp->Rflags & FARP_REQUEST_PLOGI) { 8058 ndlp->nlp_prev_state = ndlp->nlp_state; 8059 lpfc_nlp_set_state(vport, ndlp, 8060 NLP_STE_PLOGI_ISSUE); 8061 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 8062 } 8063 8064 /* Send a FARP response to that node */ 8065 if (fp->Rflags & FARP_REQUEST_FARPR) 8066 lpfc_issue_els_farpr(vport, did, 0); 8067 } 8068 } 8069 return 0; 8070 } 8071 8072 /** 8073 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 8074 * @vport: pointer to a host virtual N_Port data structure. 8075 * @cmdiocb: pointer to lpfc command iocb data structure. 8076 * @ndlp: pointer to a node-list data structure. 8077 * 8078 * This routine processes Fibre Channel Address Resolution Protocol 8079 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 8080 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 8081 * the FARP response request. 8082 * 8083 * Return code 8084 * 0 - Successfully processed FARPR IOCB (currently always return 0) 8085 **/ 8086 static int 8087 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8088 struct lpfc_nodelist *ndlp) 8089 { 8090 struct lpfc_dmabuf *pcmd; 8091 uint32_t *lp; 8092 IOCB_t *icmd; 8093 uint32_t did; 8094 8095 icmd = &cmdiocb->iocb; 8096 did = icmd->un.elsreq64.remoteID; 8097 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 8098 lp = (uint32_t *) pcmd->virt; 8099 8100 lp++; 8101 /* FARP-RSP received from DID <did> */ 8102 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8103 "0600 FARP-RSP received from DID x%x\n", did); 8104 /* ACCEPT the Farp resp request */ 8105 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 8106 8107 return 0; 8108 } 8109 8110 /** 8111 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 8112 * @vport: pointer to a host virtual N_Port data structure. 8113 * @cmdiocb: pointer to lpfc command iocb data structure. 8114 * @fan_ndlp: pointer to a node-list data structure. 
8115 * 8116 * This routine processes a Fabric Address Notification (FAN) IOCB 8117 * command received as an ELS unsolicited event. The FAN ELS command will 8118 * only be processed on a physical port (i.e., the @vport represents the 8119 * physical port). The fabric NodeName and PortName from the FAN IOCB are 8120 * compared against those in the phba data structure. If either of them 8121 * differs, the lpfc_issue_init_vfi() routine is invoked to restart 8122 * Fabric Login (FLOGI) to the fabric and start the discovery over. Otherwise, 8123 * if both of them are identical, the lpfc_issue_fabric_reglogin() (SLI-3) or 8124 * lpfc_issue_reg_vfi() (SLI-4) routine is invoked to register login to the fabric. 8125 * 8126 * Return code 8127 * 0 - Successfully processed fan iocb (currently always returns 0). 8128 **/ 8129 static int 8130 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 8131 struct lpfc_nodelist *fan_ndlp) 8132 { 8133 struct lpfc_hba *phba = vport->phba; 8134 uint32_t *lp; 8135 FAN *fp; 8136 8137 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 8138 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 8139 fp = (FAN *) ++lp; 8140 /* FAN received; FAN does not have a reply sequence */ 8141 if ((vport == phba->pport) && 8142 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 8143 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 8144 sizeof(struct lpfc_name))) || 8145 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 8146 sizeof(struct lpfc_name)))) { 8147 /* This port has switched fabrics. FLOGI is required */ 8148 lpfc_issue_init_vfi(vport); 8149 } else { 8150 /* FAN verified - skip FLOGI */ 8151 vport->fc_myDID = vport->fc_prevDID; 8152 if (phba->sli_rev < LPFC_SLI_REV4) 8153 lpfc_issue_fabric_reglogin(vport); 8154 else { 8155 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8156 "3138 Need register VFI: (x%x/%x)\n", 8157 vport->fc_prevDID, vport->fc_myDID); 8158 lpfc_issue_reg_vfi(vport); 8159 } 8160 } 8161 } 8162 return 0; 8163 } 8164 8165 /** 8166 * lpfc_els_timeout - Handler function for the ELS timer 8167 * @t: timer context used to obtain the vport. 8168 * 8169 * This routine is invoked by the ELS timer after timeout. It posts the ELS 8170 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port 8171 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 8172 * up the worker thread. The worker thread then invokes the 8173 * lpfc_els_timeout_handler() routine to service the posted WORKER_ELS_TMO event. 8174 **/ 8175 void 8176 lpfc_els_timeout(struct timer_list *t) 8177 { 8178 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 8179 struct lpfc_hba *phba = vport->phba; 8180 uint32_t tmo_posted; 8181 unsigned long iflag; 8182 8183 spin_lock_irqsave(&vport->work_port_lock, iflag); 8184 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 8185 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8186 vport->work_port_events |= WORKER_ELS_TMO; 8187 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 8188 8189 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8190 lpfc_worker_wake_up(phba); 8191 return; 8192 } 8193 8194 8195 /** 8196 * lpfc_els_timeout_handler - Process an els timeout event 8197 * @vport: pointer to a virtual N_Port data structure. 8198 * 8199 * This routine is the actual handler function that processes an ELS timeout 8200 * event.
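 * As sketched here, each pass grants every outstanding ELS a budget of
 * 2 * R_A_TOV before it is queued for abort; commands that still have
 * time remaining are only aged:
 *
 *   timeout = (uint32_t)(phba->fc_ratov << 1);
 *   if (piocb->drvrTimeout > 0) {
 *           piocb->drvrTimeout = (piocb->drvrTimeout >= timeout) ?
 *                                piocb->drvrTimeout - timeout : 0;
 *           continue;                        (not expired yet)
 *   }
 *   list_add_tail(&piocb->dlist, &abort_list);
 *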
It walks the ELS ring to get and abort all the IOCBs (except the 8201 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 8202 * invoking the lpfc_sli_issue_abort_iotag() routine. 8203 **/ 8204 void 8205 lpfc_els_timeout_handler(struct lpfc_vport *vport) 8206 { 8207 struct lpfc_hba *phba = vport->phba; 8208 struct lpfc_sli_ring *pring; 8209 struct lpfc_iocbq *tmp_iocb, *piocb; 8210 IOCB_t *cmd = NULL; 8211 struct lpfc_dmabuf *pcmd; 8212 uint32_t els_command = 0; 8213 uint32_t timeout; 8214 uint32_t remote_ID = 0xffffffff; 8215 LIST_HEAD(abort_list); 8216 8217 8218 timeout = (uint32_t)(phba->fc_ratov << 1); 8219 8220 pring = lpfc_phba_elsring(phba); 8221 if (unlikely(!pring)) 8222 return; 8223 8224 if (phba->pport->load_flag & FC_UNLOADING) 8225 return; 8226 8227 spin_lock_irq(&phba->hbalock); 8228 if (phba->sli_rev == LPFC_SLI_REV4) 8229 spin_lock(&pring->ring_lock); 8230 8231 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8232 cmd = &piocb->iocb; 8233 8234 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 8235 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8236 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8237 continue; 8238 8239 if (piocb->vport != vport) 8240 continue; 8241 8242 pcmd = (struct lpfc_dmabuf *) piocb->context2; 8243 if (pcmd) 8244 els_command = *(uint32_t *) (pcmd->virt); 8245 8246 if (els_command == ELS_CMD_FARP || 8247 els_command == ELS_CMD_FARPR || 8248 els_command == ELS_CMD_FDISC) 8249 continue; 8250 8251 if (piocb->drvrTimeout > 0) { 8252 if (piocb->drvrTimeout >= timeout) 8253 piocb->drvrTimeout -= timeout; 8254 else 8255 piocb->drvrTimeout = 0; 8256 continue; 8257 } 8258 8259 remote_ID = 0xffffffff; 8260 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 8261 remote_ID = cmd->un.elsreq64.remoteID; 8262 else { 8263 struct lpfc_nodelist *ndlp; 8264 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 8265 if (ndlp) 8266 remote_ID = ndlp->nlp_DID; 8267 } 8268 list_add_tail(&piocb->dlist, &abort_list); 8269 } 8270 if (phba->sli_rev == LPFC_SLI_REV4) 8271 spin_unlock(&pring->ring_lock); 8272 spin_unlock_irq(&phba->hbalock); 8273 8274 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8275 cmd = &piocb->iocb; 8276 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8277 "0127 ELS timeout Data: x%x x%x x%x " 8278 "x%x\n", els_command, 8279 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 8280 spin_lock_irq(&phba->hbalock); 8281 list_del_init(&piocb->dlist); 8282 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 8283 spin_unlock_irq(&phba->hbalock); 8284 } 8285 8286 /* Make sure HBA is alive */ 8287 lpfc_issue_hb_tmo(phba); 8288 8289 if (!list_empty(&pring->txcmplq)) 8290 if (!(phba->pport->load_flag & FC_UNLOADING)) 8291 mod_timer(&vport->els_tmofunc, 8292 jiffies + msecs_to_jiffies(1000 * timeout)); 8293 } 8294 8295 /** 8296 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 8297 * @vport: pointer to a host virtual N_Port data structure. 8298 * 8299 * This routine is used to clean up all the outstanding ELS commands on a 8300 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 8301 * routine. After that, it walks the ELS transmit queue to remove all the 8302 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 8303 * the IOCBs with a non-NULL completion callback function, the callback 8304 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 8305 * un.ulpWord[4] set to IOERR_SLI_ABORTED. 
For IOCBs with a NULL completion 8306 * callback function, the IOCB will simply be released. Finally, it walks 8307 * the ELS transmit completion queue to issue an abort IOCB to any transmit 8308 * completion queue IOCB that is associated with the @vport and is not 8309 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 8310 * part of the discovery state machine) out to HBA by invoking the 8311 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 8312 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 8313 * the IOCBs are aborted when this function returns. 8314 **/ 8315 void 8316 lpfc_els_flush_cmd(struct lpfc_vport *vport) 8317 { 8318 LIST_HEAD(abort_list); 8319 struct lpfc_hba *phba = vport->phba; 8320 struct lpfc_sli_ring *pring; 8321 struct lpfc_iocbq *tmp_iocb, *piocb; 8322 IOCB_t *cmd = NULL; 8323 unsigned long iflags = 0; 8324 8325 lpfc_fabric_abort_vport(vport); 8326 8327 /* 8328 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 8329 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 8330 * ultimately grabs the ring_lock, the driver must splice the list into 8331 * a working list and release the locks before calling the abort. 8332 */ 8333 spin_lock_irqsave(&phba->hbalock, iflags); 8334 pring = lpfc_phba_elsring(phba); 8335 8336 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 8337 if (unlikely(!pring)) { 8338 spin_unlock_irqrestore(&phba->hbalock, iflags); 8339 return; 8340 } 8341 8342 if (phba->sli_rev == LPFC_SLI_REV4) 8343 spin_lock(&pring->ring_lock); 8344 8345 /* First we need to issue aborts to outstanding cmds on txcmpl */ 8346 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8347 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 8348 continue; 8349 8350 if (piocb->vport != vport) 8351 continue; 8352 8353 if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) 8354 continue; 8355 8356 /* On the ELS ring we can have ELS_REQUESTs or 8357 * GEN_REQUESTs waiting for a response. 8358 */ 8359 cmd = &piocb->iocb; 8360 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 8361 list_add_tail(&piocb->dlist, &abort_list); 8362 8363 /* If the link is down when flushing ELS commands 8364 * the firmware will not complete them till after 8365 * the link comes back up. This may confuse 8366 * discovery for the new link up, so we need to 8367 * change the compl routine to just clean up the iocb 8368 * and avoid any retry logic. 8369 */ 8370 if (phba->link_state == LPFC_LINK_DOWN) 8371 piocb->iocb_cmpl = lpfc_cmpl_els_link_down; 8372 } 8373 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) 8374 list_add_tail(&piocb->dlist, &abort_list); 8375 } 8376 8377 if (phba->sli_rev == LPFC_SLI_REV4) 8378 spin_unlock(&pring->ring_lock); 8379 spin_unlock_irqrestore(&phba->hbalock, iflags); 8380 8381 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
*/ 8382 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8383 spin_lock_irqsave(&phba->hbalock, iflags); 8384 list_del_init(&piocb->dlist); 8385 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 8386 spin_unlock_irqrestore(&phba->hbalock, iflags); 8387 } 8388 /* Make sure HBA is alive */ 8389 lpfc_issue_hb_tmo(phba); 8390 8391 if (!list_empty(&abort_list)) 8392 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8393 "3387 abort list for txq not empty\n"); 8394 INIT_LIST_HEAD(&abort_list); 8395 8396 spin_lock_irqsave(&phba->hbalock, iflags); 8397 if (phba->sli_rev == LPFC_SLI_REV4) 8398 spin_lock(&pring->ring_lock); 8399 8400 /* No need to abort the txq list, 8401 * just queue them up for lpfc_sli_cancel_iocbs 8402 */ 8403 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 8404 cmd = &piocb->iocb; 8405 8406 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 8407 continue; 8408 } 8409 8410 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 8411 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 8412 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 8413 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 8414 cmd->ulpCommand == CMD_ABORT_XRI_CN) 8415 continue; 8416 8417 if (piocb->vport != vport) 8418 continue; 8419 8420 list_del_init(&piocb->list); 8421 list_add_tail(&piocb->list, &abort_list); 8422 } 8423 8424 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 8425 if (vport == phba->pport) { 8426 list_for_each_entry_safe(piocb, tmp_iocb, 8427 &phba->fabric_iocb_list, list) { 8428 cmd = &piocb->iocb; 8429 list_del_init(&piocb->list); 8430 list_add_tail(&piocb->list, &abort_list); 8431 } 8432 } 8433 8434 if (phba->sli_rev == LPFC_SLI_REV4) 8435 spin_unlock(&pring->ring_lock); 8436 spin_unlock_irqrestore(&phba->hbalock, iflags); 8437 8438 /* Cancel all the IOCBs from the completions list */ 8439 lpfc_sli_cancel_iocbs(phba, &abort_list, 8440 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 8441 8442 return; 8443 } 8444 8445 /** 8446 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 8447 * @phba: pointer to lpfc hba data structure. 8448 * 8449 * This routine is used to clean up all the outstanding ELS commands on a 8450 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 8451 * routine. After that, it walks the ELS transmit queue to remove all the 8452 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 8453 * the IOCBs with the completion callback function associated, the callback 8454 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 8455 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 8456 * callback function associated, the IOCB will simply be released. Finally, 8457 * it walks the ELS transmit completion queue to issue an abort IOCB to any 8458 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 8459 * management plane IOCBs that are not part of the discovery state machine) 8460 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 8461 **/ 8462 void 8463 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 8464 { 8465 struct lpfc_vport *vport; 8466 8467 spin_lock_irq(&phba->port_list_lock); 8468 list_for_each_entry(vport, &phba->port_list, listentry) 8469 lpfc_els_flush_cmd(vport); 8470 spin_unlock_irq(&phba->port_list_lock); 8471 8472 return; 8473 } 8474 8475 /** 8476 * lpfc_send_els_failure_event - Posts an ELS command failure event 8477 * @phba: Pointer to hba context object. 
8478 * @cmdiocbp: Pointer to command iocb which reported error. 8479 * @rspiocbp: Pointer to response iocb which reported error. 8480 * 8481 * This function sends an event when there is an ELS command 8482 * failure. 8483 **/ 8484 void 8485 lpfc_send_els_failure_event(struct lpfc_hba *phba, 8486 struct lpfc_iocbq *cmdiocbp, 8487 struct lpfc_iocbq *rspiocbp) 8488 { 8489 struct lpfc_vport *vport = cmdiocbp->vport; 8490 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8491 struct lpfc_lsrjt_event lsrjt_event; 8492 struct lpfc_fabric_event_header fabric_event; 8493 struct ls_rjt stat; 8494 struct lpfc_nodelist *ndlp; 8495 uint32_t *pcmd; 8496 8497 ndlp = cmdiocbp->context1; 8498 if (!ndlp) 8499 return; 8500 8501 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 8502 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 8503 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 8504 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 8505 sizeof(struct lpfc_name)); 8506 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 8507 sizeof(struct lpfc_name)); 8508 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8509 cmdiocbp->context2)->virt); 8510 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 8511 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 8512 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 8513 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 8514 fc_host_post_vendor_event(shost, 8515 fc_get_event_number(), 8516 sizeof(lsrjt_event), 8517 (char *)&lsrjt_event, 8518 LPFC_NL_VENDOR_ID); 8519 return; 8520 } 8521 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 8522 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 8523 fabric_event.event_type = FC_REG_FABRIC_EVENT; 8524 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 8525 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 8526 else 8527 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 8528 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 8529 sizeof(struct lpfc_name)); 8530 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 8531 sizeof(struct lpfc_name)); 8532 fc_host_post_vendor_event(shost, 8533 fc_get_event_number(), 8534 sizeof(fabric_event), 8535 (char *)&fabric_event, 8536 LPFC_NL_VENDOR_ID); 8537 return; 8538 } 8539 8540 } 8541 8542 /** 8543 * lpfc_send_els_event - Posts unsolicited els event 8544 * @vport: Pointer to vport object. 8545 * @ndlp: Pointer FC node object. 8546 * @payload: ELS command code type. 8547 * 8548 * This function posts an event when there is an incoming 8549 * unsolicited ELS command. 
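 * The event buffer is allocated with GFP_KERNEL, posted to the FC
 * transport as an LPFC_NL_VENDOR_ID vendor-unique event, and freed once
 * posted. For a LOGO, the remote WWPN carried in the LOGO payload is
 * copied into the larger lpfc_logo_event buffer.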
8550 **/ 8551 static void 8552 lpfc_send_els_event(struct lpfc_vport *vport, 8553 struct lpfc_nodelist *ndlp, 8554 uint32_t *payload) 8555 { 8556 struct lpfc_els_event_header *els_data = NULL; 8557 struct lpfc_logo_event *logo_data = NULL; 8558 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8559 8560 if (*payload == ELS_CMD_LOGO) { 8561 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 8562 if (!logo_data) { 8563 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8564 "0148 Failed to allocate memory " 8565 "for LOGO event\n"); 8566 return; 8567 } 8568 els_data = &logo_data->header; 8569 } else { 8570 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 8571 GFP_KERNEL); 8572 if (!els_data) { 8573 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8574 "0149 Failed to allocate memory " 8575 "for ELS event\n"); 8576 return; 8577 } 8578 } 8579 els_data->event_type = FC_REG_ELS_EVENT; 8580 switch (*payload) { 8581 case ELS_CMD_PLOGI: 8582 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 8583 break; 8584 case ELS_CMD_PRLO: 8585 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 8586 break; 8587 case ELS_CMD_ADISC: 8588 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 8589 break; 8590 case ELS_CMD_LOGO: 8591 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 8592 /* Copy the WWPN in the LOGO payload */ 8593 memcpy(logo_data->logo_wwpn, &payload[2], 8594 sizeof(struct lpfc_name)); 8595 break; 8596 default: 8597 kfree(els_data); 8598 return; 8599 } 8600 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 8601 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 8602 if (*payload == ELS_CMD_LOGO) { 8603 fc_host_post_vendor_event(shost, 8604 fc_get_event_number(), 8605 sizeof(struct lpfc_logo_event), 8606 (char *)logo_data, 8607 LPFC_NL_VENDOR_ID); 8608 kfree(logo_data); 8609 } else { 8610 fc_host_post_vendor_event(shost, 8611 fc_get_event_number(), 8612 sizeof(struct lpfc_els_event_header), 8613 (char *)els_data, 8614 LPFC_NL_VENDOR_ID); 8615 kfree(els_data); 8616 } 8617 8618 return; 8619 } 8620 8621 8622 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 8623 FC_LS_TLV_DTAG_INIT); 8624 8625 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 8626 FC_FPIN_LI_EVT_TYPES_INIT); 8627 8628 /** 8629 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 8630 * @vport: Pointer to vport object. 8631 * @tlv: Pointer to the Link Integrity Notification Descriptor. 
8632 * 8633 * This function processes a link integrity FPIN event by 8634 * logging a message 8635 **/ 8636 static void 8637 lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv) 8638 { 8639 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 8640 const char *li_evt_str; 8641 u32 li_evt; 8642 8643 li_evt = be16_to_cpu(li->event_type); 8644 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 8645 8646 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8647 "4680 FPIN Link Integrity %s (x%x) " 8648 "Detecting PN x%016llx Attached PN x%016llx " 8649 "Duration %d mSecs Count %d Port Cnt %d\n", 8650 li_evt_str, li_evt, 8651 be64_to_cpu(li->detecting_wwpn), 8652 be64_to_cpu(li->attached_wwpn), 8653 be32_to_cpu(li->event_threshold), 8654 be32_to_cpu(li->event_count), 8655 be32_to_cpu(li->pname_count)); 8656 } 8657 8658 static void 8659 lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin, 8660 u32 fpin_length) 8661 { 8662 struct fc_tlv_desc *tlv; 8663 const char *dtag_nm; 8664 uint32_t desc_cnt = 0, bytes_remain; 8665 u32 dtag; 8666 8667 /* FPINs handled only if we are in the right discovery state */ 8668 if (vport->port_state < LPFC_DISC_AUTH) 8669 return; 8670 8671 /* make sure there is the full fpin header */ 8672 if (fpin_length < sizeof(struct fc_els_fpin)) 8673 return; 8674 8675 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 8676 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 8677 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 8678 8679 /* process each descriptor */ 8680 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 8681 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 8682 8683 dtag = be32_to_cpu(tlv->desc_tag); 8684 switch (dtag) { 8685 case ELS_DTAG_LNK_INTEGRITY: 8686 lpfc_els_rcv_fpin_li(vport, tlv); 8687 break; 8688 default: 8689 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 8690 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8691 "4678 skipped FPIN descriptor[%d]: " 8692 "tag x%x (%s)\n", 8693 desc_cnt, dtag, dtag_nm); 8694 break; 8695 } 8696 8697 desc_cnt++; 8698 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 8699 tlv = fc_tlv_next_desc(tlv); 8700 } 8701 8702 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length, 8703 (char *)fpin); 8704 } 8705 8706 /** 8707 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 8708 * @phba: pointer to lpfc hba data structure. 8709 * @pring: pointer to a SLI ring. 8710 * @vport: pointer to a host virtual N_Port data structure. 8711 * @elsiocb: pointer to lpfc els command iocb data structure. 8712 * 8713 * This routine is used for processing the IOCB associated with a unsolicited 8714 * event. It first determines whether there is an existing ndlp that matches 8715 * the DID from the unsolicited IOCB. If not, it will create a new one with 8716 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 8717 * IOCB is then used to invoke the proper routine and to set up proper state 8718 * of the discovery state machine. 
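 * Commands that are not permitted in the current port state are answered
 * with an LS_RJT, and a node that was created only to service the exchange
 * is released again through the NLP_EVT_DEVICE_RM event.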
8719 **/ 8720 static void 8721 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8722 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 8723 { 8724 struct lpfc_nodelist *ndlp; 8725 struct ls_rjt stat; 8726 uint32_t *payload, payload_len; 8727 uint32_t cmd, did, newnode; 8728 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 8729 IOCB_t *icmd = &elsiocb->iocb; 8730 LPFC_MBOXQ_t *mbox; 8731 8732 if (!vport || !(elsiocb->context2)) 8733 goto dropit; 8734 8735 newnode = 0; 8736 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 8737 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 8738 cmd = *payload; 8739 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 8740 lpfc_post_buffer(phba, pring, 1); 8741 8742 did = icmd->un.rcvels.remoteID; 8743 if (icmd->ulpStatus) { 8744 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8745 "RCV Unsol ELS: status:x%x/x%x did:x%x", 8746 icmd->ulpStatus, icmd->un.ulpWord[4], did); 8747 goto dropit; 8748 } 8749 8750 /* Check to see if link went down during discovery */ 8751 if (lpfc_els_chk_latt(vport)) 8752 goto dropit; 8753 8754 /* Ignore traffic received during vport shutdown. */ 8755 if (vport->load_flag & FC_UNLOADING) 8756 goto dropit; 8757 8758 /* If NPort discovery is delayed drop incoming ELS */ 8759 if ((vport->fc_flag & FC_DISC_DELAYED) && 8760 (cmd != ELS_CMD_PLOGI)) 8761 goto dropit; 8762 8763 ndlp = lpfc_findnode_did(vport, did); 8764 if (!ndlp) { 8765 /* Cannot find existing Fabric ndlp, so allocate a new one */ 8766 ndlp = lpfc_nlp_init(vport, did); 8767 if (!ndlp) 8768 goto dropit; 8769 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8770 newnode = 1; 8771 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 8772 ndlp->nlp_type |= NLP_FABRIC; 8773 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 8774 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8775 newnode = 1; 8776 } 8777 8778 phba->fc_stat.elsRcvFrame++; 8779 8780 /* 8781 * Do not process any unsolicited ELS commands 8782 * if the ndlp is in DEV_LOSS 8783 */ 8784 spin_lock_irq(&ndlp->lock); 8785 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 8786 spin_unlock_irq(&ndlp->lock); 8787 if (newnode) 8788 lpfc_nlp_put(ndlp); 8789 goto dropit; 8790 } 8791 spin_unlock_irq(&ndlp->lock); 8792 8793 elsiocb->context1 = lpfc_nlp_get(ndlp); 8794 if (!elsiocb->context1) 8795 goto dropit; 8796 elsiocb->vport = vport; 8797 8798 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 8799 cmd &= ELS_CMD_MASK; 8800 } 8801 /* ELS command <elsCmd> received from NPORT <did> */ 8802 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8803 "0112 ELS command x%x received from NPORT x%x " 8804 "refcnt %d Data: x%x x%x x%x x%x\n", 8805 cmd, did, kref_read(&ndlp->kref), vport->port_state, 8806 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 8807 8808 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 8809 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 8810 (cmd != ELS_CMD_FLOGI) && 8811 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 8812 rjt_err = LSRJT_LOGICAL_BSY; 8813 rjt_exp = LSEXP_NOTHING_MORE; 8814 goto lsrjt; 8815 } 8816 8817 switch (cmd) { 8818 case ELS_CMD_PLOGI: 8819 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8820 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 8821 did, vport->port_state, ndlp->nlp_flag); 8822 8823 phba->fc_stat.elsRcvPLOGI++; 8824 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 8825 if (phba->sli_rev == LPFC_SLI_REV4 && 8826 (phba->pport->fc_flag & FC_PT2PT)) { 8827 vport->fc_prevDID = vport->fc_myDID; 8828 /* Our DID needs 
to be updated before registering 8829 * the vfi. This is done in lpfc_rcv_plogi but 8830 * that is called after the reg_vfi. 8831 */ 8832 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; 8833 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8834 "3312 Remote port assigned DID x%x " 8835 "%x\n", vport->fc_myDID, 8836 vport->fc_prevDID); 8837 } 8838 8839 lpfc_send_els_event(vport, ndlp, payload); 8840 8841 /* If Nport discovery is delayed, reject PLOGIs */ 8842 if (vport->fc_flag & FC_DISC_DELAYED) { 8843 rjt_err = LSRJT_UNABLE_TPC; 8844 rjt_exp = LSEXP_NOTHING_MORE; 8845 break; 8846 } 8847 8848 if (vport->port_state < LPFC_DISC_AUTH) { 8849 if (!(phba->pport->fc_flag & FC_PT2PT) || 8850 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 8851 rjt_err = LSRJT_UNABLE_TPC; 8852 rjt_exp = LSEXP_NOTHING_MORE; 8853 break; 8854 } 8855 } 8856 8857 spin_lock_irq(&ndlp->lock); 8858 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 8859 spin_unlock_irq(&ndlp->lock); 8860 8861 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8862 NLP_EVT_RCV_PLOGI); 8863 8864 break; 8865 case ELS_CMD_FLOGI: 8866 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8867 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 8868 did, vport->port_state, ndlp->nlp_flag); 8869 8870 phba->fc_stat.elsRcvFLOGI++; 8871 8872 /* If the driver believes fabric discovery is done and is ready, 8873 * bounce the link. There is some descrepancy. 8874 */ 8875 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 8876 vport->fc_flag & FC_PT2PT && 8877 vport->rcv_flogi_cnt >= 1) { 8878 rjt_err = LSRJT_LOGICAL_BSY; 8879 rjt_exp = LSEXP_NOTHING_MORE; 8880 init_link++; 8881 goto lsrjt; 8882 } 8883 8884 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 8885 if (newnode) 8886 lpfc_disc_state_machine(vport, ndlp, NULL, 8887 NLP_EVT_DEVICE_RM); 8888 break; 8889 case ELS_CMD_LOGO: 8890 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8891 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 8892 did, vport->port_state, ndlp->nlp_flag); 8893 8894 phba->fc_stat.elsRcvLOGO++; 8895 lpfc_send_els_event(vport, ndlp, payload); 8896 if (vport->port_state < LPFC_DISC_AUTH) { 8897 rjt_err = LSRJT_UNABLE_TPC; 8898 rjt_exp = LSEXP_NOTHING_MORE; 8899 break; 8900 } 8901 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 8902 break; 8903 case ELS_CMD_PRLO: 8904 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8905 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 8906 did, vport->port_state, ndlp->nlp_flag); 8907 8908 phba->fc_stat.elsRcvPRLO++; 8909 lpfc_send_els_event(vport, ndlp, payload); 8910 if (vport->port_state < LPFC_DISC_AUTH) { 8911 rjt_err = LSRJT_UNABLE_TPC; 8912 rjt_exp = LSEXP_NOTHING_MORE; 8913 break; 8914 } 8915 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 8916 break; 8917 case ELS_CMD_LCB: 8918 phba->fc_stat.elsRcvLCB++; 8919 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 8920 break; 8921 case ELS_CMD_RDP: 8922 phba->fc_stat.elsRcvRDP++; 8923 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 8924 break; 8925 case ELS_CMD_RSCN: 8926 phba->fc_stat.elsRcvRSCN++; 8927 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 8928 if (newnode) 8929 lpfc_disc_state_machine(vport, ndlp, NULL, 8930 NLP_EVT_DEVICE_RM); 8931 break; 8932 case ELS_CMD_ADISC: 8933 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8934 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 8935 did, vport->port_state, ndlp->nlp_flag); 8936 8937 lpfc_send_els_event(vport, ndlp, payload); 8938 phba->fc_stat.elsRcvADISC++; 8939 if (vport->port_state < LPFC_DISC_AUTH) { 8940 rjt_err = LSRJT_UNABLE_TPC; 8941 rjt_exp = LSEXP_NOTHING_MORE; 8942 break; 8943 } 8944 
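 /* Hand the ADISC to the discovery state machine */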
lpfc_disc_state_machine(vport, ndlp, elsiocb, 8945 NLP_EVT_RCV_ADISC); 8946 break; 8947 case ELS_CMD_PDISC: 8948 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8949 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 8950 did, vport->port_state, ndlp->nlp_flag); 8951 8952 phba->fc_stat.elsRcvPDISC++; 8953 if (vport->port_state < LPFC_DISC_AUTH) { 8954 rjt_err = LSRJT_UNABLE_TPC; 8955 rjt_exp = LSEXP_NOTHING_MORE; 8956 break; 8957 } 8958 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8959 NLP_EVT_RCV_PDISC); 8960 break; 8961 case ELS_CMD_FARPR: 8962 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8963 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 8964 did, vport->port_state, ndlp->nlp_flag); 8965 8966 phba->fc_stat.elsRcvFARPR++; 8967 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 8968 break; 8969 case ELS_CMD_FARP: 8970 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8971 "RCV FARP: did:x%x/ste:x%x flg:x%x", 8972 did, vport->port_state, ndlp->nlp_flag); 8973 8974 phba->fc_stat.elsRcvFARP++; 8975 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 8976 break; 8977 case ELS_CMD_FAN: 8978 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8979 "RCV FAN: did:x%x/ste:x%x flg:x%x", 8980 did, vport->port_state, ndlp->nlp_flag); 8981 8982 phba->fc_stat.elsRcvFAN++; 8983 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 8984 break; 8985 case ELS_CMD_PRLI: 8986 case ELS_CMD_NVMEPRLI: 8987 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8988 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 8989 did, vport->port_state, ndlp->nlp_flag); 8990 8991 phba->fc_stat.elsRcvPRLI++; 8992 if ((vport->port_state < LPFC_DISC_AUTH) && 8993 (vport->fc_flag & FC_FABRIC)) { 8994 rjt_err = LSRJT_UNABLE_TPC; 8995 rjt_exp = LSEXP_NOTHING_MORE; 8996 break; 8997 } 8998 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 8999 break; 9000 case ELS_CMD_LIRR: 9001 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9002 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 9003 did, vport->port_state, ndlp->nlp_flag); 9004 9005 phba->fc_stat.elsRcvLIRR++; 9006 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 9007 if (newnode) 9008 lpfc_disc_state_machine(vport, ndlp, NULL, 9009 NLP_EVT_DEVICE_RM); 9010 break; 9011 case ELS_CMD_RLS: 9012 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9013 "RCV RLS: did:x%x/ste:x%x flg:x%x", 9014 did, vport->port_state, ndlp->nlp_flag); 9015 9016 phba->fc_stat.elsRcvRLS++; 9017 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 9018 if (newnode) 9019 lpfc_disc_state_machine(vport, ndlp, NULL, 9020 NLP_EVT_DEVICE_RM); 9021 break; 9022 case ELS_CMD_RPL: 9023 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9024 "RCV RPL: did:x%x/ste:x%x flg:x%x", 9025 did, vport->port_state, ndlp->nlp_flag); 9026 9027 phba->fc_stat.elsRcvRPL++; 9028 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 9029 if (newnode) 9030 lpfc_disc_state_machine(vport, ndlp, NULL, 9031 NLP_EVT_DEVICE_RM); 9032 break; 9033 case ELS_CMD_RNID: 9034 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9035 "RCV RNID: did:x%x/ste:x%x flg:x%x", 9036 did, vport->port_state, ndlp->nlp_flag); 9037 9038 phba->fc_stat.elsRcvRNID++; 9039 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 9040 if (newnode) 9041 lpfc_disc_state_machine(vport, ndlp, NULL, 9042 NLP_EVT_DEVICE_RM); 9043 break; 9044 case ELS_CMD_RTV: 9045 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9046 "RCV RTV: did:x%x/ste:x%x flg:x%x", 9047 did, vport->port_state, ndlp->nlp_flag); 9048 phba->fc_stat.elsRcvRTV++; 9049 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 9050 if (newnode) 9051 lpfc_disc_state_machine(vport, ndlp, NULL, 9052 
NLP_EVT_DEVICE_RM); 9053 break; 9054 case ELS_CMD_RRQ: 9055 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9056 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 9057 did, vport->port_state, ndlp->nlp_flag); 9058 9059 phba->fc_stat.elsRcvRRQ++; 9060 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 9061 if (newnode) 9062 lpfc_disc_state_machine(vport, ndlp, NULL, 9063 NLP_EVT_DEVICE_RM); 9064 break; 9065 case ELS_CMD_ECHO: 9066 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9067 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 9068 did, vport->port_state, ndlp->nlp_flag); 9069 9070 phba->fc_stat.elsRcvECHO++; 9071 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 9072 if (newnode) 9073 lpfc_disc_state_machine(vport, ndlp, NULL, 9074 NLP_EVT_DEVICE_RM); 9075 break; 9076 case ELS_CMD_REC: 9077 /* receive this due to exchange closed */ 9078 rjt_err = LSRJT_UNABLE_TPC; 9079 rjt_exp = LSEXP_INVALID_OX_RX; 9080 break; 9081 case ELS_CMD_FPIN: 9082 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9083 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 9084 did, vport->port_state, ndlp->nlp_flag); 9085 9086 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 9087 payload_len); 9088 9089 /* There are no replies, so no rjt codes */ 9090 break; 9091 case ELS_CMD_RDF: 9092 phba->fc_stat.elsRcvRDF++; 9093 /* Accept RDF only from fabric controller */ 9094 if (did != Fabric_Cntl_DID) { 9095 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 9096 "1115 Received RDF from invalid DID " 9097 "x%x\n", did); 9098 rjt_err = LSRJT_PROTOCOL_ERR; 9099 rjt_exp = LSEXP_NOTHING_MORE; 9100 goto lsrjt; 9101 } 9102 9103 lpfc_els_rcv_rdf(vport, elsiocb, ndlp); 9104 break; 9105 default: 9106 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 9107 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 9108 cmd, did, vport->port_state); 9109 9110 /* Unsupported ELS command, reject */ 9111 rjt_err = LSRJT_CMD_UNSUPPORTED; 9112 rjt_exp = LSEXP_NOTHING_MORE; 9113 9114 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 9115 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9116 "0115 Unknown ELS command x%x " 9117 "received from NPORT x%x\n", cmd, did); 9118 if (newnode) 9119 lpfc_disc_state_machine(vport, ndlp, NULL, 9120 NLP_EVT_DEVICE_RM); 9121 break; 9122 } 9123 9124 lsrjt: 9125 /* check if need to LS_RJT received ELS cmd */ 9126 if (rjt_err) { 9127 memset(&stat, 0, sizeof(stat)); 9128 stat.un.b.lsRjtRsnCode = rjt_err; 9129 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 9130 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 9131 NULL); 9132 /* Remove the reference from above for new nodes. */ 9133 if (newnode) 9134 lpfc_disc_state_machine(vport, ndlp, NULL, 9135 NLP_EVT_DEVICE_RM); 9136 } 9137 9138 /* Release the reference on this elsiocb, not the ndlp. */ 9139 lpfc_nlp_put(elsiocb->context1); 9140 elsiocb->context1 = NULL; 9141 9142 /* Special case. Driver received an unsolicited command that 9143 * unsupportable given the driver's current state. Reset the 9144 * link and start over. 
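 * The reset is done by calling lpfc_linkdown() and then issuing an
 * INIT_LINK mailbox command with the configured topology and link speed;
 * if no mailbox memory is available the link reset is skipped.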
9145 */ 9146 if (init_link) { 9147 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9148 if (!mbox) 9149 return; 9150 lpfc_linkdown(phba); 9151 lpfc_init_link(phba, mbox, 9152 phba->cfg_topology, 9153 phba->cfg_link_speed); 9154 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 9155 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 9156 mbox->vport = vport; 9157 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 9158 MBX_NOT_FINISHED) 9159 mempool_free(mbox, phba->mbox_mem_pool); 9160 } 9161 9162 return; 9163 9164 dropit: 9165 if (vport && !(vport->load_flag & FC_UNLOADING)) 9166 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9167 "0111 Dropping received ELS cmd " 9168 "Data: x%x x%x x%x\n", 9169 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 9170 phba->fc_stat.elsRcvDrop++; 9171 } 9172 9173 /** 9174 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 9175 * @phba: pointer to lpfc hba data structure. 9176 * @pring: pointer to a SLI ring. 9177 * @elsiocb: pointer to lpfc els iocb data structure. 9178 * 9179 * This routine is used to process an unsolicited event received from a SLI 9180 * (Service Level Interface) ring. The actual processing of the data buffer 9181 * associated with the unsolicited event is done by invoking the routine 9182 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 9183 * SLI ring on which the unsolicited event was received. 9184 **/ 9185 void 9186 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9187 struct lpfc_iocbq *elsiocb) 9188 { 9189 struct lpfc_vport *vport = phba->pport; 9190 IOCB_t *icmd = &elsiocb->iocb; 9191 dma_addr_t paddr; 9192 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 9193 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 9194 9195 elsiocb->context1 = NULL; 9196 elsiocb->context2 = NULL; 9197 elsiocb->context3 = NULL; 9198 9199 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 9200 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 9201 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 9202 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 9203 IOERR_RCV_BUFFER_WAITING) { 9204 phba->fc_stat.NoRcvBuf++; 9205 /* Not enough posted buffers; Try posting more buffers */ 9206 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 9207 lpfc_post_buffer(phba, pring, 0); 9208 return; 9209 } 9210 9211 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 9212 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 9213 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 9214 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 9215 vport = phba->pport; 9216 else 9217 vport = lpfc_find_vport_by_vpid(phba, 9218 icmd->unsli3.rcvsli3.vpi); 9219 } 9220 9221 /* If there are no BDEs associated 9222 * with this IOCB, there is nothing to do. 9223 */ 9224 if (icmd->ulpBdeCount == 0) 9225 return; 9226 9227 /* type of ELS cmd is first 32bit word 9228 * in packet 9229 */ 9230 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 9231 elsiocb->context2 = bdeBuf1; 9232 } else { 9233 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 9234 icmd->un.cont64[0].addrLow); 9235 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 9236 paddr); 9237 } 9238 9239 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 9240 /* 9241 * The different unsolicited event handlers would tell us 9242 * if they are done with "mp" by setting context2 to NULL. 
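 * Any buffer still attached to context2 at this point is returned with
 * lpfc_in_buf_free() before the next buffer, if any, is processed.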
9243 */ 9244 if (elsiocb->context2) { 9245 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 9246 elsiocb->context2 = NULL; 9247 } 9248 9249 /* RCV_ELS64_CX provides for 2 BDEs - process 2nd if included */ 9250 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 9251 icmd->ulpBdeCount == 2) { 9252 elsiocb->context2 = bdeBuf2; 9253 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 9254 /* free mp if we are done with it */ 9255 if (elsiocb->context2) { 9256 lpfc_in_buf_free(phba, elsiocb->context2); 9257 elsiocb->context2 = NULL; 9258 } 9259 } 9260 } 9261 9262 static void 9263 lpfc_start_fdmi(struct lpfc_vport *vport) 9264 { 9265 struct lpfc_nodelist *ndlp; 9266 9267 /* If this is the first time, allocate an ndlp and initialize 9268 * it. Otherwise, make sure the node is enabled and then do the 9269 * login. 9270 */ 9271 ndlp = lpfc_findnode_did(vport, FDMI_DID); 9272 if (!ndlp) { 9273 ndlp = lpfc_nlp_init(vport, FDMI_DID); 9274 if (ndlp) { 9275 ndlp->nlp_type |= NLP_FABRIC; 9276 } else { 9277 return; 9278 } 9279 } 9280 9281 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 9282 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9283 } 9284 9285 /** 9286 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 9287 * @phba: pointer to lpfc hba data structure. 9288 * @vport: pointer to a virtual N_Port data structure. 9289 * 9290 * This routine issues a Port Login (PLOGI) to the Name Server with 9291 * State Change Request (SCR) for a @vport. This routine will create an 9292 * ndlp for the Name Server associated with the @vport if such a node does 9293 * not already exist. The PLOGI to the Name Server is issued by invoking the 9294 * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface 9295 * (FDMI) is configured for the @vport, an FDMI node will be created and 9296 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine. 9297 **/ 9298 void 9299 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 9300 { 9301 struct lpfc_nodelist *ndlp; 9302 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9303 9304 /* 9305 * If the lpfc_delay_discovery parameter is set and the clean address 9306 * bit is cleared and the fc fabric parameters have changed, delay FC NPort 9307 * discovery.
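 * The delay is implemented by arming the delayed_disc_tmo timer for
 * fc_ratov seconds and returning without issuing the NameServer PLOGI.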
9308 */ 9309 spin_lock_irq(shost->host_lock); 9310 if (vport->fc_flag & FC_DISC_DELAYED) { 9311 spin_unlock_irq(shost->host_lock); 9312 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9313 "3334 Delay fc port discovery for %d secs\n", 9314 phba->fc_ratov); 9315 mod_timer(&vport->delayed_disc_tmo, 9316 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 9317 return; 9318 } 9319 spin_unlock_irq(shost->host_lock); 9320 9321 ndlp = lpfc_findnode_did(vport, NameServer_DID); 9322 if (!ndlp) { 9323 ndlp = lpfc_nlp_init(vport, NameServer_DID); 9324 if (!ndlp) { 9325 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9326 lpfc_disc_start(vport); 9327 return; 9328 } 9329 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9330 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9331 "0251 NameServer login: no memory\n"); 9332 return; 9333 } 9334 } 9335 9336 ndlp->nlp_type |= NLP_FABRIC; 9337 9338 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 9339 9340 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 9341 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9342 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9343 "0252 Cannot issue NameServer login\n"); 9344 return; 9345 } 9346 9347 if ((phba->cfg_enable_SmartSAN || 9348 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 9349 (vport->load_flag & FC_ALLOW_FDMI)) 9350 lpfc_start_fdmi(vport); 9351 } 9352 9353 /** 9354 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 9355 * @phba: pointer to lpfc hba data structure. 9356 * @pmb: pointer to the driver internal queue element for mailbox command. 9357 * 9358 * This routine is the completion callback function to register new vport 9359 * mailbox command. If the new vport mailbox command completes successfully, 9360 * the fabric registration login shall be performed on physical port (the 9361 * new vport created is actually a physical port, with VPI 0) or the port 9362 * login to Name Server for State Change Request (SCR) will be performed 9363 * on virtual port (real virtual port, with VPI greater than 0). 
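 * On a REG_VPI failure the handler either gives up on the registration
 * (unsupported feature, max_vpi exceeded, or a link event since CLEAR_LA),
 * re-initializes the VPI when an invalid VPI status is returned, or
 * unregisters the RPIs and VPI and restarts discovery with FLOGI/FDISC.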
9364 **/ 9365 static void 9366 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 9367 { 9368 struct lpfc_vport *vport = pmb->vport; 9369 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9370 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 9371 MAILBOX_t *mb = &pmb->u.mb; 9372 int rc; 9373 9374 spin_lock_irq(shost->host_lock); 9375 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 9376 spin_unlock_irq(shost->host_lock); 9377 9378 if (mb->mbxStatus) { 9379 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9380 "0915 Register VPI failed : Status: x%x" 9381 " upd bit: x%x \n", mb->mbxStatus, 9382 mb->un.varRegVpi.upd); 9383 if (phba->sli_rev == LPFC_SLI_REV4 && 9384 mb->un.varRegVpi.upd) 9385 goto mbox_err_exit ; 9386 9387 switch (mb->mbxStatus) { 9388 case 0x11: /* unsupported feature */ 9389 case 0x9603: /* max_vpi exceeded */ 9390 case 0x9602: /* Link event since CLEAR_LA */ 9391 /* giving up on vport registration */ 9392 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9393 spin_lock_irq(shost->host_lock); 9394 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 9395 spin_unlock_irq(shost->host_lock); 9396 lpfc_can_disctmo(vport); 9397 break; 9398 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 9399 case 0x20: 9400 spin_lock_irq(shost->host_lock); 9401 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9402 spin_unlock_irq(shost->host_lock); 9403 lpfc_init_vpi(phba, pmb, vport->vpi); 9404 pmb->vport = vport; 9405 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 9406 rc = lpfc_sli_issue_mbox(phba, pmb, 9407 MBX_NOWAIT); 9408 if (rc == MBX_NOT_FINISHED) { 9409 lpfc_printf_vlog(vport, KERN_ERR, 9410 LOG_TRACE_EVENT, 9411 "2732 Failed to issue INIT_VPI" 9412 " mailbox command\n"); 9413 } else { 9414 lpfc_nlp_put(ndlp); 9415 return; 9416 } 9417 fallthrough; 9418 default: 9419 /* Try to recover from this error */ 9420 if (phba->sli_rev == LPFC_SLI_REV4) 9421 lpfc_sli4_unreg_all_rpis(vport); 9422 lpfc_mbx_unreg_vpi(vport); 9423 spin_lock_irq(shost->host_lock); 9424 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9425 spin_unlock_irq(shost->host_lock); 9426 if (mb->mbxStatus == MBX_NOT_FINISHED) 9427 break; 9428 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 9429 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 9430 if (phba->sli_rev == LPFC_SLI_REV4) 9431 lpfc_issue_init_vfi(vport); 9432 else 9433 lpfc_initial_flogi(vport); 9434 } else { 9435 lpfc_initial_fdisc(vport); 9436 } 9437 break; 9438 } 9439 } else { 9440 spin_lock_irq(shost->host_lock); 9441 vport->vpi_state |= LPFC_VPI_REGISTERED; 9442 spin_unlock_irq(shost->host_lock); 9443 if (vport == phba->pport) { 9444 if (phba->sli_rev < LPFC_SLI_REV4) 9445 lpfc_issue_fabric_reglogin(vport); 9446 else { 9447 /* 9448 * If the physical port is instantiated using 9449 * FDISC, do not start vport discovery. 9450 */ 9451 if (vport->port_state != LPFC_FDISC) 9452 lpfc_start_fdiscs(phba); 9453 lpfc_do_scr_ns_plogi(phba, vport); 9454 } 9455 } else { 9456 lpfc_do_scr_ns_plogi(phba, vport); 9457 } 9458 } 9459 mbox_err_exit: 9460 /* Now, we decrement the ndlp reference count held for this 9461 * callback function 9462 */ 9463 lpfc_nlp_put(ndlp); 9464 9465 mempool_free(pmb, phba->mbox_mem_pool); 9466 return; 9467 } 9468 9469 /** 9470 * lpfc_register_new_vport - Register a new vport with a HBA 9471 * @phba: pointer to lpfc hba data structure. 9472 * @vport: pointer to a host virtual N_Port data structure. 9473 * @ndlp: pointer to a node-list data structure. 9474 * 9475 * This routine registers the @vport as a new virtual port with a HBA. 
9476 * It is done through a registering vpi mailbox command. 9477 **/ 9478 void 9479 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 9480 struct lpfc_nodelist *ndlp) 9481 { 9482 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9483 LPFC_MBOXQ_t *mbox; 9484 9485 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9486 if (mbox) { 9487 lpfc_reg_vpi(vport, mbox); 9488 mbox->vport = vport; 9489 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 9490 if (!mbox->ctx_ndlp) { 9491 mempool_free(mbox, phba->mbox_mem_pool); 9492 goto mbox_err_exit; 9493 } 9494 9495 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 9496 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 9497 == MBX_NOT_FINISHED) { 9498 /* mailbox command not success, decrement ndlp 9499 * reference count for this command 9500 */ 9501 lpfc_nlp_put(ndlp); 9502 mempool_free(mbox, phba->mbox_mem_pool); 9503 9504 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9505 "0253 Register VPI: Can't send mbox\n"); 9506 goto mbox_err_exit; 9507 } 9508 } else { 9509 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9510 "0254 Register VPI: no memory\n"); 9511 goto mbox_err_exit; 9512 } 9513 return; 9514 9515 mbox_err_exit: 9516 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9517 spin_lock_irq(shost->host_lock); 9518 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 9519 spin_unlock_irq(shost->host_lock); 9520 return; 9521 } 9522 9523 /** 9524 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 9525 * @phba: pointer to lpfc hba data structure. 9526 * 9527 * This routine cancels the retry delay timers to all the vports. 9528 **/ 9529 void 9530 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 9531 { 9532 struct lpfc_vport **vports; 9533 struct lpfc_nodelist *ndlp; 9534 uint32_t link_state; 9535 int i; 9536 9537 /* Treat this failure as linkdown for all vports */ 9538 link_state = phba->link_state; 9539 lpfc_linkdown(phba); 9540 phba->link_state = link_state; 9541 9542 vports = lpfc_create_vport_work_array(phba); 9543 9544 if (vports) { 9545 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 9546 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 9547 if (ndlp) 9548 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 9549 lpfc_els_flush_cmd(vports[i]); 9550 } 9551 lpfc_destroy_vport_work_array(phba, vports); 9552 } 9553 } 9554 9555 /** 9556 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 9557 * @phba: pointer to lpfc hba data structure. 9558 * 9559 * This routine abort all pending discovery commands and 9560 * start a timer to retry FLOGI for the physical port 9561 * discovery. 9562 **/ 9563 void 9564 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 9565 { 9566 struct lpfc_nodelist *ndlp; 9567 9568 /* Cancel the all vports retry delay retry timers */ 9569 lpfc_cancel_all_vport_retry_delay_timer(phba); 9570 9571 /* If fabric require FLOGI, then re-instantiate physical login */ 9572 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 9573 if (!ndlp) 9574 return; 9575 9576 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 9577 spin_lock_irq(&ndlp->lock); 9578 ndlp->nlp_flag |= NLP_DELAY_TMO; 9579 spin_unlock_irq(&ndlp->lock); 9580 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 9581 phba->pport->port_state = LPFC_FLOGI; 9582 return; 9583 } 9584 9585 /** 9586 * lpfc_fabric_login_reqd - Check if FLOGI required. 9587 * @phba: pointer to lpfc hba data structure. 9588 * @cmdiocb: pointer to FDISC command iocb. 9589 * @rspiocb: pointer to FDISC response iocb. 
9590 * 9591 * This routine checks if a FLOGI is required for FDISC 9592 * to succeed. 9593 **/ 9594 static int 9595 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 9596 struct lpfc_iocbq *cmdiocb, 9597 struct lpfc_iocbq *rspiocb) 9598 { 9599 9600 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) || 9601 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED)) 9602 return 0; 9603 else 9604 return 1; 9605 } 9606 9607 /** 9608 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 9609 * @phba: pointer to lpfc hba data structure. 9610 * @cmdiocb: pointer to lpfc command iocb data structure. 9611 * @rspiocb: pointer to lpfc response iocb data structure. 9612 * 9613 * This routine is the completion callback function for a Fabric Discover 9614 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 9615 * single threaded, each FDISC completion callback function will reset 9616 * the discovery timer for all vports so that the timers will not time out 9617 * unnecessarily. The function checks the FDISC IOCB status. If an error is 9618 * detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the 9619 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID 9620 * assigned to the vport has been changed with the completion of the FDISC 9621 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 9622 * are unregistered from the HBA, and then the lpfc_register_new_vport() 9623 * routine is invoked to register the new vport with the HBA. Otherwise, the 9624 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 9625 * Server for State Change Request (SCR). 9626 **/ 9627 static void 9628 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9629 struct lpfc_iocbq *rspiocb) 9630 { 9631 struct lpfc_vport *vport = cmdiocb->vport; 9632 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9633 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 9634 struct lpfc_nodelist *np; 9635 struct lpfc_nodelist *next_np; 9636 IOCB_t *irsp = &rspiocb->iocb; 9637 struct lpfc_iocbq *piocb; 9638 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 9639 struct serv_parm *sp; 9640 uint8_t fabric_param_changed; 9641 9642 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9643 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 9644 irsp->ulpStatus, irsp->un.ulpWord[4], 9645 vport->fc_prevDID); 9646 /* Since all FDISCs are being single threaded, we 9647 * must reset the discovery timer for ALL vports 9648 * waiting to send FDISC when one completes. 9649 */ 9650 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 9651 lpfc_set_disctmo(piocb->vport); 9652 } 9653 9654 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9655 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 9656 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 9657 9658 if (irsp->ulpStatus) { 9659 9660 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 9661 lpfc_retry_pport_discovery(phba); 9662 goto out; 9663 } 9664 9665 /* Check for retry */ 9666 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 9667 goto out; 9668 /* FDISC failed */ 9669 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9670 "0126 FDISC failed.
(x%x/x%x)\n", 9671 irsp->ulpStatus, irsp->un.ulpWord[4]); 9672 goto fdisc_failed; 9673 } 9674 spin_lock_irq(shost->host_lock); 9675 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 9676 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 9677 vport->fc_flag |= FC_FABRIC; 9678 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 9679 vport->fc_flag |= FC_PUBLIC_LOOP; 9680 spin_unlock_irq(shost->host_lock); 9681 9682 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 9683 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 9684 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 9685 if (!prsp) 9686 goto out; 9687 sp = prsp->virt + sizeof(uint32_t); 9688 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 9689 memcpy(&vport->fabric_portname, &sp->portName, 9690 sizeof(struct lpfc_name)); 9691 memcpy(&vport->fabric_nodename, &sp->nodeName, 9692 sizeof(struct lpfc_name)); 9693 if (fabric_param_changed && 9694 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9695 /* If our NportID changed, we need to ensure all 9696 * remaining NPORTs get unreg_login'ed so we can 9697 * issue unreg_vpi. 9698 */ 9699 list_for_each_entry_safe(np, next_np, 9700 &vport->fc_nodes, nlp_listp) { 9701 if ((np->nlp_state != NLP_STE_NPR_NODE) || 9702 !(np->nlp_flag & NLP_NPR_ADISC)) 9703 continue; 9704 spin_lock_irq(&ndlp->lock); 9705 np->nlp_flag &= ~NLP_NPR_ADISC; 9706 spin_unlock_irq(&ndlp->lock); 9707 lpfc_unreg_rpi(vport, np); 9708 } 9709 lpfc_cleanup_pending_mbox(vport); 9710 9711 if (phba->sli_rev == LPFC_SLI_REV4) 9712 lpfc_sli4_unreg_all_rpis(vport); 9713 9714 lpfc_mbx_unreg_vpi(vport); 9715 spin_lock_irq(shost->host_lock); 9716 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9717 if (phba->sli_rev == LPFC_SLI_REV4) 9718 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 9719 else 9720 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 9721 spin_unlock_irq(shost->host_lock); 9722 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 9723 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9724 /* 9725 * Driver needs to re-reg VPI in order for f/w 9726 * to update the MAC address. 9727 */ 9728 lpfc_register_new_vport(phba, vport, ndlp); 9729 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 9730 goto out; 9731 } 9732 9733 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 9734 lpfc_issue_init_vpi(vport); 9735 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 9736 lpfc_register_new_vport(phba, vport, ndlp); 9737 else 9738 lpfc_do_scr_ns_plogi(phba, vport); 9739 9740 /* The FDISC completed successfully. Move the fabric ndlp to 9741 * UNMAPPED state and register with the transport. 9742 */ 9743 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 9744 goto out; 9745 9746 fdisc_failed: 9747 if (vport->fc_vport && 9748 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 9749 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9750 /* Cancel discovery timer */ 9751 lpfc_can_disctmo(vport); 9752 out: 9753 lpfc_els_free_iocb(phba, cmdiocb); 9754 lpfc_nlp_put(ndlp); 9755 } 9756 9757 /** 9758 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 9759 * @vport: pointer to a virtual N_Port data structure. 9760 * @ndlp: pointer to a node-list data structure. 9761 * @retry: number of retries to the command IOCB. 9762 * 9763 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 9764 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 9765 * routine to issue the IOCB, which makes sure only one outstanding fabric 9766 * IOCB will be sent off HBA at any given time. 
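 * The FDISC payload is built from the physical port's service parameters,
 * with the common service parameters adjusted for a fabric login, and
 * vport->fc_myDID is cleared before the command is sent.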
9767 * 9768 * Note that the ndlp reference count will be incremented by 1 for holding the 9769 * ndlp and the reference to ndlp will be stored into the context1 field of 9770 * the IOCB for the completion callback function to the FDISC ELS command. 9771 * 9772 * Return code 9773 * 0 - Successfully issued fdisc iocb command 9774 * 1 - Failed to issue fdisc iocb command 9775 **/ 9776 static int 9777 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 9778 uint8_t retry) 9779 { 9780 struct lpfc_hba *phba = vport->phba; 9781 IOCB_t *icmd; 9782 struct lpfc_iocbq *elsiocb; 9783 struct serv_parm *sp; 9784 uint8_t *pcmd; 9785 uint16_t cmdsize; 9786 int did = ndlp->nlp_DID; 9787 int rc; 9788 9789 vport->port_state = LPFC_FDISC; 9790 vport->fc_myDID = 0; 9791 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 9792 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 9793 ELS_CMD_FDISC); 9794 if (!elsiocb) { 9795 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9796 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9797 "0255 Issue FDISC: no IOCB\n"); 9798 return 1; 9799 } 9800 9801 icmd = &elsiocb->iocb; 9802 icmd->un.elsreq64.myID = 0; 9803 icmd->un.elsreq64.fl = 1; 9804 9805 /* 9806 * SLI3 ports require a different context type value than SLI4. 9807 * Catch SLI3 ports here and override the prep. 9808 */ 9809 if (phba->sli_rev == LPFC_SLI_REV3) { 9810 icmd->ulpCt_h = 1; 9811 icmd->ulpCt_l = 0; 9812 } 9813 9814 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9815 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 9816 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 9817 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 9818 sp = (struct serv_parm *) pcmd; 9819 /* Setup CSPs accordingly for Fabric */ 9820 sp->cmn.e_d_tov = 0; 9821 sp->cmn.w2.r_a_tov = 0; 9822 sp->cmn.virtual_fabric_support = 0; 9823 sp->cls1.classValid = 0; 9824 sp->cls2.seqDelivery = 1; 9825 sp->cls3.seqDelivery = 1; 9826 9827 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 9828 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 9829 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 9830 pcmd += sizeof(uint32_t); /* Port Name */ 9831 memcpy(pcmd, &vport->fc_portname, 8); 9832 pcmd += sizeof(uint32_t); /* Node Name */ 9833 pcmd += sizeof(uint32_t); /* Node Name */ 9834 memcpy(pcmd, &vport->fc_nodename, 8); 9835 sp->cmn.valid_vendor_ver_level = 0; 9836 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 9837 lpfc_set_disctmo(vport); 9838 9839 phba->fc_stat.elsXmitFDISC++; 9840 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 9841 9842 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9843 "Issue FDISC: did:x%x", 9844 did, 0, 0); 9845 9846 elsiocb->context1 = lpfc_nlp_get(ndlp); 9847 if (!elsiocb->context1) { 9848 lpfc_els_free_iocb(phba, elsiocb); 9849 goto err_out; 9850 } 9851 9852 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 9853 if (rc == IOCB_ERROR) { 9854 lpfc_els_free_iocb(phba, elsiocb); 9855 lpfc_nlp_put(ndlp); 9856 goto err_out; 9857 } 9858 9859 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 9860 return 0; 9861 9862 err_out: 9863 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9864 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9865 "0256 Issue FDISC: Cannot send IOCB\n"); 9866 return 1; 9867 } 9868 9869 /** 9870 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 9871 * @phba: pointer to lpfc hba data structure. 9872 * @cmdiocb: pointer to lpfc command iocb data structure. 9873 * @rspiocb: pointer to lpfc response iocb data structure. 
9874 * 9875 * This routine is the completion callback function for a LOGO ELS command 9876 * issued off a vport. It frees the command IOCB and then decrements the 9877 * reference count held on the ndlp for this completion function, indicating 9878 * that the reference to the ndlp is no longer needed. Note that the 9879 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 9880 * callback function and an additional explicit ndlp reference decrement 9881 * will trigger the actual release of the ndlp. 9882 **/ 9883 static void 9884 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9885 struct lpfc_iocbq *rspiocb) 9886 { 9887 struct lpfc_vport *vport = cmdiocb->vport; 9888 IOCB_t *irsp; 9889 struct lpfc_nodelist *ndlp; 9890 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9891 9892 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 9893 irsp = &rspiocb->iocb; 9894 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9895 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 9896 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 9897 9898 /* NPIV LOGO completes to NPort <nlp_DID> */ 9899 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9900 "2928 NPIV LOGO completes to NPort x%x " 9901 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 9902 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 9903 irsp->ulpTimeout, vport->num_disc_nodes, 9904 kref_read(&ndlp->kref), ndlp->nlp_flag, 9905 ndlp->fc4_xpt_flags); 9906 9907 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 9908 spin_lock_irq(shost->host_lock); 9909 vport->fc_flag &= ~FC_NDISC_ACTIVE; 9910 vport->fc_flag &= ~FC_FABRIC; 9911 spin_unlock_irq(shost->host_lock); 9912 lpfc_can_disctmo(vport); 9913 } 9914 9915 /* Safe to release resources now. */ 9916 lpfc_els_free_iocb(phba, cmdiocb); 9917 lpfc_nlp_put(ndlp); 9918 vport->unreg_vpi_cmpl = VPORT_ERROR; 9919 } 9920 9921 /** 9922 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 9923 * @vport: pointer to a virtual N_Port data structure. 9924 * @ndlp: pointer to a node-list data structure. 9925 * 9926 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 9927 * 9928 * Note that the ndlp reference count will be incremented by 1 for holding the 9929 * ndlp and the reference to ndlp will be stored into the context1 field of 9930 * the IOCB for the completion callback function to the LOGO ELS command.
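 * The LOGO payload carries the @vport N_Port ID and WWPN, and the
 * NLP_LOGO_SND flag is set on the @ndlp while the LOGO is outstanding
 * (and cleared again if the issue fails).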
9931 * 9932 * Return codes 9933 * 0 - Successfully issued logo off the @vport 9934 * 1 - Failed to issue logo off the @vport 9935 **/ 9936 int 9937 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 9938 { 9939 int rc = 0; 9940 struct lpfc_hba *phba = vport->phba; 9941 struct lpfc_iocbq *elsiocb; 9942 uint8_t *pcmd; 9943 uint16_t cmdsize; 9944 9945 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 9946 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 9947 ELS_CMD_LOGO); 9948 if (!elsiocb) 9949 return 1; 9950 9951 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9952 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 9953 pcmd += sizeof(uint32_t); 9954 9955 /* Fill in LOGO payload */ 9956 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 9957 pcmd += sizeof(uint32_t); 9958 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 9959 9960 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9961 "Issue LOGO npiv did:x%x flg:x%x", 9962 ndlp->nlp_DID, ndlp->nlp_flag, 0); 9963 9964 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 9965 spin_lock_irq(&ndlp->lock); 9966 ndlp->nlp_flag |= NLP_LOGO_SND; 9967 spin_unlock_irq(&ndlp->lock); 9968 elsiocb->context1 = lpfc_nlp_get(ndlp); 9969 if (!elsiocb->context1) { 9970 lpfc_els_free_iocb(phba, elsiocb); 9971 goto err; 9972 } 9973 9974 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9975 if (rc == IOCB_ERROR) { 9976 lpfc_els_free_iocb(phba, elsiocb); 9977 lpfc_nlp_put(ndlp); 9978 goto err; 9979 } 9980 return 0; 9981 9982 err: 9983 spin_lock_irq(&ndlp->lock); 9984 ndlp->nlp_flag &= ~NLP_LOGO_SND; 9985 spin_unlock_irq(&ndlp->lock); 9986 return 1; 9987 } 9988 9989 /** 9990 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 9991 * @t: timer context used to obtain the lpfc hba. 9992 * 9993 * This routine is invoked by the fabric iocb block timer after 9994 * timeout. It posts the fabric iocb block timeout event by setting the 9995 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 9996 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 9997 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 9998 * posted event WORKER_FABRIC_BLOCK_TMO. 9999 **/ 10000 void 10001 lpfc_fabric_block_timeout(struct timer_list *t) 10002 { 10003 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 10004 unsigned long iflags; 10005 uint32_t tmo_posted; 10006 10007 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 10008 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 10009 if (!tmo_posted) 10010 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 10011 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 10012 10013 if (!tmo_posted) 10014 lpfc_worker_wake_up(phba); 10015 return; 10016 } 10017 10018 /** 10019 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 10020 * @phba: pointer to lpfc hba data structure. 10021 * 10022 * This routine issues one fabric iocb from the driver internal list to 10023 * the HBA. It first checks whether it's ready to issue one fabric iocb to 10024 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 10025 * remove one pending fabric iocb from the driver internal list and invokes 10026 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
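 * If the issue attempt fails, the iocb is completed locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and the next pending fabric iocb,
 * if any, is tried.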
10027 **/ 10028 static void 10029 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 10030 { 10031 struct lpfc_iocbq *iocb; 10032 unsigned long iflags; 10033 int ret; 10034 IOCB_t *cmd; 10035 10036 repeat: 10037 iocb = NULL; 10038 spin_lock_irqsave(&phba->hbalock, iflags); 10039 /* Post any pending iocb to the SLI layer */ 10040 if (atomic_read(&phba->fabric_iocb_count) == 0) { 10041 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 10042 list); 10043 if (iocb) 10044 /* Increment fabric iocb count to hold the position */ 10045 atomic_inc(&phba->fabric_iocb_count); 10046 } 10047 spin_unlock_irqrestore(&phba->hbalock, iflags); 10048 if (iocb) { 10049 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 10050 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 10051 iocb->iocb_flag |= LPFC_IO_FABRIC; 10052 10053 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 10054 "Fabric sched1: ste:x%x", 10055 iocb->vport->port_state, 0, 0); 10056 10057 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 10058 10059 if (ret == IOCB_ERROR) { 10060 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 10061 iocb->fabric_iocb_cmpl = NULL; 10062 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 10063 cmd = &iocb->iocb; 10064 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 10065 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 10066 iocb->iocb_cmpl(phba, iocb, iocb); 10067 10068 atomic_dec(&phba->fabric_iocb_count); 10069 goto repeat; 10070 } 10071 } 10072 } 10073 10074 /** 10075 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 10076 * @phba: pointer to lpfc hba data structure. 10077 * 10078 * This routine unblocks the issuing fabric iocb command. The function 10079 * will clear the fabric iocb block bit and then invoke the routine 10080 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 10081 * from the driver internal fabric iocb list. 10082 **/ 10083 void 10084 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 10085 { 10086 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 10087 10088 lpfc_resume_fabric_iocbs(phba); 10089 return; 10090 } 10091 10092 /** 10093 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 10094 * @phba: pointer to lpfc hba data structure. 10095 * 10096 * This routine blocks the issuing fabric iocb for a specified amount of 10097 * time (currently 100 ms). This is done by set the fabric iocb block bit 10098 * and set up a timeout timer for 100ms. When the block bit is set, no more 10099 * fabric iocb will be issued out of the HBA. 10100 **/ 10101 static void 10102 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 10103 { 10104 int blocked; 10105 10106 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 10107 /* Start a timer to unblock fabric iocbs after 100ms */ 10108 if (!blocked) 10109 mod_timer(&phba->fabric_block_timer, 10110 jiffies + msecs_to_jiffies(100)); 10111 10112 return; 10113 } 10114 10115 /** 10116 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 10117 * @phba: pointer to lpfc hba data structure. 10118 * @cmdiocb: pointer to lpfc command iocb data structure. 10119 * @rspiocb: pointer to lpfc response iocb data structure. 10120 * 10121 * This routine is the callback function that is put to the fabric iocb's 10122 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 10123 * function pointer has been stored in iocb->fabric_iocb_cmpl. 

/**
 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the callback function installed as the fabric iocb's
 * callback function pointer (iocb->iocb_cmpl); the original iocb's
 * callback function pointer has been stored in iocb->fabric_iocb_cmpl.
 * This callback function first restores and invokes the original iocb's
 * callback function and then invokes the lpfc_resume_fabric_iocbs()
 * routine to issue the next fabric bound iocb from the driver internal
 * fabric iocb list onto the wire.
 **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);

	switch (rspiocb->iocb.ulpStatus) {
	case IOSTAT_NPORT_RJT:
	case IOSTAT_FABRIC_RJT:
		if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
			lpfc_block_fabric_iocbs(phba);
		}
		break;

	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		lpfc_block_fabric_iocbs(phba);
		break;

	case IOSTAT_LS_RJT:
		stat.un.lsRjtError =
			be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
		if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
		    (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
			lpfc_block_fabric_iocbs(phba);
		break;
	}

	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);

	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
	cmdiocb->fabric_iocb_cmpl = NULL;
	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);

	atomic_dec(&phba->fabric_iocb_count);
	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
		/* Post any pending iocbs to HBA */
		lpfc_resume_fabric_iocbs(phba);
	}
}
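
/*
 * Illustrative sketch of how an ELS caller is expected to route a fabric
 * bound command through lpfc_issue_fabric_iocb() below.  The caller shape
 * shown here is an assumption for illustration (the real FLOGI/FDISC
 * issue paths live elsewhere in this file); only the completion-trampoline
 * behavior is taken from the code above and below:
 *
 *	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;       // caller's handler
 *	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
 *	// If the iocb goes out now, lpfc_issue_fabric_iocb() parks the
 *	// caller's handler in elsiocb->fabric_iocb_cmpl and substitutes
 *	// lpfc_cmpl_fabric_iocb(), which restores and calls the caller's
 *	// handler on completion.  Otherwise the iocb waits on
 *	// phba->fabric_iocb_list until lpfc_resume_fabric_iocbs() runs.
 */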

/**
 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 * @iocb: pointer to lpfc command iocb data structure.
 *
 * This routine is used as the top-level API for issuing a fabric iocb command
 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
 * function makes sure that only one fabric bound iocb is outstanding at any
 * given time. As such, this function first checks whether there is already an
 * outstanding fabric iocb on the wire. If so, it puts the newly issued iocb
 * onto the driver internal fabric iocb list, waiting to be issued later.
 * Otherwise, it issues the iocb on the wire and updates the fabric iocb count
 * to indicate that there is one fabric iocb on the wire.
 *
 * Note that this implementation can potentially send fabric IOCBs out of
 * order. The problem is that the "ready" boolean does not include the
 * condition that the internal fabric IOCB list is empty. As such, a fabric
 * IOCB issued by this routine might jump ahead of the fabric IOCBs already
 * on the internal list.
 *
 * Return code
 *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
 *   IOCB_ERROR - failed to issue fabric iocb
 **/
static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
	unsigned long iflags;
	int ready;
	int ret;

	BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);

	spin_lock_irqsave(&phba->hbalock, iflags);
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	if (ready)
		/* Increment fabric iocb count to hold the position */
		atomic_inc(&phba->fabric_iocb_count);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (ready) {
		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
		iocb->iocb_flag |= LPFC_IO_FABRIC;

		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
				      "Fabric sched2: ste:x%x",
				      iocb->vport->port_state, 0, 0);

		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);

		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
			iocb->fabric_iocb_cmpl = NULL;
			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			atomic_dec(&phba->fabric_iocb_count);
		}
	} else {
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		ret = IOCB_SUCCESS;
	}
	return ret;
}

/**
 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine aborts all the IOCBs associated with a @vport from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @vport from the list, sets
 * the status field to IOSTAT_LOCAL_REJECT, and invokes the callback
 * function associated with the IOCB.
 **/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {

		if (piocb->vport != vport)
			continue;

		list_move_tail(&piocb->list, &completions);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
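
/*
 * The abort helpers above and below share one pattern: entries are moved
 * onto a private "completions" list while phba->hbalock is held, and only
 * after the lock is dropped is lpfc_sli_cancel_iocbs() asked to fail them,
 * presumably so that completion handlers never run under hbalock.  A
 * minimal sketch of the expected per-iocb effect (assumed behavior of
 * lpfc_sli_cancel_iocbs(), which is defined outside this file):
 *
 *	// for each iocb on the completions list:
 *	piocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
 *	piocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
 *	if (piocb->iocb_cmpl)
 *		piocb->iocb_cmpl(phba, piocb, piocb);
 */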

/**
 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine aborts all the IOCBs associated with an @ndlp from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @ndlp from the list, sets
 * the status field to IOSTAT_LOCAL_REJECT, and invokes the callback
 * function associated with the IOCB.
 **/
void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = ndlp->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);

	if (unlikely(!pring))
		return;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {
		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {

			list_move_tail(&piocb->list, &completions);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the IOCBs currently on the driver internal
 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
 * IOCB ring. This function takes the entire list of IOCBs off the fabric
 * IOCB list, sets the status field to IOSTAT_LOCAL_REJECT, and invokes
 * the callback function associated with each IOCB.
 **/
void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->fabric_iocb_list, &completions);
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup on vport deletion and by
 * the cleanup for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) {
			lpfc_nlp_put(sglq_entry->ndlp);
			sglq_entry->ndlp = NULL;
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
	return;
}

/**
 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the els xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 slow-path
 * ELS aborted xri.
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;

	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);

	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag);
	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
					       iflag);

			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
						    sglq_entry->sli4_lxritag,
						    rxid, 1);
				lpfc_nlp_put(ndlp);
			}

			/* Check if TXQ queue needs to be serviced */
			if (pring && !list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag);
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI)
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
 * @vport: pointer to virtual port object.
 * @ndlp: nodelist pointer for the impacted node.
 *
 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
 * or an SLI3 ASYNC_STATUS_CN event from the port.  For either event,
 * the driver is required to send a LOGO to the remote node before it
 * attempts to recover its login to the remote node.
 */
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
			   struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	unsigned long flags = 0;

	shost = lpfc_shost_from_vport(vport);
	phba = vport->phba;
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI, "3093 No rport recovery needed. "
				"rport in state 0x%x\n", ndlp->nlp_state);
		return;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"3094 Start rport recovery on shost id 0x%x "
			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
			"flags 0x%x\n",
			shost->host_no, ndlp->nlp_DID,
			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
			ndlp->nlp_flag);
	/*
	 * The rport is not responding.  Remove the FCP-2 flag to prevent
	 * an ADISC in the follow-up recovery code.
	 */
	spin_lock_irqsave(&ndlp->lock, flags);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
	spin_unlock_irqrestore(&ndlp->lock, flags);
	lpfc_unreg_rpi(vport, ndlp);
}
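
/*
 * Illustrative sketch of the expected follow-up to
 * lpfc_sli_abts_recover_port(): the NLP_ISSUE_LOGO flag set above is
 * assumed to be consumed by the unreg-RPI completion handling elsewhere
 * in the driver, roughly along these lines (not verbatim driver code):
 *
 *	if (ndlp->nlp_flag & NLP_ISSUE_LOGO)
 *		lpfc_issue_els_logo(vport, ndlp, 0);
 *
 * lpfc_issue_els_logo() is the LOGO issue routine defined earlier in this
 * file; the exact call site that honors NLP_ISSUE_LOGO is an assumption
 * here.
 */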