/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.             *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention event during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is already LPFC_VPORT_READY, the request for checking host
 * link attention events will be ignored and a return code shall indicate
 * no host link attention event had happened.
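 *
 * Also note that the Host Attention register is only examined for SLI-3 and
 * earlier ports; on SLI-4 ports this routine always reports that no host
 * link attention event has occurred.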
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine allocates a lpfc-IOCB data structure from the driver
 * lpfc-IOCB free-list and prepares the IOCB with the parameters passed in,
 * so that the discovery state machine can issue an Extended Link Service
 * (ELS) command. It is a generic lpfc-IOCB allocation and preparation
 * routine used by all the discovery state machine routines; the ELS
 * command-specific fields are set up later by the individual discovery
 * state machine routines after this routine has allocated and prepared a
 * generic IOCB data structure. It fills in the Buffer Descriptor Entries
 * (BDEs) and allocates buffers for both the command payload and, if
 * expected, the response payload. The reference count on the ndlp is
 * incremented by 1 and the reference to the ndlp is put into context1 of
 * the IOCB data structure for this IOCB, to hold the ndlp reference for the
 * command's callback function to access later.
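 *
 * The command payload buffer is linked into the context2 field and the
 * buffer pointer list into the context3 field of the returned IOCB, so the
 * completion path can find and release them later.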
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else if (elscmd == ELS_CMD_LOGO)
			icmd->ulpTimeout = phba->fc_ratov;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
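	/* All ELS commands are sent using Class 3 service */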
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
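 *
 * A reference on the @ndlp is taken for the fabric registration login
 * mailbox completion handler; the reference is released again if that
 * mailbox cannot be issued.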
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_no_ndlp;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_no_ndlp:
	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->ctx_buf = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname, or Fabric nodename has changed in the completion
 * service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID for
 * the @vport against the previously assigned N_Port ID. If it is different
 * from the previously assigned Destination ID (DID), the lpfc_unreg_rpi()
 * routine is invoked on all the remaining nodes with the @vport to
 * unregister the Remote Port Indicators (RPIs). Finally, the
 * lpfc_issue_fabric_reglogin() routine is invoked to register login to the
 * fabric.
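 *
 * The fabric service parameters (E_D_TOV, R_A_TOV and the common service
 * parameter block) from the FLOGI response are also saved in the hba data
 * structure for later use.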
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs */
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
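 *
 * NPIV is not supported in point-to-point mode, so it is forced off as part
 * of this completion handling.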
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI.
 * If a retry has been made (either immediately or delayed, with
 * lpfc_els_retry() returning 1), the command IOCB will be released and the
 * function returns. If the retry attempt has been given up (possibly having
 * reached the maximum number of retries), one additional decrement of the
 * ndlp reference shall be invoked before going out after releasing the
 * command IOCB. This will actually release the remote node (Note,
 * lpfc_els_free_iocb() will also invoke one decrement of the ndlp reference
 * count). If no error is reported in the IOCB status, the command Port ID
 * field is used to determine whether this is a point-to-point topology or a
 * fabric topology: if the Port ID field is assigned, it is a fabric
 * topology; otherwise, it is a point-to-point topology. The routine
 * lpfc_cmpl_els_flogi_fabric() or lpfc_cmpl_els_flogi_nport() shall be
 * invoked accordingly to handle the specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      irsp->ulpStatus, irsp->un.ulpWord[4],
			      vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 irsp->ulpStatus, irsp->un.ulpWord[4],
					 irsp->ulpTimeout, phba->hba_flag,
					 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 cmdiocb->sli4_xritag, irsp->ulpTimeout);

		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded. Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	lpfc_nlp_put(ndlp);
	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			  IOERR_SLI_ABORTED) &&
			 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			  IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp;
	uint32_t *pcmd;
	uint32_t cmd;

	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
	cmd = *pcmd;
	irsp = &rspiocb->iocb;

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
			cmdiocb->iocb_flag);

	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue a flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out the FLOGI ELS command, with one outstanding fabric IOCB at a time.
 *
 * Note that, in the lpfc_prep_els_iocb() routine, the reference count of
 * ndlp will be incremented by 1 for holding the ndlp and the reference to
 * ndlp will be stored into the context1 field of the IOCB for the
 * completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support for sequence
		 * coalescing
		 */
do SLI4 class2 without support sequence coalescing */ 1311 sp->cls2.classValid = 0; 1312 sp->cls2.seqDelivery = 0; 1313 } else { 1314 /* Historical, setting sequential-delivery bit for SLI3 */ 1315 sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0; 1316 sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0; 1317 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { 1318 sp->cmn.request_multiple_Nport = 1; 1319 /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ 1320 icmd->ulpCt_h = 1; 1321 icmd->ulpCt_l = 0; 1322 } else 1323 sp->cmn.request_multiple_Nport = 0; 1324 } 1325 1326 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 1327 icmd->un.elsreq64.myID = 0; 1328 icmd->un.elsreq64.fl = 1; 1329 } 1330 1331 tmo = phba->fc_ratov; 1332 phba->fc_ratov = LPFC_DISC_FLOGI_TMO; 1333 lpfc_set_disctmo(vport); 1334 phba->fc_ratov = tmo; 1335 1336 phba->fc_stat.elsXmitFLOGI++; 1337 elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi; 1338 1339 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1340 "Issue FLOGI: opt:x%x", 1341 phba->sli3_options, 0, 0); 1342 1343 elsiocb->context1 = lpfc_nlp_get(ndlp); 1344 if (!elsiocb->context1) 1345 goto out; 1346 1347 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 1348 if (rc == IOCB_ERROR) 1349 lpfc_nlp_put(ndlp); 1350 1351 phba->hba_flag |= HBA_FLOGI_ISSUED; 1352 1353 /* Check for a deferred FLOGI ACC condition */ 1354 if (phba->defer_flogi_acc_flag) { 1355 did = vport->fc_myDID; 1356 vport->fc_myDID = Fabric_DID; 1357 1358 memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq)); 1359 1360 defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id; 1361 defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id = 1362 phba->defer_flogi_acc_ox_id; 1363 1364 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1365 "3354 Xmit deferred FLOGI ACC: rx_id: x%x," 1366 " ox_id: x%x, hba_flag x%x\n", 1367 phba->defer_flogi_acc_rx_id, 1368 phba->defer_flogi_acc_ox_id, phba->hba_flag); 1369 1370 /* Send deferred FLOGI ACC */ 1371 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc, 1372 ndlp, NULL); 1373 1374 phba->defer_flogi_acc_flag = false; 1375 1376 vport->fc_myDID = did; 1377 } 1378 1379 if (!rc) 1380 return 0; 1381 out: 1382 lpfc_els_free_iocb(phba, elsiocb); 1383 return 1; 1384 } 1385 1386 /** 1387 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs 1388 * @phba: pointer to lpfc hba data structure. 1389 * 1390 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs 1391 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq 1392 * list and issues an abort IOCB commond on each outstanding IOCB that 1393 * contains a active Fabric_DID ndlp. Note that this function is to issue 1394 * the abort IOCB command on all the outstanding IOCBs, thus when this 1395 * function returns, it does not guarantee all the IOCBs are actually aborted. 1396 * 1397 * Return code 1398 * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) 1399 **/ 1400 int 1401 lpfc_els_abort_flogi(struct lpfc_hba *phba) 1402 { 1403 struct lpfc_sli_ring *pring; 1404 struct lpfc_iocbq *iocb, *next_iocb; 1405 struct lpfc_nodelist *ndlp; 1406 IOCB_t *icmd; 1407 1408 /* Abort outstanding I/O on NPort <nlp_DID> */ 1409 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY, 1410 "0201 Abort outstanding I/O on NPort x%x\n", 1411 Fabric_DID); 1412 1413 pring = lpfc_phba_elsring(phba); 1414 if (unlikely(!pring)) 1415 return -EIO; 1416 1417 /* 1418 * Check the txcmplq for an iocb that matches the nport the driver is 1419 * searching for. 
1420 */ 1421 spin_lock_irq(&phba->hbalock); 1422 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1423 icmd = &iocb->iocb; 1424 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 1425 ndlp = (struct lpfc_nodelist *)(iocb->context1); 1426 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) 1427 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1428 NULL); 1429 } 1430 } 1431 spin_unlock_irq(&phba->hbalock); 1432 1433 return 0; 1434 } 1435 1436 /** 1437 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1438 * @vport: pointer to a host virtual N_Port data structure. 1439 * 1440 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1441 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1442 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1443 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1444 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1445 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1446 * @vport. 1447 * 1448 * Return code 1449 * 0 - failed to issue initial flogi for @vport 1450 * 1 - successfully issued initial flogi for @vport 1451 **/ 1452 int 1453 lpfc_initial_flogi(struct lpfc_vport *vport) 1454 { 1455 struct lpfc_nodelist *ndlp; 1456 1457 vport->port_state = LPFC_FLOGI; 1458 lpfc_set_disctmo(vport); 1459 1460 /* First look for the Fabric ndlp */ 1461 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1462 if (!ndlp) { 1463 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1464 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1465 if (!ndlp) 1466 return 0; 1467 /* Set the node type */ 1468 ndlp->nlp_type |= NLP_FABRIC; 1469 1470 /* Put ndlp onto node list */ 1471 lpfc_enqueue_node(vport, ndlp); 1472 } 1473 1474 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1475 /* This decrement of reference count to node shall kick off 1476 * the release of the node. 1477 */ 1478 lpfc_nlp_put(ndlp); 1479 return 0; 1480 } 1481 return 1; 1482 } 1483 1484 /** 1485 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1486 * @vport: pointer to a host virtual N_Port data structure. 1487 * 1488 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1489 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1490 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1491 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1492 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine 1493 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1494 * @vport. 1495 * 1496 * Return code 1497 * 0 - failed to issue initial fdisc for @vport 1498 * 1 - successfully issued initial fdisc for @vport 1499 **/ 1500 int 1501 lpfc_initial_fdisc(struct lpfc_vport *vport) 1502 { 1503 struct lpfc_nodelist *ndlp; 1504 1505 /* First look for the Fabric ndlp */ 1506 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1507 if (!ndlp) { 1508 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1509 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1510 if (!ndlp) 1511 return 0; 1512 1513 /* NPIV is only supported in Fabrics. */ 1514 ndlp->nlp_type |= NLP_FABRIC; 1515 1516 /* Put ndlp onto node list */ 1517 lpfc_enqueue_node(vport, ndlp); 1518 } 1519 1520 if (lpfc_issue_els_fdisc(vport, ndlp, 0)) { 1521 /* decrement node reference count to trigger the release of 1522 * the node. 
1523 */ 1524 lpfc_nlp_put(ndlp); 1525 return 0; 1526 } 1527 return 1; 1528 } 1529 1530 /** 1531 * lpfc_more_plogi - Check and issue remaining plogis for a vport 1532 * @vport: pointer to a host virtual N_Port data structure. 1533 * 1534 * This routine checks whether there are more remaining Port Logins 1535 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine 1536 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes 1537 * to issue ELS PLOGIs up to the configured discover threads with the 1538 * @vport (@vport->cfg_discovery_threads). The function also decrement 1539 * the @vport's num_disc_node by 1 if it is not already 0. 1540 **/ 1541 void 1542 lpfc_more_plogi(struct lpfc_vport *vport) 1543 { 1544 if (vport->num_disc_nodes) 1545 vport->num_disc_nodes--; 1546 1547 /* Continue discovery with <num_disc_nodes> PLOGIs to go */ 1548 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 1549 "0232 Continue discovery with %d PLOGIs to go " 1550 "Data: x%x x%x x%x\n", 1551 vport->num_disc_nodes, vport->fc_plogi_cnt, 1552 vport->fc_flag, vport->port_state); 1553 /* Check to see if there are more PLOGIs to be sent */ 1554 if (vport->fc_flag & FC_NLP_MORE) 1555 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 1556 lpfc_els_disc_plogi(vport); 1557 1558 return; 1559 } 1560 1561 /** 1562 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp 1563 * @phba: pointer to lpfc hba data structure. 1564 * @prsp: pointer to response IOCB payload. 1565 * @ndlp: pointer to a node-list data structure. 1566 * 1567 * This routine checks and indicates whether the WWPN of an N_Port, retrieved 1568 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_POrt. 1569 * The following cases are considered N_Port confirmed: 1570 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches 1571 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but 1572 * it does not have WWPN assigned either. If the WWPN is confirmed, the 1573 * pointer to the @ndlp will be returned. If the WWPN is not confirmed: 1574 * 1) if there is a node on vport list other than the @ndlp with the same 1575 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked 1576 * on that node to release the RPI associated with the node; 2) if there is 1577 * no node found on vport list with the same WWPN of the N_Port PLOGI logged 1578 * into, a new node shall be allocated (or activated). In either case, the 1579 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall 1580 * be released and the new_ndlp shall be put on to the vport node list and 1581 * its pointer returned as the confirmed node. 1582 * 1583 * Note that before the @ndlp got "released", the keepDID from not-matching 1584 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID 1585 * of the @ndlp. This is because the release of @ndlp is actually to put it 1586 * into an inactive state on the vport node list and the vport node list 1587 * management algorithm does not allow two node with a same DID. 
1588 * 1589 * Return code 1590 * pointer to the PLOGI N_Port @ndlp 1591 **/ 1592 static struct lpfc_nodelist * 1593 lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, 1594 struct lpfc_nodelist *ndlp) 1595 { 1596 struct lpfc_vport *vport = ndlp->vport; 1597 struct lpfc_nodelist *new_ndlp; 1598 struct serv_parm *sp; 1599 uint8_t name[sizeof(struct lpfc_name)]; 1600 uint32_t rc, keepDID = 0, keep_nlp_flag = 0; 1601 uint32_t keep_new_nlp_flag = 0; 1602 uint16_t keep_nlp_state; 1603 u32 keep_nlp_fc4_type = 0; 1604 struct lpfc_nvme_rport *keep_nrport = NULL; 1605 unsigned long *active_rrqs_xri_bitmap = NULL; 1606 1607 /* Fabric nodes can have the same WWPN so we don't bother searching 1608 * by WWPN. Just return the ndlp that was given to us. 1609 */ 1610 if (ndlp->nlp_type & NLP_FABRIC) 1611 return ndlp; 1612 1613 sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t)); 1614 memset(name, 0, sizeof(struct lpfc_name)); 1615 1616 /* Now we find out if the NPort we are logging into, matches the WWPN 1617 * we have for that ndlp. If not, we have some work to do. 1618 */ 1619 new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName); 1620 1621 /* return immediately if the WWPN matches ndlp */ 1622 if (new_ndlp == ndlp) 1623 return ndlp; 1624 1625 if (phba->sli_rev == LPFC_SLI_REV4) { 1626 active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, 1627 GFP_KERNEL); 1628 if (active_rrqs_xri_bitmap) 1629 memset(active_rrqs_xri_bitmap, 0, 1630 phba->cfg_rrq_xri_bitmap_sz); 1631 } 1632 1633 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1634 "3178 PLOGI confirm: ndlp x%x x%x x%x: " 1635 "new_ndlp x%x x%x x%x\n", 1636 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, 1637 (new_ndlp ? new_ndlp->nlp_DID : 0), 1638 (new_ndlp ? new_ndlp->nlp_flag : 0), 1639 (new_ndlp ? new_ndlp->nlp_fc4_type : 0)); 1640 1641 if (!new_ndlp) { 1642 rc = memcmp(&ndlp->nlp_portname, name, 1643 sizeof(struct lpfc_name)); 1644 if (!rc) { 1645 if (active_rrqs_xri_bitmap) 1646 mempool_free(active_rrqs_xri_bitmap, 1647 phba->active_rrq_pool); 1648 return ndlp; 1649 } 1650 new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID); 1651 if (!new_ndlp) { 1652 if (active_rrqs_xri_bitmap) 1653 mempool_free(active_rrqs_xri_bitmap, 1654 phba->active_rrq_pool); 1655 return ndlp; 1656 } 1657 } else { 1658 keepDID = new_ndlp->nlp_DID; 1659 if (phba->sli_rev == LPFC_SLI_REV4 && 1660 active_rrqs_xri_bitmap) 1661 memcpy(active_rrqs_xri_bitmap, 1662 new_ndlp->active_rrqs_xri_bitmap, 1663 phba->cfg_rrq_xri_bitmap_sz); 1664 } 1665 1666 /* At this point in this routine, we know new_ndlp will be 1667 * returned. however, any previous GID_FTs that were done 1668 * would have updated nlp_fc4_type in ndlp, so we must ensure 1669 * new_ndlp has the right value. 
1670 */ 1671 if (vport->fc_flag & FC_FABRIC) { 1672 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1673 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1674 } 1675 1676 lpfc_unreg_rpi(vport, new_ndlp); 1677 new_ndlp->nlp_DID = ndlp->nlp_DID; 1678 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1679 if (phba->sli_rev == LPFC_SLI_REV4) 1680 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1681 ndlp->active_rrqs_xri_bitmap, 1682 phba->cfg_rrq_xri_bitmap_sz); 1683 1684 /* Lock both ndlps */ 1685 spin_lock_irq(&ndlp->lock); 1686 spin_lock_irq(&new_ndlp->lock); 1687 keep_new_nlp_flag = new_ndlp->nlp_flag; 1688 keep_nlp_flag = ndlp->nlp_flag; 1689 new_ndlp->nlp_flag = ndlp->nlp_flag; 1690 1691 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1692 if (keep_new_nlp_flag & NLP_UNREG_INP) 1693 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1694 else 1695 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1696 1697 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1698 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1699 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1700 else 1701 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1702 1703 ndlp->nlp_flag = keep_new_nlp_flag; 1704 1705 /* if ndlp had NLP_UNREG_INP set, keep it */ 1706 if (keep_nlp_flag & NLP_UNREG_INP) 1707 ndlp->nlp_flag |= NLP_UNREG_INP; 1708 else 1709 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1710 1711 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1712 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1713 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1714 else 1715 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1716 1717 spin_unlock_irq(&new_ndlp->lock); 1718 spin_unlock_irq(&ndlp->lock); 1719 1720 /* Set nlp_states accordingly */ 1721 keep_nlp_state = new_ndlp->nlp_state; 1722 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1723 1724 /* interchange the nvme remoteport structs */ 1725 keep_nrport = new_ndlp->nrport; 1726 new_ndlp->nrport = ndlp->nrport; 1727 1728 /* Move this back to NPR state */ 1729 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1730 /* The new_ndlp is replacing ndlp totally, so we need 1731 * to put ndlp on UNUSED list and try to free it. 1732 */ 1733 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1734 "3179 PLOGI confirm NEW: %x %x\n", 1735 new_ndlp->nlp_DID, keepDID); 1736 1737 /* Two ndlps cannot have the same did on the nodelist. 1738 * Note: for this case, ndlp has a NULL WWPN so setting 1739 * the nlp_fc4_type isn't required. 1740 */ 1741 ndlp->nlp_DID = keepDID; 1742 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1743 if (phba->sli_rev == LPFC_SLI_REV4 && 1744 active_rrqs_xri_bitmap) 1745 memcpy(ndlp->active_rrqs_xri_bitmap, 1746 active_rrqs_xri_bitmap, 1747 phba->cfg_rrq_xri_bitmap_sz); 1748 1749 } else { 1750 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1751 "3180 PLOGI confirm SWAP: %x %x\n", 1752 new_ndlp->nlp_DID, keepDID); 1753 1754 lpfc_unreg_rpi(vport, ndlp); 1755 1756 /* Two ndlps cannot have the same did and the fc4 1757 * type must be transferred because the ndlp is in 1758 * flight. 
1759 */ 1760 ndlp->nlp_DID = keepDID; 1761 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1762 1763 if (phba->sli_rev == LPFC_SLI_REV4 && 1764 active_rrqs_xri_bitmap) 1765 memcpy(ndlp->active_rrqs_xri_bitmap, 1766 active_rrqs_xri_bitmap, 1767 phba->cfg_rrq_xri_bitmap_sz); 1768 1769 /* Since we are switching over to the new_ndlp, 1770 * reset the old ndlp state 1771 */ 1772 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1773 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1774 keep_nlp_state = NLP_STE_NPR_NODE; 1775 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1776 ndlp->nrport = keep_nrport; 1777 } 1778 1779 /* 1780 * If ndlp is not associated with any rport we can drop it here else 1781 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1782 */ 1783 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1784 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1785 1786 if (phba->sli_rev == LPFC_SLI_REV4 && 1787 active_rrqs_xri_bitmap) 1788 mempool_free(active_rrqs_xri_bitmap, 1789 phba->active_rrq_pool); 1790 1791 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1792 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1793 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1794 new_ndlp->nlp_fc4_type); 1795 1796 return new_ndlp; 1797 } 1798 1799 /** 1800 * lpfc_end_rscn - Check and handle more rscn for a vport 1801 * @vport: pointer to a host virtual N_Port data structure. 1802 * 1803 * This routine checks whether more Registration State Change 1804 * Notifications (RSCNs) came in while the discovery state machine was in 1805 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1806 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1807 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1808 * handling the RSCNs. 1809 **/ 1810 void 1811 lpfc_end_rscn(struct lpfc_vport *vport) 1812 { 1813 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1814 1815 if (vport->fc_flag & FC_RSCN_MODE) { 1816 /* 1817 * Check to see if more RSCNs came in while we were 1818 * processing this one. 1819 */ 1820 if (vport->fc_rscn_id_cnt || 1821 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1822 lpfc_els_handle_rscn(vport); 1823 else { 1824 spin_lock_irq(shost->host_lock); 1825 vport->fc_flag &= ~FC_RSCN_MODE; 1826 spin_unlock_irq(shost->host_lock); 1827 } 1828 } 1829 } 1830 1831 /** 1832 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1833 * @phba: pointer to lpfc hba data structure. 1834 * @cmdiocb: pointer to lpfc command iocb data structure. 1835 * @rspiocb: pointer to lpfc response iocb data structure. 1836 * 1837 * This routine will call the clear rrq function to free the rrq and 1838 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1839 * exist then the clear_rrq is still called because the rrq needs to 1840 * be freed. 
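 *
 * Background: RRQ (Reinstate Recovery Qualifier, see FC-LS) is sent after an
 * exchange has been aborted so that its recovery qualifier, and with it the
 * XRI, may be reused. That is why the rrq resource is always released here,
 * even when no matching ndlp can be found.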
1841 **/ 1842 1843 static void 1844 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1845 struct lpfc_iocbq *rspiocb) 1846 { 1847 struct lpfc_vport *vport = cmdiocb->vport; 1848 IOCB_t *irsp; 1849 struct lpfc_nodelist *ndlp; 1850 struct lpfc_node_rrq *rrq; 1851 1852 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1853 rrq = cmdiocb->context_un.rrq; 1854 cmdiocb->context_un.rsp_iocb = rspiocb; 1855 1856 irsp = &rspiocb->iocb; 1857 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1858 "RRQ cmpl: status:x%x/x%x did:x%x", 1859 irsp->ulpStatus, irsp->un.ulpWord[4], 1860 irsp->un.elsreq64.remoteID); 1861 1862 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1863 if (!ndlp || ndlp != rrq->ndlp) { 1864 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1865 "2882 RRQ completes to NPort x%x " 1866 "with no ndlp. Data: x%x x%x x%x\n", 1867 irsp->un.elsreq64.remoteID, 1868 irsp->ulpStatus, irsp->un.ulpWord[4], 1869 irsp->ulpIoTag); 1870 goto out; 1871 } 1872 1873 /* rrq completes to NPort <nlp_DID> */ 1874 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1875 "2880 RRQ completes to NPort x%x " 1876 "Data: x%x x%x x%x x%x x%x\n", 1877 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1878 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1879 1880 if (irsp->ulpStatus) { 1881 /* Check for retry */ 1882 /* RRQ failed Don't print the vport to vport rjts */ 1883 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1884 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1885 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1886 (phba)->pport->cfg_log_verbose & LOG_ELS) 1887 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1888 "2881 RRQ failure DID:%06X Status:" 1889 "x%x/x%x\n", 1890 ndlp->nlp_DID, irsp->ulpStatus, 1891 irsp->un.ulpWord[4]); 1892 } 1893 out: 1894 if (rrq) 1895 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1896 1897 lpfc_els_free_iocb(phba, cmdiocb); 1898 lpfc_nlp_put(ndlp); 1899 return; 1900 } 1901 /** 1902 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1903 * @phba: pointer to lpfc hba data structure. 1904 * @cmdiocb: pointer to lpfc command iocb data structure. 1905 * @rspiocb: pointer to lpfc response iocb data structure. 1906 * 1907 * This routine is the completion callback function for issuing the Port 1908 * Login (PLOGI) command. For PLOGI completion, there must be an active 1909 * ndlp on the vport node list that matches the remote node ID from the 1910 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1911 * ignored and command IOCB released. The PLOGI response IOCB status is 1912 * checked for error conditons. If there is error status reported, PLOGI 1913 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1914 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1915 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1916 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1917 * there are additional N_Port nodes with the vport that need to perform 1918 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1919 * PLOGIs. 
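 *
 * In outline (an illustrative simplification of the code below, with link
 * checks, node removal and IOCB release omitted):
 *
 *	ndlp = lpfc_findnode_did(vport, remote_DID);
 *	if (irsp->ulpStatus) {
 *		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *			return;			a retry has been queued
 *		lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI);
 *	} else {
 *		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 *		lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI);
 *	}
 *	if (disc && vport->num_disc_nodes)
 *		lpfc_more_plogi(vport);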
1920 **/ 1921 static void 1922 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1923 struct lpfc_iocbq *rspiocb) 1924 { 1925 struct lpfc_vport *vport = cmdiocb->vport; 1926 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1927 IOCB_t *irsp; 1928 struct lpfc_nodelist *ndlp, *free_ndlp; 1929 struct lpfc_dmabuf *prsp; 1930 int disc; 1931 1932 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1933 cmdiocb->context_un.rsp_iocb = rspiocb; 1934 1935 irsp = &rspiocb->iocb; 1936 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1937 "PLOGI cmpl: status:x%x/x%x did:x%x", 1938 irsp->ulpStatus, irsp->un.ulpWord[4], 1939 irsp->un.elsreq64.remoteID); 1940 1941 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1942 if (!ndlp) { 1943 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1944 "0136 PLOGI completes to NPort x%x " 1945 "with no ndlp. Data: x%x x%x x%x\n", 1946 irsp->un.elsreq64.remoteID, 1947 irsp->ulpStatus, irsp->un.ulpWord[4], 1948 irsp->ulpIoTag); 1949 goto out_freeiocb; 1950 } 1951 1952 /* Since ndlp can be freed in the disc state machine, note if this node 1953 * is being used during discovery. 1954 */ 1955 spin_lock_irq(&ndlp->lock); 1956 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1957 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1958 spin_unlock_irq(&ndlp->lock); 1959 1960 /* PLOGI completes to NPort <nlp_DID> */ 1961 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1962 "0102 PLOGI completes to NPort x%06x " 1963 "Data: x%x x%x x%x x%x x%x\n", 1964 ndlp->nlp_DID, ndlp->nlp_fc4_type, 1965 irsp->ulpStatus, irsp->un.ulpWord[4], 1966 disc, vport->num_disc_nodes); 1967 1968 /* Check to see if link went down during discovery */ 1969 if (lpfc_els_chk_latt(vport)) { 1970 spin_lock_irq(&ndlp->lock); 1971 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1972 spin_unlock_irq(&ndlp->lock); 1973 goto out; 1974 } 1975 1976 if (irsp->ulpStatus) { 1977 /* Check for retry */ 1978 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1979 /* ELS command is being retried */ 1980 if (disc) { 1981 spin_lock_irq(&ndlp->lock); 1982 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1983 spin_unlock_irq(&ndlp->lock); 1984 } 1985 goto out; 1986 } 1987 /* PLOGI failed Don't print the vport to vport rjts */ 1988 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1989 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1990 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1991 (phba)->pport->cfg_log_verbose & LOG_ELS) 1992 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1993 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1994 ndlp->nlp_DID, irsp->ulpStatus, 1995 irsp->un.ulpWord[4]); 1996 1997 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1998 if (lpfc_error_lost_link(irsp)) 1999 goto check_plogi; 2000 else 2001 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2002 NLP_EVT_CMPL_PLOGI); 2003 2004 /* As long as this node is not registered with the scsi or nvme 2005 * transport, it is no longer an active node. Otherwise 2006 * devloss handles the final cleanup. 
2007 */ 2008 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2009 spin_lock_irq(&ndlp->lock); 2010 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2011 spin_unlock_irq(&ndlp->lock); 2012 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2013 NLP_EVT_DEVICE_RM); 2014 } 2015 } else { 2016 /* Good status, call state machine */ 2017 prsp = list_entry(((struct lpfc_dmabuf *) 2018 cmdiocb->context2)->list.next, 2019 struct lpfc_dmabuf, list); 2020 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2021 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2022 NLP_EVT_CMPL_PLOGI); 2023 } 2024 2025 check_plogi: 2026 if (disc && vport->num_disc_nodes) { 2027 /* Check to see if there are more PLOGIs to be sent */ 2028 lpfc_more_plogi(vport); 2029 2030 if (vport->num_disc_nodes == 0) { 2031 spin_lock_irq(shost->host_lock); 2032 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2033 spin_unlock_irq(shost->host_lock); 2034 2035 lpfc_can_disctmo(vport); 2036 lpfc_end_rscn(vport); 2037 } 2038 } 2039 2040 out: 2041 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2042 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2043 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2044 2045 out_freeiocb: 2046 /* Release the reference on the original I/O request. */ 2047 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 2048 2049 lpfc_els_free_iocb(phba, cmdiocb); 2050 lpfc_nlp_put(free_ndlp); 2051 return; 2052 } 2053 2054 /** 2055 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2056 * @vport: pointer to a host virtual N_Port data structure. 2057 * @did: destination port identifier. 2058 * @retry: number of retries to the command IOCB. 2059 * 2060 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2061 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2062 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2063 * This routine constructs the proper feilds of the PLOGI IOCB and invokes 2064 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2065 * 2066 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2067 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2068 * will be stored into the context1 field of the IOCB for the completion 2069 * callback function to the PLOGI ELS command. 2070 * 2071 * Return code 2072 * 0 - Successfully issued a plogi for @vport 2073 * 1 - failed to issue a plogi for @vport 2074 **/ 2075 int 2076 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2077 { 2078 struct lpfc_hba *phba = vport->phba; 2079 struct serv_parm *sp; 2080 struct lpfc_nodelist *ndlp; 2081 struct lpfc_iocbq *elsiocb; 2082 uint8_t *pcmd; 2083 uint16_t cmdsize; 2084 int ret; 2085 2086 ndlp = lpfc_findnode_did(vport, did); 2087 2088 if (ndlp) { 2089 /* Defer the processing of the issue PLOGI until after the 2090 * outstanding UNREG_RPI mbox command completes, unless we 2091 * are going offline. 
This logic does not apply for Fabric DIDs 2092 */ 2093 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2094 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2095 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2096 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2097 "4110 Issue PLOGI x%x deferred " 2098 "on NPort x%x rpi x%x Data: x%px\n", 2099 ndlp->nlp_defer_did, ndlp->nlp_DID, 2100 ndlp->nlp_rpi, ndlp); 2101 2102 /* We can only defer 1st PLOGI */ 2103 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2104 ndlp->nlp_defer_did = did; 2105 return 0; 2106 } 2107 } 2108 2109 /* If ndlp is not NULL, we will bump the reference count on it */ 2110 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2111 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2112 ELS_CMD_PLOGI); 2113 if (!elsiocb) 2114 return 1; 2115 2116 spin_lock_irq(&ndlp->lock); 2117 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2118 spin_unlock_irq(&ndlp->lock); 2119 2120 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2121 2122 /* For PLOGI request, remainder of payload is service parameters */ 2123 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2124 pcmd += sizeof(uint32_t); 2125 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2126 sp = (struct serv_parm *) pcmd; 2127 2128 /* 2129 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2130 * to device on remote loops work. 2131 */ 2132 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2133 sp->cmn.altBbCredit = 1; 2134 2135 if (sp->cmn.fcphLow < FC_PH_4_3) 2136 sp->cmn.fcphLow = FC_PH_4_3; 2137 2138 if (sp->cmn.fcphHigh < FC_PH3) 2139 sp->cmn.fcphHigh = FC_PH3; 2140 2141 sp->cmn.valid_vendor_ver_level = 0; 2142 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2143 sp->cmn.bbRcvSizeMsb &= 0xF; 2144 2145 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2146 "Issue PLOGI: did:x%x", 2147 did, 0, 0); 2148 2149 /* If our firmware supports this feature, convey that 2150 * information to the target using the vendor specific field. 2151 */ 2152 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2153 sp->cmn.valid_vendor_ver_level = 1; 2154 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2155 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2156 } 2157 2158 phba->fc_stat.elsXmitPLOGI++; 2159 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2160 2161 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2162 "Issue PLOGI: did:x%x refcnt %d", 2163 did, kref_read(&ndlp->kref), 0); 2164 elsiocb->context1 = lpfc_nlp_get(ndlp); 2165 if (!elsiocb->context1) 2166 goto io_err; 2167 2168 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2169 if (ret) { 2170 lpfc_nlp_put(ndlp); 2171 goto io_err; 2172 } 2173 return 0; 2174 2175 io_err: 2176 lpfc_els_free_iocb(phba, elsiocb); 2177 return 1; 2178 } 2179 2180 /** 2181 * lpfc_cmpl_els_prli - Completion callback function for prli 2182 * @phba: pointer to lpfc hba data structure. 2183 * @cmdiocb: pointer to lpfc command iocb data structure. 2184 * @rspiocb: pointer to lpfc response iocb data structure. 2185 * 2186 * This routine is the completion callback function for a Process Login 2187 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2188 * status. If there is error status reported, PRLI retry shall be attempted 2189 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2190 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2191 * ndlp to mark the PRLI completion. 
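 *
 * Because lpfc_issue_els_prli() may send both an FCP PRLI and an NVMe PRLI
 * to the same node, this completion only treats the node as eligible for
 * removal once ndlp->fc4_prli_sent drops back to zero. An illustrative
 * sequence, assuming both FC4 types were requested:
 *
 *	issue FCP PRLI		fc4_prli_sent = 1
 *	issue NVMe PRLI		fc4_prli_sent = 2
 *	FCP PRLI completes	fc4_prli_sent = 1  (node kept)
 *	NVMe PRLI completes	fc4_prli_sent = 0  (final disposition decided)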
2192 **/ 2193 static void 2194 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2195 struct lpfc_iocbq *rspiocb) 2196 { 2197 struct lpfc_vport *vport = cmdiocb->vport; 2198 IOCB_t *irsp; 2199 struct lpfc_nodelist *ndlp; 2200 char *mode; 2201 u32 loglevel; 2202 2203 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2204 cmdiocb->context_un.rsp_iocb = rspiocb; 2205 2206 irsp = &(rspiocb->iocb); 2207 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2208 spin_lock_irq(&ndlp->lock); 2209 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2210 2211 /* Driver supports multiple FC4 types. Counters matter. */ 2212 vport->fc_prli_sent--; 2213 ndlp->fc4_prli_sent--; 2214 spin_unlock_irq(&ndlp->lock); 2215 2216 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2217 "PRLI cmpl: status:x%x/x%x did:x%x", 2218 irsp->ulpStatus, irsp->un.ulpWord[4], 2219 ndlp->nlp_DID); 2220 2221 /* PRLI completes to NPort <nlp_DID> */ 2222 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2223 "0103 PRLI completes to NPort x%06x " 2224 "Data: x%x x%x x%x x%x\n", 2225 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2226 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2227 2228 /* Check to see if link went down during discovery */ 2229 if (lpfc_els_chk_latt(vport)) 2230 goto out; 2231 2232 if (irsp->ulpStatus) { 2233 /* Check for retry */ 2234 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2235 /* ELS command is being retried */ 2236 goto out; 2237 } 2238 2239 /* If we don't send GFT_ID to Fabric, a PRLI error 2240 * could be expected. 2241 */ 2242 if ((vport->fc_flag & FC_FABRIC) || 2243 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2244 mode = KERN_ERR; 2245 loglevel = LOG_TRACE_EVENT; 2246 } else { 2247 mode = KERN_INFO; 2248 loglevel = LOG_ELS; 2249 } 2250 2251 /* PRLI failed */ 2252 lpfc_printf_vlog(vport, mode, loglevel, 2253 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2254 "data: x%x\n", 2255 ndlp->nlp_DID, irsp->ulpStatus, 2256 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2257 2258 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2259 if (lpfc_error_lost_link(irsp)) 2260 goto out; 2261 else 2262 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2263 NLP_EVT_CMPL_PRLI); 2264 2265 /* As long as this node is not registered with the SCSI 2266 * or NVMe transport and no other PRLIs are outstanding, 2267 * it is no longer an active node. Otherwise devloss 2268 * handles the final cleanup. 2269 */ 2270 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2271 !ndlp->fc4_prli_sent) { 2272 spin_lock_irq(&ndlp->lock); 2273 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2274 spin_unlock_irq(&ndlp->lock); 2275 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2276 NLP_EVT_DEVICE_RM); 2277 } 2278 } else { 2279 /* Good status, call state machine. However, if another 2280 * PRLI is outstanding, don't call the state machine 2281 * because final disposition to Mapped or Unmapped is 2282 * completed there. 2283 */ 2284 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2285 NLP_EVT_CMPL_PRLI); 2286 } 2287 2288 out: 2289 lpfc_els_free_iocb(phba, cmdiocb); 2290 lpfc_nlp_put(ndlp); 2291 return; 2292 } 2293 2294 /** 2295 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2296 * @vport: pointer to a host virtual N_Port data structure. 2297 * @ndlp: pointer to a node-list data structure. 2298 * @retry: number of retries to the command IOCB. 2299 * 2300 * This routine issues a Process Login (PRLI) ELS command for the 2301 * @vport. 
The PRLI service parameters are set up in the payload of the 2302 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2303 * is put to the IOCB completion callback func field before invoking the 2304 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2305 * 2306 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2307 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2308 * will be stored into the context1 field of the IOCB for the completion 2309 * callback function to the PRLI ELS command. 2310 * 2311 * Return code 2312 * 0 - successfully issued prli iocb command for @vport 2313 * 1 - failed to issue prli iocb command for @vport 2314 **/ 2315 int 2316 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2317 uint8_t retry) 2318 { 2319 int rc = 0; 2320 struct lpfc_hba *phba = vport->phba; 2321 PRLI *npr; 2322 struct lpfc_nvme_prli *npr_nvme; 2323 struct lpfc_iocbq *elsiocb; 2324 uint8_t *pcmd; 2325 uint16_t cmdsize; 2326 u32 local_nlp_type, elscmd; 2327 2328 /* 2329 * If we are in RSCN mode, the FC4 types supported from a 2330 * previous GFT_ID command may not be accurate. So, if we 2331 * are a NVME Initiator, always look for the possibility of 2332 * the remote NPort beng a NVME Target. 2333 */ 2334 if (phba->sli_rev == LPFC_SLI_REV4 && 2335 vport->fc_flag & FC_RSCN_MODE && 2336 vport->nvmei_support) 2337 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2338 local_nlp_type = ndlp->nlp_fc4_type; 2339 2340 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2341 * fields here before any of them can complete. 2342 */ 2343 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2344 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2345 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2346 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2347 ndlp->nvme_fb_size = 0; 2348 2349 send_next_prli: 2350 if (local_nlp_type & NLP_FC4_FCP) { 2351 /* Payload is 4 + 16 = 20 x14 bytes. */ 2352 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2353 elscmd = ELS_CMD_PRLI; 2354 } else if (local_nlp_type & NLP_FC4_NVME) { 2355 /* Payload is 4 + 20 = 24 x18 bytes. */ 2356 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2357 elscmd = ELS_CMD_NVMEPRLI; 2358 } else { 2359 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2360 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2361 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2362 return 1; 2363 } 2364 2365 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2366 * FC4 type, implicitly LOGO. 2367 */ 2368 if (phba->sli_rev == LPFC_SLI_REV3 && 2369 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2370 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2371 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2372 ndlp->nlp_type); 2373 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2374 return 1; 2375 } 2376 2377 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2378 ndlp->nlp_DID, elscmd); 2379 if (!elsiocb) 2380 return 1; 2381 2382 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2383 2384 /* For PRLI request, remainder of payload is service parameters */ 2385 memset(pcmd, 0, cmdsize); 2386 2387 if (local_nlp_type & NLP_FC4_FCP) { 2388 /* Remainder of payload is FCP PRLI parameter page. 2389 * Note: this data structure is defined as 2390 * BE/LE in the structure definition so no 2391 * byte swap call is made. 
2392 */ 2393 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2394 pcmd += sizeof(uint32_t); 2395 npr = (PRLI *)pcmd; 2396 2397 /* 2398 * If our firmware version is 3.20 or later, 2399 * set the following bits for FC-TAPE support. 2400 */ 2401 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2402 npr->ConfmComplAllowed = 1; 2403 npr->Retry = 1; 2404 npr->TaskRetryIdReq = 1; 2405 } 2406 npr->estabImagePair = 1; 2407 npr->readXferRdyDis = 1; 2408 if (vport->cfg_first_burst_size) 2409 npr->writeXferRdyDis = 1; 2410 2411 /* For FCP support */ 2412 npr->prliType = PRLI_FCP_TYPE; 2413 npr->initiatorFunc = 1; 2414 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2415 2416 /* Remove FCP type - processed. */ 2417 local_nlp_type &= ~NLP_FC4_FCP; 2418 } else if (local_nlp_type & NLP_FC4_NVME) { 2419 /* Remainder of payload is NVME PRLI parameter page. 2420 * This data structure is the newer definition that 2421 * uses bf macros so a byte swap is required. 2422 */ 2423 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2424 pcmd += sizeof(uint32_t); 2425 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2426 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2427 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2428 if (phba->nsler) { 2429 bf_set(prli_nsler, npr_nvme, 1); 2430 bf_set(prli_conf, npr_nvme, 1); 2431 } 2432 2433 /* Only initiators request first burst. */ 2434 if ((phba->cfg_nvme_enable_fb) && 2435 !phba->nvmet_support) 2436 bf_set(prli_fba, npr_nvme, 1); 2437 2438 if (phba->nvmet_support) { 2439 bf_set(prli_tgt, npr_nvme, 1); 2440 bf_set(prli_disc, npr_nvme, 1); 2441 } else { 2442 bf_set(prli_init, npr_nvme, 1); 2443 bf_set(prli_conf, npr_nvme, 1); 2444 } 2445 2446 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2447 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2448 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2449 2450 /* Remove NVME type - processed. */ 2451 local_nlp_type &= ~NLP_FC4_NVME; 2452 } 2453 2454 phba->fc_stat.elsXmitPRLI++; 2455 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2456 spin_lock_irq(&ndlp->lock); 2457 ndlp->nlp_flag |= NLP_PRLI_SND; 2458 2459 /* The vport counters are used for lpfc_scan_finished, but 2460 * the ndlp is used to track outstanding PRLIs for different 2461 * FC4 types. 2462 */ 2463 vport->fc_prli_sent++; 2464 ndlp->fc4_prli_sent++; 2465 spin_unlock_irq(&ndlp->lock); 2466 2467 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2468 "Issue PRLI: did:x%x refcnt %d", 2469 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2470 elsiocb->context1 = lpfc_nlp_get(ndlp); 2471 if (!elsiocb->context1) 2472 goto io_err; 2473 2474 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2475 if (rc == IOCB_ERROR) 2476 goto node_err; 2477 2478 2479 /* The driver supports 2 FC4 types. Make sure 2480 * a PRLI is issued for all types before exiting. 2481 */ 2482 if (phba->sli_rev == LPFC_SLI_REV4 && 2483 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2484 goto send_next_prli; 2485 else 2486 return 0; 2487 2488 node_err: 2489 lpfc_nlp_put(ndlp); 2490 io_err: 2491 spin_lock_irq(&ndlp->lock); 2492 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2493 spin_unlock_irq(&ndlp->lock); 2494 lpfc_els_free_iocb(phba, elsiocb); 2495 return 1; 2496 } 2497 2498 /** 2499 * lpfc_rscn_disc - Perform rscn discovery for a vport 2500 * @vport: pointer to a host virtual N_Port data structure. 2501 * 2502 * This routine performs Registration State Change Notification (RSCN) 2503 * discovery for a @vport. 
If the @vport's node port recovery count is not 2504 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2505 * the nodes that need recovery. If none of the PLOGI were needed through 2506 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2507 * invoked to check and handle possible more RSCN came in during the period 2508 * of processing the current ones. 2509 **/ 2510 static void 2511 lpfc_rscn_disc(struct lpfc_vport *vport) 2512 { 2513 lpfc_can_disctmo(vport); 2514 2515 /* RSCN discovery */ 2516 /* go thru NPR nodes and issue ELS PLOGIs */ 2517 if (vport->fc_npr_cnt) 2518 if (lpfc_els_disc_plogi(vport)) 2519 return; 2520 2521 lpfc_end_rscn(vport); 2522 } 2523 2524 /** 2525 * lpfc_adisc_done - Complete the adisc phase of discovery 2526 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2527 * 2528 * This function is called when the final ADISC is completed during discovery. 2529 * This function handles clearing link attention or issuing reg_vpi depending 2530 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2531 * discovery. 2532 * This function is called with no locks held. 2533 **/ 2534 static void 2535 lpfc_adisc_done(struct lpfc_vport *vport) 2536 { 2537 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2538 struct lpfc_hba *phba = vport->phba; 2539 2540 /* 2541 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2542 * and continue discovery. 2543 */ 2544 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2545 !(vport->fc_flag & FC_RSCN_MODE) && 2546 (phba->sli_rev < LPFC_SLI_REV4)) { 2547 /* The ADISCs are complete. Doesn't matter if they 2548 * succeeded or failed because the ADISC completion 2549 * routine guarantees to call the state machine and 2550 * the RPI is either unregistered (failed ADISC response) 2551 * or the RPI is still valid and the node is marked 2552 * mapped for a target. The exchanges should be in the 2553 * correct state. This code is specific to SLI3. 2554 */ 2555 lpfc_issue_clear_la(phba, vport); 2556 lpfc_issue_reg_vpi(phba, vport); 2557 return; 2558 } 2559 /* 2560 * For SLI2, we need to set port_state to READY 2561 * and continue discovery. 2562 */ 2563 if (vport->port_state < LPFC_VPORT_READY) { 2564 /* If we get here, there is nothing to ADISC */ 2565 lpfc_issue_clear_la(phba, vport); 2566 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2567 vport->num_disc_nodes = 0; 2568 /* go thru NPR list, issue ELS PLOGIs */ 2569 if (vport->fc_npr_cnt) 2570 lpfc_els_disc_plogi(vport); 2571 if (!vport->num_disc_nodes) { 2572 spin_lock_irq(shost->host_lock); 2573 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2574 spin_unlock_irq(shost->host_lock); 2575 lpfc_can_disctmo(vport); 2576 lpfc_end_rscn(vport); 2577 } 2578 } 2579 vport->port_state = LPFC_VPORT_READY; 2580 } else 2581 lpfc_rscn_disc(vport); 2582 } 2583 2584 /** 2585 * lpfc_more_adisc - Issue more adisc as needed 2586 * @vport: pointer to a host virtual N_Port data structure. 2587 * 2588 * This routine determines whether there are more ndlps on a @vport 2589 * node list need to have Address Discover (ADISC) issued. If so, it will 2590 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2591 * remaining nodes which need to have ADISC sent. 
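 *
 * Once @vport->num_disc_nodes reaches zero, this routine hands off to
 * lpfc_adisc_done(), which either issues reg_vpi (SLI3 NPIV) or moves
 * discovery on to the PLOGI phase.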
2592 **/ 2593 void 2594 lpfc_more_adisc(struct lpfc_vport *vport) 2595 { 2596 if (vport->num_disc_nodes) 2597 vport->num_disc_nodes--; 2598 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2599 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2600 "0210 Continue discovery with %d ADISCs to go " 2601 "Data: x%x x%x x%x\n", 2602 vport->num_disc_nodes, vport->fc_adisc_cnt, 2603 vport->fc_flag, vport->port_state); 2604 /* Check to see if there are more ADISCs to be sent */ 2605 if (vport->fc_flag & FC_NLP_MORE) { 2606 lpfc_set_disctmo(vport); 2607 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2608 lpfc_els_disc_adisc(vport); 2609 } 2610 if (!vport->num_disc_nodes) 2611 lpfc_adisc_done(vport); 2612 return; 2613 } 2614 2615 /** 2616 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2617 * @phba: pointer to lpfc hba data structure. 2618 * @cmdiocb: pointer to lpfc command iocb data structure. 2619 * @rspiocb: pointer to lpfc response iocb data structure. 2620 * 2621 * This routine is the completion function for issuing the Address Discover 2622 * (ADISC) command. It first checks to see whether link went down during 2623 * the discovery process. If so, the node will be marked as node port 2624 * recovery for issuing discover IOCB by the link attention handler and 2625 * exit. Otherwise, the response status is checked. If error was reported 2626 * in the response status, the ADISC command shall be retried by invoking 2627 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2628 * the response status, the state machine is invoked to set transition 2629 * with respect to NLP_EVT_CMPL_ADISC event. 2630 **/ 2631 static void 2632 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2633 struct lpfc_iocbq *rspiocb) 2634 { 2635 struct lpfc_vport *vport = cmdiocb->vport; 2636 IOCB_t *irsp; 2637 struct lpfc_nodelist *ndlp; 2638 int disc; 2639 2640 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2641 cmdiocb->context_un.rsp_iocb = rspiocb; 2642 2643 irsp = &(rspiocb->iocb); 2644 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2645 2646 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2647 "ADISC cmpl: status:x%x/x%x did:x%x", 2648 irsp->ulpStatus, irsp->un.ulpWord[4], 2649 ndlp->nlp_DID); 2650 2651 /* Since ndlp can be freed in the disc state machine, note if this node 2652 * is being used during discovery. 
2653 */ 2654 spin_lock_irq(&ndlp->lock); 2655 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2656 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2657 spin_unlock_irq(&ndlp->lock); 2658 /* ADISC completes to NPort <nlp_DID> */ 2659 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2660 "0104 ADISC completes to NPort x%x " 2661 "Data: x%x x%x x%x x%x x%x\n", 2662 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2663 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2664 /* Check to see if link went down during discovery */ 2665 if (lpfc_els_chk_latt(vport)) { 2666 spin_lock_irq(&ndlp->lock); 2667 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2668 spin_unlock_irq(&ndlp->lock); 2669 goto out; 2670 } 2671 2672 if (irsp->ulpStatus) { 2673 /* Check for retry */ 2674 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2675 /* ELS command is being retried */ 2676 if (disc) { 2677 spin_lock_irq(&ndlp->lock); 2678 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2679 spin_unlock_irq(&ndlp->lock); 2680 lpfc_set_disctmo(vport); 2681 } 2682 goto out; 2683 } 2684 /* ADISC failed */ 2685 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2686 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2687 ndlp->nlp_DID, irsp->ulpStatus, 2688 irsp->un.ulpWord[4]); 2689 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2690 if (lpfc_error_lost_link(irsp)) 2691 goto check_adisc; 2692 else 2693 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2694 NLP_EVT_CMPL_ADISC); 2695 2696 /* As long as this node is not registered with the SCSI or NVMe 2697 * transport, it is no longer an active node. Otherwise 2698 * devloss handles the final cleanup. 2699 */ 2700 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2701 spin_lock_irq(&ndlp->lock); 2702 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2703 spin_unlock_irq(&ndlp->lock); 2704 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2705 NLP_EVT_DEVICE_RM); 2706 } 2707 } else 2708 /* Good status, call state machine */ 2709 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2710 NLP_EVT_CMPL_ADISC); 2711 2712 check_adisc: 2713 /* Check to see if there are more ADISCs to be sent */ 2714 if (disc && vport->num_disc_nodes) 2715 lpfc_more_adisc(vport); 2716 out: 2717 lpfc_els_free_iocb(phba, cmdiocb); 2718 lpfc_nlp_put(ndlp); 2719 return; 2720 } 2721 2722 /** 2723 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2724 * @vport: pointer to a virtual N_Port data structure. 2725 * @ndlp: pointer to a node-list data structure. 2726 * @retry: number of retries to the command IOCB. 2727 * 2728 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2729 * @vport. It prepares the payload of the ADISC ELS command, updates the 2730 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2731 * to issue the ADISC ELS command. 2732 * 2733 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2734 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2735 * will be stored into the context1 field of the IOCB for the completion 2736 * callback function to the ADISC ELS command. 
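 *
 * A minimal caller sketch (illustrative only - the log text is made up; in
 * the driver the ADISCs are normally driven from lpfc_els_disc_adisc()):
 *
 *	if (lpfc_issue_els_adisc(vport, ndlp, 0))
 *		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
 *				 "ADISC issue failed to x%x\n",
 *				 ndlp->nlp_DID);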
2737  *
2738  * Return code
2739  *  0 - successfully issued adisc
2740  *  1 - failed to issue adisc
2741  **/
2742 int
2743 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2744 		     uint8_t retry)
2745 {
2746 	int rc = 0;
2747 	struct lpfc_hba *phba = vport->phba;
2748 	ADISC *ap;
2749 	struct lpfc_iocbq *elsiocb;
2750 	uint8_t *pcmd;
2751 	uint16_t cmdsize;
2752
2753 	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2754 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2755 				     ndlp->nlp_DID, ELS_CMD_ADISC);
2756 	if (!elsiocb)
2757 		return 1;
2758
2759 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2760
2761 	/* For ADISC request, remainder of payload is service parameters */
2762 	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2763 	pcmd += sizeof(uint32_t);
2764
2765 	/* Fill in ADISC payload */
2766 	ap = (ADISC *) pcmd;
2767 	ap->hardAL_PA = phba->fc_pref_ALPA;
2768 	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2769 	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2770 	ap->DID = be32_to_cpu(vport->fc_myDID);
2771
2772 	phba->fc_stat.elsXmitADISC++;
2773 	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2774 	spin_lock_irq(&ndlp->lock);
2775 	ndlp->nlp_flag |= NLP_ADISC_SND;
2776 	spin_unlock_irq(&ndlp->lock);
2777 	elsiocb->context1 = lpfc_nlp_get(ndlp);
2778 	if (!elsiocb->context1)
2779 		goto node_err;
2780
2781 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2782 			      "Issue ADISC: did:x%x refcnt %d",
2783 			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2784 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2785 	if (rc == IOCB_ERROR)
2786 		goto io_err;
2787 	return 0;
2788
2789 io_err:
2790 	lpfc_nlp_put(ndlp);
2791 node_err:
2792 	spin_lock_irq(&ndlp->lock);
2793 	ndlp->nlp_flag &= ~NLP_ADISC_SND;
2794 	spin_unlock_irq(&ndlp->lock);
2795 	lpfc_els_free_iocb(phba, elsiocb);
2796 	return 1;
2797 }
2798
2799 /**
2800  * lpfc_cmpl_els_logo - Completion callback function for logo
2801  * @phba: pointer to lpfc hba data structure.
2802  * @cmdiocb: pointer to lpfc command iocb data structure.
2803  * @rspiocb: pointer to lpfc response iocb data structure.
2804  *
2805  * This routine is the completion function for issuing the ELS Logout (LOGO)
2806  * command. Unless the link has gone down, the state machine of the associated
2807  * ndlp is invoked with the NLP_EVT_CMPL_LOGO event regardless of the LOGO
2808  * response status. The LOGO itself is not retried on failure; ACC, LS_RJT or
2809  * no answer are all acceptable, and any recovery is left to a subsequent PLOGI.
2810 **/ 2811 static void 2812 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2813 struct lpfc_iocbq *rspiocb) 2814 { 2815 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2816 struct lpfc_vport *vport = ndlp->vport; 2817 IOCB_t *irsp; 2818 struct lpfcMboxq *mbox; 2819 unsigned long flags; 2820 uint32_t skip_recovery = 0; 2821 2822 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2823 cmdiocb->context_un.rsp_iocb = rspiocb; 2824 2825 irsp = &(rspiocb->iocb); 2826 spin_lock_irq(&ndlp->lock); 2827 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2828 spin_unlock_irq(&ndlp->lock); 2829 2830 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2831 "LOGO cmpl: status:x%x/x%x did:x%x", 2832 irsp->ulpStatus, irsp->un.ulpWord[4], 2833 ndlp->nlp_DID); 2834 2835 /* LOGO completes to NPort <nlp_DID> */ 2836 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2837 "0105 LOGO completes to NPort x%x " 2838 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2839 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2840 irsp->ulpStatus, irsp->un.ulpWord[4], 2841 irsp->ulpTimeout, vport->num_disc_nodes); 2842 2843 if (lpfc_els_chk_latt(vport)) { 2844 skip_recovery = 1; 2845 goto out; 2846 } 2847 2848 /* The LOGO will not be retried on failure. A LOGO was 2849 * issued to the remote rport and a ACC or RJT or no Answer are 2850 * all acceptable. Note the failure and move forward with 2851 * discovery. The PLOGI will retry. 2852 */ 2853 if (irsp->ulpStatus) { 2854 /* LOGO failed */ 2855 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2856 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2857 ndlp->nlp_DID, irsp->ulpStatus, 2858 irsp->un.ulpWord[4]); 2859 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2860 if (lpfc_error_lost_link(irsp)) { 2861 skip_recovery = 1; 2862 goto out; 2863 } 2864 } 2865 2866 /* Call state machine. This will unregister the rpi if needed. */ 2867 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2868 2869 /* The driver sets this flag for an NPIV instance that doesn't want to 2870 * log into the remote port. 2871 */ 2872 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2873 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2874 NLP_EVT_DEVICE_RM); 2875 lpfc_els_free_iocb(phba, cmdiocb); 2876 lpfc_nlp_put(ndlp); 2877 2878 /* Presume the node was released. */ 2879 return; 2880 } 2881 2882 out: 2883 /* Driver is done with the IO. */ 2884 lpfc_els_free_iocb(phba, cmdiocb); 2885 lpfc_nlp_put(ndlp); 2886 2887 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2888 if ((vport->fc_flag & FC_PT2PT) && 2889 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 2890 phba->pport->fc_myDID = 0; 2891 2892 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 2893 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 2894 if (phba->nvmet_support) 2895 lpfc_nvmet_update_targetport(phba); 2896 else 2897 lpfc_nvme_update_localport(phba->pport); 2898 } 2899 2900 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2901 if (mbox) { 2902 lpfc_config_link(phba, mbox); 2903 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2904 mbox->vport = vport; 2905 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 2906 MBX_NOT_FINISHED) { 2907 mempool_free(mbox, phba->mbox_mem_pool); 2908 skip_recovery = 1; 2909 } 2910 } 2911 } 2912 2913 /* 2914 * If the node is a target, the handling attempts to recover the port. 2915 * For any other port type, the rpi is unregistered as an implicit 2916 * LOGO. 
2917 	 */
2918 	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
2919 	    skip_recovery == 0) {
2920 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
2921 		spin_lock_irqsave(&ndlp->lock, flags);
2922 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2923 		spin_unlock_irqrestore(&ndlp->lock, flags);
2924
2925 		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2926 				 "3187 LOGO completes to NPort x%x: Start "
2927 				 "Recovery Data: x%x x%x x%x x%x\n",
2928 				 ndlp->nlp_DID, irsp->ulpStatus,
2929 				 irsp->un.ulpWord[4], irsp->ulpTimeout,
2930 				 vport->num_disc_nodes);
2931 		lpfc_disc_start(vport);
2932 		return;
2933 	}
2934
2935 	/* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
2936 	 * driver sends a LOGO to the rport to clean up. For fabric and
2937 	 * initiator ports, clean up the node as long as the node is not
2938 	 * registered with the transport.
2939 	 */
2940 	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2941 		spin_lock_irq(&ndlp->lock);
2942 		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2943 		spin_unlock_irq(&ndlp->lock);
2944 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2945 					NLP_EVT_DEVICE_RM);
2946 	}
2947 }
2948
2949 /**
2950  * lpfc_issue_els_logo - Issue a logo to a node on a vport
2951  * @vport: pointer to a virtual N_Port data structure.
2952  * @ndlp: pointer to a node-list data structure.
2953  * @retry: number of retries to the command IOCB.
2954  *
2955  * This routine constructs and issues an ELS Logout (LOGO) iocb command
2956  * to a remote node, referred to by the @ndlp on a @vport. It constructs the
2957  * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2958  * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2959  *
2960  * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2961  * will be incremented by 1 for holding the ndlp and the reference to ndlp
2962  * will be stored into the context1 field of the IOCB for the completion
2963  * callback function to the LOGO ELS command.
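 *
 * If a LOGO is already pending on the @ndlp (NLP_LOGO_SND is set), the
 * routine returns 0 without issuing another one.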
2964 * 2965 * Callers of this routine are expected to unregister the RPI first 2966 * 2967 * Return code 2968 * 0 - successfully issued logo 2969 * 1 - failed to issue logo 2970 **/ 2971 int 2972 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2973 uint8_t retry) 2974 { 2975 struct lpfc_hba *phba = vport->phba; 2976 struct lpfc_iocbq *elsiocb; 2977 uint8_t *pcmd; 2978 uint16_t cmdsize; 2979 int rc; 2980 2981 spin_lock_irq(&ndlp->lock); 2982 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2983 spin_unlock_irq(&ndlp->lock); 2984 return 0; 2985 } 2986 spin_unlock_irq(&ndlp->lock); 2987 2988 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2989 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2990 ndlp->nlp_DID, ELS_CMD_LOGO); 2991 if (!elsiocb) 2992 return 1; 2993 2994 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2995 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2996 pcmd += sizeof(uint32_t); 2997 2998 /* Fill in LOGO payload */ 2999 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 3000 pcmd += sizeof(uint32_t); 3001 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 3002 3003 phba->fc_stat.elsXmitLOGO++; 3004 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 3005 spin_lock_irq(&ndlp->lock); 3006 ndlp->nlp_flag |= NLP_LOGO_SND; 3007 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 3008 spin_unlock_irq(&ndlp->lock); 3009 elsiocb->context1 = lpfc_nlp_get(ndlp); 3010 if (!elsiocb->context1) 3011 goto node_err; 3012 3013 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3014 "Issue LOGO: did:x%x refcnt %d", 3015 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3016 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3017 if (rc == IOCB_ERROR) 3018 goto io_err; 3019 3020 spin_lock_irq(&ndlp->lock); 3021 ndlp->nlp_prev_state = ndlp->nlp_state; 3022 spin_unlock_irq(&ndlp->lock); 3023 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3024 return 0; 3025 3026 io_err: 3027 lpfc_nlp_put(ndlp); 3028 node_err: 3029 spin_lock_irq(&ndlp->lock); 3030 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3031 spin_unlock_irq(&ndlp->lock); 3032 lpfc_els_free_iocb(phba, elsiocb); 3033 return 1; 3034 } 3035 3036 /** 3037 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3038 * @phba: pointer to lpfc hba data structure. 3039 * @cmdiocb: pointer to lpfc command iocb data structure. 3040 * @rspiocb: pointer to lpfc response iocb data structure. 3041 * 3042 * This routine is a generic completion callback function for ELS commands. 3043 * Specifically, it is the callback function which does not need to perform 3044 * any command specific operations. It is currently used by the ELS command 3045 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3046 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3047 * Other than certain debug loggings, this callback function simply invokes the 3048 * lpfc_els_chk_latt() routine to check whether link went down during the 3049 * discovery process. 
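 *
 * The ndlp reference taken through context1 when the command was issued is
 * dropped here once the command IOCB has been freed.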
3050 **/ 3051 static void 3052 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3053 struct lpfc_iocbq *rspiocb) 3054 { 3055 struct lpfc_vport *vport = cmdiocb->vport; 3056 struct lpfc_nodelist *free_ndlp; 3057 IOCB_t *irsp; 3058 3059 irsp = &rspiocb->iocb; 3060 3061 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3062 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3063 irsp->ulpStatus, irsp->un.ulpWord[4], 3064 irsp->un.elsreq64.remoteID); 3065 3066 /* ELS cmd tag <ulpIoTag> completes */ 3067 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3068 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3069 irsp->ulpIoTag, irsp->ulpStatus, 3070 irsp->un.ulpWord[4], irsp->ulpTimeout); 3071 3072 /* Check to see if link went down during discovery */ 3073 lpfc_els_chk_latt(vport); 3074 3075 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 3076 3077 lpfc_els_free_iocb(phba, cmdiocb); 3078 lpfc_nlp_put(free_ndlp); 3079 } 3080 3081 /** 3082 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3083 * @phba: pointer to lpfc hba data structure. 3084 * @cmdiocb: pointer to lpfc command iocb data structure. 3085 * @rspiocb: pointer to lpfc response iocb data structure. 3086 * 3087 * This routine is a generic completion callback function for Discovery ELS cmd. 3088 * Currently used by the ELS command issuing routines for the ELS State Change 3089 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3090 * These commands will be retried once only for ELS timeout errors. 3091 **/ 3092 static void 3093 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3094 struct lpfc_iocbq *rspiocb) 3095 { 3096 struct lpfc_vport *vport = cmdiocb->vport; 3097 IOCB_t *irsp; 3098 struct lpfc_els_rdf_rsp *prdf; 3099 struct lpfc_dmabuf *pcmd, *prsp; 3100 u32 *pdata; 3101 u32 cmd; 3102 struct lpfc_nodelist *ndlp = cmdiocb->context1; 3103 3104 irsp = &rspiocb->iocb; 3105 3106 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3107 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3108 irsp->ulpStatus, irsp->un.ulpWord[4], 3109 irsp->un.elsreq64.remoteID); 3110 /* ELS cmd tag <ulpIoTag> completes */ 3111 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3112 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x " 3113 "x%x\n", 3114 irsp->ulpIoTag, irsp->ulpStatus, 3115 irsp->un.ulpWord[4], irsp->ulpTimeout, 3116 cmdiocb->retry); 3117 3118 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3119 if (!pcmd) 3120 goto out; 3121 3122 pdata = (u32 *)pcmd->virt; 3123 if (!pdata) 3124 goto out; 3125 cmd = *pdata; 3126 3127 /* Only 1 retry for ELS Timeout only */ 3128 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 3129 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3130 IOERR_SEQUENCE_TIMEOUT)) { 3131 cmdiocb->retry++; 3132 if (cmdiocb->retry <= 1) { 3133 switch (cmd) { 3134 case ELS_CMD_SCR: 3135 lpfc_issue_els_scr(vport, cmdiocb->retry); 3136 break; 3137 case ELS_CMD_RDF: 3138 cmdiocb->context1 = NULL; /* save ndlp refcnt */ 3139 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3140 break; 3141 } 3142 goto out; 3143 } 3144 phba->fc_stat.elsRetryExceeded++; 3145 } 3146 if (irsp->ulpStatus) { 3147 /* ELS discovery cmd completes with error */ 3148 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 3149 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3150 irsp->ulpStatus, irsp->un.ulpWord[4]); 3151 goto out; 3152 } 3153 3154 /* The RDF response doesn't have any impact on the running driver 3155 * but the notification descriptors are dumped here for support. 
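	 * The tags granted by the fabric normally correspond to the FPIN
	 * descriptors registered in lpfc_issue_els_rdf() (link integrity,
	 * delivery, peer congestion and congestion).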
3156 */ 3157 if (cmd == ELS_CMD_RDF) { 3158 int i; 3159 3160 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3161 if (!prsp) 3162 goto out; 3163 3164 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3165 if (!prdf) 3166 goto out; 3167 3168 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3169 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3170 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3171 "4677 Fabric RDF Notification Grant Data: " 3172 "0x%08x\n", 3173 be32_to_cpu( 3174 prdf->reg_d1.desc_tags[i])); 3175 } 3176 3177 out: 3178 /* Check to see if link went down during discovery */ 3179 lpfc_els_chk_latt(vport); 3180 lpfc_els_free_iocb(phba, cmdiocb); 3181 lpfc_nlp_put(ndlp); 3182 return; 3183 } 3184 3185 /** 3186 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3187 * @vport: pointer to a host virtual N_Port data structure. 3188 * @retry: retry counter for the command IOCB. 3189 * 3190 * This routine issues a State Change Request (SCR) to a fabric node 3191 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3192 * first search the @vport node list to find the matching ndlp. If no such 3193 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3194 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3195 * routine is invoked to send the SCR IOCB. 3196 * 3197 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3198 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3199 * will be stored into the context1 field of the IOCB for the completion 3200 * callback function to the SCR ELS command. 3201 * 3202 * Return code 3203 * 0 - Successfully issued scr command 3204 * 1 - Failed to issue scr command 3205 **/ 3206 int 3207 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3208 { 3209 int rc = 0; 3210 struct lpfc_hba *phba = vport->phba; 3211 struct lpfc_iocbq *elsiocb; 3212 uint8_t *pcmd; 3213 uint16_t cmdsize; 3214 struct lpfc_nodelist *ndlp; 3215 3216 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3217 3218 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3219 if (!ndlp) { 3220 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3221 if (!ndlp) 3222 return 1; 3223 lpfc_enqueue_node(vport, ndlp); 3224 } 3225 3226 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3227 ndlp->nlp_DID, ELS_CMD_SCR); 3228 3229 if (!elsiocb) 3230 return 1; 3231 3232 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3233 3234 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3235 pcmd += sizeof(uint32_t); 3236 3237 /* For SCR, remainder of payload is SCR parameter page */ 3238 memset(pcmd, 0, sizeof(SCR)); 3239 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3240 3241 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3242 "Issue SCR: did:x%x", 3243 ndlp->nlp_DID, 0, 0); 3244 3245 phba->fc_stat.elsXmitSCR++; 3246 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3247 elsiocb->context1 = lpfc_nlp_get(ndlp); 3248 if (!elsiocb->context1) 3249 goto node_err; 3250 3251 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3252 "Issue SCR: did:x%x refcnt %d", 3253 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3254 3255 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3256 if (rc == IOCB_ERROR) 3257 goto io_err; 3258 3259 /* Keep the ndlp just in case RDF is being sent */ 3260 return 0; 3261 3262 io_err: 3263 lpfc_nlp_put(ndlp); 3264 node_err: 3265 lpfc_els_free_iocb(phba, elsiocb); 3266 return 1; 3267 } 3268 3269 /** 3270 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3271 * 
or the other nport (pt2pt). 3272 * @vport: pointer to a host virtual N_Port data structure. 3273 * @retry: number of retries to the command IOCB. 3274 * 3275 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3276 * when connected to a fabric, or to the remote port when connected 3277 * in point-to-point mode. When sent to the Fabric Controller, it will 3278 * replay the RSCN to registered recipients. 3279 * 3280 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3281 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3282 * will be stored into the context1 field of the IOCB for the completion 3283 * callback function to the RSCN ELS command. 3284 * 3285 * Return code 3286 * 0 - Successfully issued RSCN command 3287 * 1 - Failed to issue RSCN command 3288 **/ 3289 int 3290 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3291 { 3292 int rc = 0; 3293 struct lpfc_hba *phba = vport->phba; 3294 struct lpfc_iocbq *elsiocb; 3295 struct lpfc_nodelist *ndlp; 3296 struct { 3297 struct fc_els_rscn rscn; 3298 struct fc_els_rscn_page portid; 3299 } *event; 3300 uint32_t nportid; 3301 uint16_t cmdsize = sizeof(*event); 3302 3303 /* Not supported for private loop */ 3304 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3305 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3306 return 1; 3307 3308 if (vport->fc_flag & FC_PT2PT) { 3309 /* find any mapped nport - that would be the other nport */ 3310 ndlp = lpfc_findnode_mapped(vport); 3311 if (!ndlp) 3312 return 1; 3313 } else { 3314 nportid = FC_FID_FCTRL; 3315 /* find the fabric controller node */ 3316 ndlp = lpfc_findnode_did(vport, nportid); 3317 if (!ndlp) { 3318 /* if one didn't exist, make one */ 3319 ndlp = lpfc_nlp_init(vport, nportid); 3320 if (!ndlp) 3321 return 1; 3322 lpfc_enqueue_node(vport, ndlp); 3323 } 3324 } 3325 3326 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3327 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3328 3329 if (!elsiocb) 3330 return 1; 3331 3332 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 3333 3334 event->rscn.rscn_cmd = ELS_RSCN; 3335 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3336 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3337 3338 nportid = vport->fc_myDID; 3339 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3340 event->portid.rscn_page_flags = 0; 3341 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3342 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3343 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3344 3345 phba->fc_stat.elsXmitRSCN++; 3346 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3347 elsiocb->context1 = lpfc_nlp_get(ndlp); 3348 if (!elsiocb->context1) 3349 goto node_err; 3350 3351 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3352 "Issue RSCN: did:x%x", 3353 ndlp->nlp_DID, 0, 0); 3354 3355 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3356 if (rc == IOCB_ERROR) 3357 goto io_err; 3358 3359 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3360 * trigger the release of node. 3361 */ 3362 if (!(vport->fc_flag & FC_PT2PT)) 3363 lpfc_nlp_put(ndlp); 3364 return 0; 3365 io_err: 3366 lpfc_nlp_put(ndlp); 3367 node_err: 3368 lpfc_els_free_iocb(phba, elsiocb); 3369 return 1; 3370 } 3371 3372 /** 3373 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3374 * @vport: pointer to a host virtual N_Port data structure. 3375 * @nportid: N_Port identifier to the remote node. 3376 * @retry: number of retries to the command IOCB. 
3377 * 3378 * This routine issues a Fibre Channel Address Resolution Response 3379 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3380 * is passed into the function. It first search the @vport node list to find 3381 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3382 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3383 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3384 * 3385 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3386 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3387 * will be stored into the context1 field of the IOCB for the completion 3388 * callback function to the PARPR ELS command. 3389 * 3390 * Return code 3391 * 0 - Successfully issued farpr command 3392 * 1 - Failed to issue farpr command 3393 **/ 3394 static int 3395 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3396 { 3397 int rc = 0; 3398 struct lpfc_hba *phba = vport->phba; 3399 struct lpfc_iocbq *elsiocb; 3400 FARP *fp; 3401 uint8_t *pcmd; 3402 uint32_t *lp; 3403 uint16_t cmdsize; 3404 struct lpfc_nodelist *ondlp; 3405 struct lpfc_nodelist *ndlp; 3406 3407 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3408 3409 ndlp = lpfc_findnode_did(vport, nportid); 3410 if (!ndlp) { 3411 ndlp = lpfc_nlp_init(vport, nportid); 3412 if (!ndlp) 3413 return 1; 3414 lpfc_enqueue_node(vport, ndlp); 3415 } 3416 3417 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3418 ndlp->nlp_DID, ELS_CMD_RNID); 3419 if (!elsiocb) 3420 return 1; 3421 3422 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3423 3424 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3425 pcmd += sizeof(uint32_t); 3426 3427 /* Fill in FARPR payload */ 3428 fp = (FARP *) (pcmd); 3429 memset(fp, 0, sizeof(FARP)); 3430 lp = (uint32_t *) pcmd; 3431 *lp++ = be32_to_cpu(nportid); 3432 *lp++ = be32_to_cpu(vport->fc_myDID); 3433 fp->Rflags = 0; 3434 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3435 3436 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3437 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3438 ondlp = lpfc_findnode_did(vport, nportid); 3439 if (ondlp) { 3440 memcpy(&fp->OportName, &ondlp->nlp_portname, 3441 sizeof(struct lpfc_name)); 3442 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3443 sizeof(struct lpfc_name)); 3444 } 3445 3446 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3447 "Issue FARPR: did:x%x", 3448 ndlp->nlp_DID, 0, 0); 3449 3450 phba->fc_stat.elsXmitFARPR++; 3451 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3452 elsiocb->context1 = lpfc_nlp_get(ndlp); 3453 if (!elsiocb->context1) { 3454 lpfc_els_free_iocb(phba, elsiocb); 3455 return 1; 3456 } 3457 3458 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3459 if (rc == IOCB_ERROR) { 3460 /* The additional lpfc_nlp_put will cause the following 3461 * lpfc_els_free_iocb routine to trigger the release of 3462 * the node. 3463 */ 3464 lpfc_nlp_put(ndlp); 3465 lpfc_els_free_iocb(phba, elsiocb); 3466 return 1; 3467 } 3468 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3469 * trigger the release of the node. 3470 */ 3471 /* Don't release reference count as RDF is likely outstanding */ 3472 return 0; 3473 } 3474 3475 /** 3476 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric. 3477 * @vport: pointer to a host virtual N_Port data structure. 3478 * @retry: retry counter for the command IOCB. 
3479 * 3480 * This routine issues an ELS RDF to the Fabric Controller to register 3481 * for diagnostic functions. 3482 * 3483 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3484 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3485 * will be stored into the context1 field of the IOCB for the completion 3486 * callback function to the RDF ELS command. 3487 * 3488 * Return code 3489 * 0 - Successfully issued rdf command 3490 * 1 - Failed to issue rdf command 3491 **/ 3492 int 3493 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3494 { 3495 struct lpfc_hba *phba = vport->phba; 3496 struct lpfc_iocbq *elsiocb; 3497 struct lpfc_els_rdf_req *prdf; 3498 struct lpfc_nodelist *ndlp; 3499 uint16_t cmdsize; 3500 int rc; 3501 3502 cmdsize = sizeof(*prdf); 3503 3504 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3505 if (!ndlp) { 3506 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3507 if (!ndlp) 3508 return -ENODEV; 3509 lpfc_enqueue_node(vport, ndlp); 3510 } 3511 3512 /* RDF ELS is not required on an NPIV VN_Port. */ 3513 if (vport->port_type == LPFC_NPIV_PORT) { 3514 lpfc_nlp_put(ndlp); 3515 return -EACCES; 3516 } 3517 3518 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3519 ndlp->nlp_DID, ELS_CMD_RDF); 3520 if (!elsiocb) 3521 return -ENOMEM; 3522 3523 /* Configure the payload for the supported FPIN events. */ 3524 prdf = (struct lpfc_els_rdf_req *) 3525 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 3526 memset(prdf, 0, cmdsize); 3527 prdf->rdf.fpin_cmd = ELS_RDF; 3528 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3529 sizeof(struct fc_els_rdf)); 3530 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3531 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3532 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3533 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3534 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3535 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3536 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3537 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3538 3539 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3540 "6444 Xmit RDF to remote NPORT x%x\n", 3541 ndlp->nlp_DID); 3542 3543 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3544 elsiocb->context1 = lpfc_nlp_get(ndlp); 3545 if (!elsiocb->context1) 3546 goto node_err; 3547 3548 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3549 "Issue RDF: did:x%x refcnt %d", 3550 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3551 3552 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3553 if (rc == IOCB_ERROR) 3554 goto io_err; 3555 return 0; 3556 3557 io_err: 3558 lpfc_nlp_put(ndlp); 3559 node_err: 3560 lpfc_els_free_iocb(phba, elsiocb); 3561 return -EIO; 3562 } 3563 3564 /** 3565 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 3566 * @vport: pointer to a host virtual N_Port data structure. 3567 * @nlp: pointer to a node-list data structure. 3568 * 3569 * This routine cancels the timer with a delayed IOCB-command retry for 3570 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 3571 * removes the ELS retry event if it presents. In addition, if the 3572 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 3573 * commands are sent for the @vport's nodes that require issuing discovery 3574 * ADISC. 
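* When the port is already ready and no further PLOGIs remain to be sent, FC_NDISC_ACTIVE is cleared, the discovery timer is canceled and RSCN processing is ended.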
3575 **/ 3576 void 3577 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 3578 { 3579 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3580 struct lpfc_work_evt *evtp; 3581 3582 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 3583 return; 3584 spin_lock_irq(&nlp->lock); 3585 nlp->nlp_flag &= ~NLP_DELAY_TMO; 3586 spin_unlock_irq(&nlp->lock); 3587 del_timer_sync(&nlp->nlp_delayfunc); 3588 nlp->nlp_last_elscmd = 0; 3589 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 3590 list_del_init(&nlp->els_retry_evt.evt_listp); 3591 /* Decrement nlp reference count held for the delayed retry */ 3592 evtp = &nlp->els_retry_evt; 3593 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 3594 } 3595 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 3596 spin_lock_irq(&nlp->lock); 3597 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3598 spin_unlock_irq(&nlp->lock); 3599 if (vport->num_disc_nodes) { 3600 if (vport->port_state < LPFC_VPORT_READY) { 3601 /* Check if there are more ADISCs to be sent */ 3602 lpfc_more_adisc(vport); 3603 } else { 3604 /* Check if there are more PLOGIs to be sent */ 3605 lpfc_more_plogi(vport); 3606 if (vport->num_disc_nodes == 0) { 3607 spin_lock_irq(shost->host_lock); 3608 vport->fc_flag &= ~FC_NDISC_ACTIVE; 3609 spin_unlock_irq(shost->host_lock); 3610 lpfc_can_disctmo(vport); 3611 lpfc_end_rscn(vport); 3612 } 3613 } 3614 } 3615 } 3616 return; 3617 } 3618 3619 /** 3620 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 3621 * @t: pointer to the timer function associated data (ndlp). 3622 * 3623 * This routine is invoked by the ndlp delayed-function timer to check 3624 * whether there is any pending ELS retry event(s) with the node. If not, it 3625 * simply returns. Otherwise, if there is at least one ELS delayed event, it 3626 * adds the delayed events to the HBA work list and invokes the 3627 * lpfc_worker_wake_up() routine to wake up worker thread to process the 3628 * event. Note that lpfc_nlp_get() is called before posting the event to 3629 * the work list to hold reference count of ndlp so that it guarantees the 3630 * reference to ndlp will still be available when the worker thread gets 3631 * to the event associated with the ndlp. 3632 **/ 3633 void 3634 lpfc_els_retry_delay(struct timer_list *t) 3635 { 3636 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 3637 struct lpfc_vport *vport = ndlp->vport; 3638 struct lpfc_hba *phba = vport->phba; 3639 unsigned long flags; 3640 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 3641 3642 spin_lock_irqsave(&phba->hbalock, flags); 3643 if (!list_empty(&evtp->evt_listp)) { 3644 spin_unlock_irqrestore(&phba->hbalock, flags); 3645 return; 3646 } 3647 3648 /* We need to hold the node by incrementing the reference 3649 * count until the queued work is done 3650 */ 3651 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 3652 if (evtp->evt_arg1) { 3653 evtp->evt = LPFC_EVT_ELS_RETRY; 3654 list_add_tail(&evtp->evt_listp, &phba->work_list); 3655 lpfc_worker_wake_up(phba); 3656 } 3657 spin_unlock_irqrestore(&phba->hbalock, flags); 3658 return; 3659 } 3660 3661 /** 3662 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 3663 * @ndlp: pointer to a node-list data structure. 3664 * 3665 * This routine is the worker-thread handler for processing the @ndlp delayed 3666 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 3667 * the last ELS command from the associated ndlp and invokes the proper ELS 3668 * function according to the delayed ELS command to retry the command. 3669 **/ 3670 void 3671 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 3672 { 3673 struct lpfc_vport *vport = ndlp->vport; 3674 uint32_t cmd, retry; 3675 3676 spin_lock_irq(&ndlp->lock); 3677 cmd = ndlp->nlp_last_elscmd; 3678 ndlp->nlp_last_elscmd = 0; 3679 3680 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 3681 spin_unlock_irq(&ndlp->lock); 3682 return; 3683 } 3684 3685 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 3686 spin_unlock_irq(&ndlp->lock); 3687 /* 3688 * If a discovery event readded nlp_delayfunc after timer 3689 * firing and before processing the timer, cancel the 3690 * nlp_delayfunc. 3691 */ 3692 del_timer_sync(&ndlp->nlp_delayfunc); 3693 retry = ndlp->nlp_retry; 3694 ndlp->nlp_retry = 0; 3695 3696 switch (cmd) { 3697 case ELS_CMD_FLOGI: 3698 lpfc_issue_els_flogi(vport, ndlp, retry); 3699 break; 3700 case ELS_CMD_PLOGI: 3701 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 3702 ndlp->nlp_prev_state = ndlp->nlp_state; 3703 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3704 } 3705 break; 3706 case ELS_CMD_ADISC: 3707 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 3708 ndlp->nlp_prev_state = ndlp->nlp_state; 3709 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3710 } 3711 break; 3712 case ELS_CMD_PRLI: 3713 case ELS_CMD_NVMEPRLI: 3714 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 3715 ndlp->nlp_prev_state = ndlp->nlp_state; 3716 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3717 } 3718 break; 3719 case ELS_CMD_LOGO: 3720 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 3721 ndlp->nlp_prev_state = ndlp->nlp_state; 3722 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3723 } 3724 break; 3725 case ELS_CMD_FDISC: 3726 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 3727 lpfc_issue_els_fdisc(vport, ndlp, retry); 3728 break; 3729 } 3730 return; 3731 } 3732 3733 /** 3734 * lpfc_link_reset - Issue link reset 3735 * @vport: pointer to a virtual N_Port data structure. 3736 * 3737 * This routine performs link reset by sending INIT_LINK mailbox command. 3738 * For SLI-3 adapter, link attention interrupt is enabled before issuing 3739 * INIT_LINK mailbox command. 
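* Note that the INIT_LINK mailbox command is posted with MBX_NOWAIT and the default mailbox completion handler, so the link reset completes asynchronously to this call.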
3740 * 3741 * Return code 3742 * 0 - Link reset initiated successfully 3743 * 1 - Failed to initiate link reset 3744 **/ 3745 int 3746 lpfc_link_reset(struct lpfc_vport *vport) 3747 { 3748 struct lpfc_hba *phba = vport->phba; 3749 LPFC_MBOXQ_t *mbox; 3750 uint32_t control; 3751 int rc; 3752 3753 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3754 "2851 Attempt link reset\n"); 3755 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3756 if (!mbox) { 3757 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3758 "2852 Failed to allocate mbox memory"); 3759 return 1; 3760 } 3761 3762 /* Enable Link attention interrupts */ 3763 if (phba->sli_rev <= LPFC_SLI_REV3) { 3764 spin_lock_irq(&phba->hbalock); 3765 phba->sli.sli_flag |= LPFC_PROCESS_LA; 3766 control = readl(phba->HCregaddr); 3767 control |= HC_LAINT_ENA; 3768 writel(control, phba->HCregaddr); 3769 readl(phba->HCregaddr); /* flush */ 3770 spin_unlock_irq(&phba->hbalock); 3771 } 3772 3773 lpfc_init_link(phba, mbox, phba->cfg_topology, 3774 phba->cfg_link_speed); 3775 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3776 mbox->vport = vport; 3777 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3778 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 3779 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 3780 "2853 Failed to issue INIT_LINK " 3781 "mbox command, rc:x%x\n", rc); 3782 mempool_free(mbox, phba->mbox_mem_pool); 3783 return 1; 3784 } 3785 3786 return 0; 3787 } 3788 3789 /** 3790 * lpfc_els_retry - Make retry decision on an els command iocb 3791 * @phba: pointer to lpfc hba data structure. 3792 * @cmdiocb: pointer to lpfc command iocb data structure. 3793 * @rspiocb: pointer to lpfc response iocb data structure. 3794 * 3795 * This routine makes a retry decision on an ELS command IOCB, which has 3796 * failed. The following ELS IOCBs use this function for retrying the command 3797 * when a previously issued command responded with an error status: FLOGI, PLOGI, 3798 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the 3799 * returned error status, it makes the decision whether a retry shall be 3800 * issued for the command, and whether a retry shall be made immediately or 3801 * delayed. In the former case, the corresponding ELS command issuing-function 3802 * is called to retry the command. In the latter case, the ELS command shall 3803 * be posted to the ndlp delayed event and delayed function timer set to the 3804 * ndlp for the delayed command issuing. 3805 * 3806 * Return code 3807 * 0 - No retry of els command is made 3808 * 1 - Immediate or delayed retry of els command is made 3809 **/ 3810 static int 3811 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3812 struct lpfc_iocbq *rspiocb) 3813 { 3814 struct lpfc_vport *vport = cmdiocb->vport; 3815 IOCB_t *irsp = &rspiocb->iocb; 3816 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 3817 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 3818 uint32_t *elscmd; 3819 struct ls_rjt stat; 3820 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0; 3821 int logerr = 0; 3822 uint32_t cmd = 0; 3823 uint32_t did; 3824 int link_reset = 0, rc; 3825 3826 3827 /* Note: context2 may be 0 for internal driver abort 3828 * of a delayed ELS command.
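* In that case no ELS payload is available, so the command code (cmd) stays 0.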
3829 */ 3830 3831 if (pcmd && pcmd->virt) { 3832 elscmd = (uint32_t *) (pcmd->virt); 3833 cmd = *elscmd++; 3834 } 3835 3836 if (ndlp) 3837 did = ndlp->nlp_DID; 3838 else { 3839 /* We should only hit this case for retrying PLOGI */ 3840 did = irsp->un.elsreq64.remoteID; 3841 ndlp = lpfc_findnode_did(vport, did); 3842 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 3843 return 1; 3844 } 3845 3846 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3847 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 3848 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 3849 3850 switch (irsp->ulpStatus) { 3851 case IOSTAT_FCP_RSP_ERROR: 3852 break; 3853 case IOSTAT_REMOTE_STOP: 3854 if (phba->sli_rev == LPFC_SLI_REV4) { 3855 /* This IO was aborted by the target, we don't 3856 * know the rxid and because we did not send the 3857 * ABTS we cannot generate and RRQ. 3858 */ 3859 lpfc_set_rrq_active(phba, ndlp, 3860 cmdiocb->sli4_lxritag, 0, 0); 3861 } 3862 break; 3863 case IOSTAT_LOCAL_REJECT: 3864 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 3865 case IOERR_LOOP_OPEN_FAILURE: 3866 if (cmd == ELS_CMD_FLOGI) { 3867 if (PCI_DEVICE_ID_HORNET == 3868 phba->pcidev->device) { 3869 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 3870 phba->pport->fc_myDID = 0; 3871 phba->alpa_map[0] = 0; 3872 phba->alpa_map[1] = 0; 3873 } 3874 } 3875 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 3876 delay = 1000; 3877 retry = 1; 3878 break; 3879 3880 case IOERR_ILLEGAL_COMMAND: 3881 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3882 "0124 Retry illegal cmd x%x " 3883 "retry:x%x delay:x%x\n", 3884 cmd, cmdiocb->retry, delay); 3885 retry = 1; 3886 /* All command's retry policy */ 3887 maxretry = 8; 3888 if (cmdiocb->retry > 2) 3889 delay = 1000; 3890 break; 3891 3892 case IOERR_NO_RESOURCES: 3893 logerr = 1; /* HBA out of resources */ 3894 retry = 1; 3895 if (cmdiocb->retry > 100) 3896 delay = 100; 3897 maxretry = 250; 3898 break; 3899 3900 case IOERR_ILLEGAL_FRAME: 3901 delay = 100; 3902 retry = 1; 3903 break; 3904 3905 case IOERR_INVALID_RPI: 3906 if (cmd == ELS_CMD_PLOGI && 3907 did == NameServer_DID) { 3908 /* Continue forever if plogi to */ 3909 /* the nameserver fails */ 3910 maxretry = 0; 3911 delay = 100; 3912 } 3913 retry = 1; 3914 break; 3915 3916 case IOERR_SEQUENCE_TIMEOUT: 3917 if (cmd == ELS_CMD_PLOGI && 3918 did == NameServer_DID && 3919 (cmdiocb->retry + 1) == maxretry) { 3920 /* Reset the Link */ 3921 link_reset = 1; 3922 break; 3923 } 3924 retry = 1; 3925 delay = 100; 3926 break; 3927 } 3928 break; 3929 3930 case IOSTAT_NPORT_RJT: 3931 case IOSTAT_FABRIC_RJT: 3932 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 3933 retry = 1; 3934 break; 3935 } 3936 break; 3937 3938 case IOSTAT_NPORT_BSY: 3939 case IOSTAT_FABRIC_BSY: 3940 logerr = 1; /* Fabric / Remote NPort out of resources */ 3941 retry = 1; 3942 break; 3943 3944 case IOSTAT_LS_RJT: 3945 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 3946 /* Added for Vendor specifc support 3947 * Just keep retrying for these Rsn / Exp codes 3948 */ 3949 switch (stat.un.b.lsRjtRsnCode) { 3950 case LSRJT_UNABLE_TPC: 3951 /* The driver has a VALID PLOGI but the rport has 3952 * rejected the PRLI - can't do it now. Delay 3953 * for 1 second and try again. 3954 * 3955 * However, if explanation is REQ_UNSUPPORTED there's 3956 * no point to retry PRLI. 
3957 */ 3958 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 3959 stat.un.b.lsRjtRsnCodeExp != 3960 LSEXP_REQ_UNSUPPORTED) { 3961 delay = 1000; 3962 maxretry = lpfc_max_els_tries + 1; 3963 retry = 1; 3964 break; 3965 } 3966 3967 /* Legacy bug fix code for targets with PLOGI delays. */ 3968 if (stat.un.b.lsRjtRsnCodeExp == 3969 LSEXP_CMD_IN_PROGRESS) { 3970 if (cmd == ELS_CMD_PLOGI) { 3971 delay = 1000; 3972 maxretry = 48; 3973 } 3974 retry = 1; 3975 break; 3976 } 3977 if (stat.un.b.lsRjtRsnCodeExp == 3978 LSEXP_CANT_GIVE_DATA) { 3979 if (cmd == ELS_CMD_PLOGI) { 3980 delay = 1000; 3981 maxretry = 48; 3982 } 3983 retry = 1; 3984 break; 3985 } 3986 if (cmd == ELS_CMD_PLOGI) { 3987 delay = 1000; 3988 maxretry = lpfc_max_els_tries + 1; 3989 retry = 1; 3990 break; 3991 } 3992 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3993 (cmd == ELS_CMD_FDISC) && 3994 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3995 lpfc_printf_vlog(vport, KERN_ERR, 3996 LOG_TRACE_EVENT, 3997 "0125 FDISC Failed (x%x). " 3998 "Fabric out of resources\n", 3999 stat.un.lsRjtError); 4000 lpfc_vport_set_state(vport, 4001 FC_VPORT_NO_FABRIC_RSCS); 4002 } 4003 break; 4004 4005 case LSRJT_LOGICAL_BSY: 4006 if ((cmd == ELS_CMD_PLOGI) || 4007 (cmd == ELS_CMD_PRLI) || 4008 (cmd == ELS_CMD_NVMEPRLI)) { 4009 delay = 1000; 4010 maxretry = 48; 4011 } else if (cmd == ELS_CMD_FDISC) { 4012 /* FDISC retry policy */ 4013 maxretry = 48; 4014 if (cmdiocb->retry >= 32) 4015 delay = 1000; 4016 } 4017 retry = 1; 4018 break; 4019 4020 case LSRJT_LOGICAL_ERR: 4021 /* There are some cases where switches return this 4022 * error when they are not ready and should be returning 4023 * Logical Busy. We should delay every time. 4024 */ 4025 if (cmd == ELS_CMD_FDISC && 4026 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4027 maxretry = 3; 4028 delay = 1000; 4029 retry = 1; 4030 } else if (cmd == ELS_CMD_FLOGI && 4031 stat.un.b.lsRjtRsnCodeExp == 4032 LSEXP_NOTHING_MORE) { 4033 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4034 retry = 1; 4035 lpfc_printf_vlog(vport, KERN_ERR, 4036 LOG_TRACE_EVENT, 4037 "0820 FLOGI Failed (x%x). " 4038 "BBCredit Not Supported\n", 4039 stat.un.lsRjtError); 4040 } 4041 break; 4042 4043 case LSRJT_PROTOCOL_ERR: 4044 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4045 (cmd == ELS_CMD_FDISC) && 4046 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4047 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4048 ) { 4049 lpfc_printf_vlog(vport, KERN_ERR, 4050 LOG_TRACE_EVENT, 4051 "0122 FDISC Failed (x%x). " 4052 "Fabric Detected Bad WWN\n", 4053 stat.un.lsRjtError); 4054 lpfc_vport_set_state(vport, 4055 FC_VPORT_FABRIC_REJ_WWN); 4056 } 4057 break; 4058 case LSRJT_VENDOR_UNIQUE: 4059 if ((stat.un.b.vendorUnique == 0x45) && 4060 (cmd == ELS_CMD_FLOGI)) { 4061 goto out_retry; 4062 } 4063 break; 4064 case LSRJT_CMD_UNSUPPORTED: 4065 /* lpfc nvmet returns this type of LS_RJT when it 4066 * receives an FCP PRLI because lpfc nvmet only 4067 * support NVME. ELS request is terminated for FCP4 4068 * on this rport. 
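* The NLP_FCP_PRLI_RJT flag is set below so the FCP PRLI is not retried to this rport.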
4069 */ 4070 if (stat.un.b.lsRjtRsnCodeExp == 4071 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 4072 spin_lock_irq(&ndlp->lock); 4073 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4074 spin_unlock_irq(&ndlp->lock); 4075 retry = 0; 4076 goto out_retry; 4077 } 4078 break; 4079 } 4080 break; 4081 4082 case IOSTAT_INTERMED_RSP: 4083 case IOSTAT_BA_RJT: 4084 break; 4085 4086 default: 4087 break; 4088 } 4089 4090 if (link_reset) { 4091 rc = lpfc_link_reset(vport); 4092 if (rc) { 4093 /* Do not give up. Retry PLOGI one more time and attempt 4094 * link reset if PLOGI fails again. 4095 */ 4096 retry = 1; 4097 delay = 100; 4098 goto out_retry; 4099 } 4100 return 1; 4101 } 4102 4103 if (did == FDMI_DID) 4104 retry = 1; 4105 4106 if ((cmd == ELS_CMD_FLOGI) && 4107 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4108 !lpfc_error_lost_link(irsp)) { 4109 /* FLOGI retry policy */ 4110 retry = 1; 4111 /* retry FLOGI forever */ 4112 if (phba->link_flag != LS_LOOPBACK_MODE) 4113 maxretry = 0; 4114 else 4115 maxretry = 2; 4116 4117 if (cmdiocb->retry >= 100) 4118 delay = 5000; 4119 else if (cmdiocb->retry >= 32) 4120 delay = 1000; 4121 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 4122 /* retry FDISCs every second up to devloss */ 4123 retry = 1; 4124 maxretry = vport->cfg_devloss_tmo; 4125 delay = 1000; 4126 } 4127 4128 cmdiocb->retry++; 4129 if (maxretry && (cmdiocb->retry >= maxretry)) { 4130 phba->fc_stat.elsRetryExceeded++; 4131 retry = 0; 4132 } 4133 4134 if ((vport->load_flag & FC_UNLOADING) != 0) 4135 retry = 0; 4136 4137 out_retry: 4138 if (retry) { 4139 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4140 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4141 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4142 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4143 "2849 Stop retry ELS command " 4144 "x%x to remote NPORT x%x, " 4145 "Data: x%x x%x\n", cmd, did, 4146 cmdiocb->retry, delay); 4147 return 0; 4148 } 4149 } 4150 4151 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4152 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4153 "0107 Retry ELS command x%x to remote " 4154 "NPORT x%x Data: x%x x%x\n", 4155 cmd, did, cmdiocb->retry, delay); 4156 4157 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4158 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 4159 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 4160 IOERR_NO_RESOURCES))) { 4161 /* Don't reset timer for no resources */ 4162 4163 /* If discovery / RSCN timer is running, reset it */ 4164 if (timer_pending(&vport->fc_disctmo) || 4165 (vport->fc_flag & FC_RSCN_MODE)) 4166 lpfc_set_disctmo(vport); 4167 } 4168 4169 phba->fc_stat.elsXmitRetry++; 4170 if (ndlp && delay) { 4171 phba->fc_stat.elsDelayRetry++; 4172 ndlp->nlp_retry = cmdiocb->retry; 4173 4174 /* delay is specified in milliseconds */ 4175 mod_timer(&ndlp->nlp_delayfunc, 4176 jiffies + msecs_to_jiffies(delay)); 4177 spin_lock_irq(&ndlp->lock); 4178 ndlp->nlp_flag |= NLP_DELAY_TMO; 4179 spin_unlock_irq(&ndlp->lock); 4180 4181 ndlp->nlp_prev_state = ndlp->nlp_state; 4182 if ((cmd == ELS_CMD_PRLI) || 4183 (cmd == ELS_CMD_NVMEPRLI)) 4184 lpfc_nlp_set_state(vport, ndlp, 4185 NLP_STE_PRLI_ISSUE); 4186 else 4187 lpfc_nlp_set_state(vport, ndlp, 4188 NLP_STE_NPR_NODE); 4189 ndlp->nlp_last_elscmd = cmd; 4190 4191 return 1; 4192 } 4193 switch (cmd) { 4194 case ELS_CMD_FLOGI: 4195 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4196 return 1; 4197 case ELS_CMD_FDISC: 4198 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4199 return 1; 4200 case ELS_CMD_PLOGI: 4201 if (ndlp) 
{ 4202 ndlp->nlp_prev_state = ndlp->nlp_state; 4203 lpfc_nlp_set_state(vport, ndlp, 4204 NLP_STE_PLOGI_ISSUE); 4205 } 4206 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4207 return 1; 4208 case ELS_CMD_ADISC: 4209 ndlp->nlp_prev_state = ndlp->nlp_state; 4210 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4211 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4212 return 1; 4213 case ELS_CMD_PRLI: 4214 case ELS_CMD_NVMEPRLI: 4215 ndlp->nlp_prev_state = ndlp->nlp_state; 4216 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4217 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4218 return 1; 4219 case ELS_CMD_LOGO: 4220 ndlp->nlp_prev_state = ndlp->nlp_state; 4221 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4222 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4223 return 1; 4224 } 4225 } 4226 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4227 if (logerr) { 4228 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4229 "0137 No retry ELS command x%x to remote " 4230 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4231 cmd, did, irsp->ulpStatus, 4232 irsp->un.ulpWord[4]); 4233 } 4234 else { 4235 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4236 "0108 No retry ELS command x%x to remote " 4237 "NPORT x%x Retried:%d Error:x%x/%x\n", 4238 cmd, did, cmdiocb->retry, irsp->ulpStatus, 4239 irsp->un.ulpWord[4]); 4240 } 4241 return 0; 4242 } 4243 4244 /** 4245 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4246 * @phba: pointer to lpfc hba data structure. 4247 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4248 * 4249 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4250 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4251 * checks to see whether there is a lpfc DMA buffer associated with the 4252 * response of the command IOCB. If so, it will be released before releasing 4253 * the lpfc DMA buffer associated with the IOCB itself. 4254 * 4255 * Return code 4256 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4257 **/ 4258 static int 4259 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 4260 { 4261 struct lpfc_dmabuf *buf_ptr; 4262 4263 /* Free the response before processing the command. */ 4264 if (!list_empty(&buf_ptr1->list)) { 4265 list_remove_head(&buf_ptr1->list, buf_ptr, 4266 struct lpfc_dmabuf, 4267 list); 4268 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4269 kfree(buf_ptr); 4270 } 4271 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 4272 kfree(buf_ptr1); 4273 return 0; 4274 } 4275 4276 /** 4277 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 4278 * @phba: pointer to lpfc hba data structure. 4279 * @buf_ptr: pointer to the lpfc dma buffer data structure. 4280 * 4281 * This routine releases the lpfc Direct Memory Access (DMA) buffer 4282 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 4283 * pool. 4284 * 4285 * Return code 4286 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4287 **/ 4288 static int 4289 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 4290 { 4291 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4292 kfree(buf_ptr); 4293 return 0; 4294 } 4295 4296 /** 4297 * lpfc_els_free_iocb - Free a command iocb and its associated resources 4298 * @phba: pointer to lpfc hba data structure. 4299 * @elsiocb: pointer to lpfc els command iocb data structure. 
4300 * 4301 * This routine frees a command IOCB and its associated resources. The 4302 * command IOCB data structure contains references to various associated 4303 * resources; these fields must be set to NULL if the associated reference 4304 * is not present: 4305 * context1 - reference to ndlp 4306 * context2 - reference to cmd 4307 * context2->next - reference to rsp 4308 * context3 - reference to bpl 4309 * 4310 * It first clears the context1 field, which held the ndlp reference for the 4311 * IOCB completion callback function. If the LPFC_DELAY_MEM_FREE flag is not 4312 * set, it invokes the lpfc_els_free_data() routine to release the Direct 4313 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 4314 * adds the DMA buffer to the @phba data structure for delayed release. 4315 * If a reference to the Buffer Pointer List (BPL) is present, the 4316 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 4317 * associated with the BPL. Finally, the lpfc_sli_release_iocbq() routine is 4318 * invoked to release the IOCB data structure back to the @phba IOCBQ list. 4319 * 4320 * Return code 4321 * 0 - Success (currently, always returns 0) 4322 **/ 4323 int 4324 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 4325 { 4326 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 4327 4328 /* The I/O job is complete. Clear the context1 data. */ 4329 elsiocb->context1 = NULL; 4330 4331 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 4332 if (elsiocb->context2) { 4333 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 4334 /* Firmware could still be in progress of DMAing 4335 * payload, so don't free data buffer till after 4336 * a hbeat. 4337 */ 4338 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 4339 buf_ptr = elsiocb->context2; 4340 elsiocb->context2 = NULL; 4341 if (buf_ptr) { 4342 buf_ptr1 = NULL; 4343 spin_lock_irq(&phba->hbalock); 4344 if (!list_empty(&buf_ptr->list)) { 4345 list_remove_head(&buf_ptr->list, 4346 buf_ptr1, struct lpfc_dmabuf, 4347 list); 4348 INIT_LIST_HEAD(&buf_ptr1->list); 4349 list_add_tail(&buf_ptr1->list, 4350 &phba->elsbuf); 4351 phba->elsbuf_cnt++; 4352 } 4353 INIT_LIST_HEAD(&buf_ptr->list); 4354 list_add_tail(&buf_ptr->list, &phba->elsbuf); 4355 phba->elsbuf_cnt++; 4356 spin_unlock_irq(&phba->hbalock); 4357 } 4358 } else { 4359 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 4360 lpfc_els_free_data(phba, buf_ptr1); 4361 elsiocb->context2 = NULL; 4362 } 4363 } 4364 4365 if (elsiocb->context3) { 4366 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 4367 lpfc_els_free_bpl(phba, buf_ptr); 4368 elsiocb->context3 = NULL; 4369 } 4370 lpfc_sli_release_iocbq(phba, elsiocb); 4371 return 0; 4372 } 4373 4374 /** 4375 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 4376 * @phba: pointer to lpfc hba data structure. 4377 * @cmdiocb: pointer to lpfc command iocb data structure. 4378 * @rspiocb: pointer to lpfc response iocb data structure. 4379 * 4380 * This routine is the completion callback function to the Logout (LOGO) 4381 * Accept (ACC) Response ELS command. This routine is invoked to indicate 4382 * the completion of the LOGO process. It invokes lpfc_nlp_not_used() to 4383 * release the ndlp if it has the last reference remaining (reference count 4384 * is 1). If that succeeded (meaning the ndlp was released), it sets the IOCB context1 4385 * field to NULL to inform the following lpfc_els_free_iocb() routine that no 4386 * ndlp reference count needs to be decremented.
Otherwise, the ndlp 4387 * reference use-count shall be decremented by the lpfc_els_free_iocb() 4388 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 4389 * IOCB data structure. 4390 **/ 4391 static void 4392 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4393 struct lpfc_iocbq *rspiocb) 4394 { 4395 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4396 struct lpfc_vport *vport = cmdiocb->vport; 4397 IOCB_t *irsp; 4398 4399 irsp = &rspiocb->iocb; 4400 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4401 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 4402 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 4403 /* ACC to LOGO completes to NPort <nlp_DID> */ 4404 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4405 "0109 ACC to LOGO completes to NPort x%x refcnt %d" 4406 "Data: x%x x%x x%x\n", 4407 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 4408 ndlp->nlp_state, ndlp->nlp_rpi); 4409 4410 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 4411 /* NPort Recovery mode or node is just allocated */ 4412 if (!lpfc_nlp_not_used(ndlp)) { 4413 /* If the ndlp is being used by another discovery 4414 * thread, just unregister the RPI. 4415 */ 4416 lpfc_unreg_rpi(vport, ndlp); 4417 } else { 4418 /* Indicate the node has already released, should 4419 * not reference to it from within lpfc_els_free_iocb. 4420 */ 4421 cmdiocb->context1 = NULL; 4422 } 4423 } 4424 4425 /* 4426 * The driver received a LOGO from the rport and has ACK'd it. 4427 * At this point, the driver is done so release the IOCB 4428 */ 4429 lpfc_els_free_iocb(phba, cmdiocb); 4430 lpfc_nlp_put(ndlp); 4431 } 4432 4433 /** 4434 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 4435 * @phba: pointer to lpfc hba data structure. 4436 * @pmb: pointer to the driver internal queue element for mailbox command. 4437 * 4438 * This routine is the completion callback function for unregister default 4439 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 4440 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 4441 * decrements the ndlp reference count held for this completion callback 4442 * function. After that, it invokes the lpfc_nlp_not_used() to check 4443 * whether there is only one reference left on the ndlp. If so, it will 4444 * perform one more decrement and trigger the release of the ndlp. 4445 **/ 4446 void 4447 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4448 { 4449 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 4450 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 4451 4452 pmb->ctx_buf = NULL; 4453 pmb->ctx_ndlp = NULL; 4454 4455 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4456 kfree(mp); 4457 mempool_free(pmb, phba->mbox_mem_pool); 4458 if (ndlp) { 4459 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4460 "0006 rpi x%x DID:%x flg:%x %d x%px\n", 4461 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4462 kref_read(&ndlp->kref), 4463 ndlp); 4464 /* This is the end of the default RPI cleanup logic for 4465 * this ndlp and it could get released. Clear the nlp_flags to 4466 * prevent any further processing. 4467 */ 4468 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4469 lpfc_nlp_put(ndlp); 4470 lpfc_nlp_not_used(ndlp); 4471 } 4472 4473 return; 4474 } 4475 4476 /** 4477 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 4478 * @phba: pointer to lpfc hba data structure. 
4479 * @cmdiocb: pointer to lpfc command iocb data structure. 4480 * @rspiocb: pointer to lpfc response iocb data structure. 4481 * 4482 * This routine is the completion callback function for ELS Response IOCB 4483 * command. In the normal case, this callback function just properly sets the 4484 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference 4485 * field in the command IOCB is not NULL, the referred mailbox command will 4486 * be sent out; it then invokes the lpfc_els_free_iocb() routine to release 4487 * the IOCB. Under error conditions, such as when an LS_RJT is returned or a 4488 * link down event occurs during discovery, the lpfc_nlp_not_used() 4489 * routine shall be invoked to try to release the ndlp if no other threads 4490 * are currently referring to it. 4491 **/ 4492 static void 4493 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4494 struct lpfc_iocbq *rspiocb) 4495 { 4496 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4497 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL; 4498 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL; 4499 IOCB_t *irsp; 4500 uint8_t *pcmd; 4501 LPFC_MBOXQ_t *mbox = NULL; 4502 struct lpfc_dmabuf *mp = NULL; 4503 uint32_t ls_rjt = 0; 4504 4505 irsp = &rspiocb->iocb; 4506 4507 if (!vport) { 4508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4509 "3177 ELS response failed\n"); 4510 goto out; 4511 } 4512 if (cmdiocb->context_un.mbox) 4513 mbox = cmdiocb->context_un.mbox; 4514 4515 /* First determine if this is a LS_RJT cmpl. Note, this callback 4516 * function can have cmdiocb->context1 (ndlp) field set to NULL. 4517 */ 4518 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 4519 if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { 4520 /* A LS_RJT associated with Default RPI cleanup has its own 4521 * separate code path. 4522 */ 4523 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 4524 ls_rjt = 1; 4525 } 4526 4527 /* Check to see if link went down during discovery */ 4528 if (!ndlp || lpfc_els_chk_latt(vport)) { 4529 if (mbox) { 4530 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 4531 if (mp) { 4532 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4533 kfree(mp); 4534 } 4535 mempool_free(mbox, phba->mbox_mem_pool); 4536 } 4537 if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI)) 4538 if (lpfc_nlp_not_used(ndlp)) { 4539 ndlp = NULL; 4540 /* Indicate the node has already been released and 4541 * should not be referenced from within 4542 * the routine lpfc_els_free_iocb.
4543 */ 4544 cmdiocb->context1 = NULL; 4545 } 4546 goto out; 4547 } 4548 4549 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4550 "ELS rsp cmpl: status:x%x/x%x did:x%x", 4551 irsp->ulpStatus, irsp->un.ulpWord[4], 4552 cmdiocb->iocb.un.elsreq64.remoteID); 4553 /* ELS response tag <ulpIoTag> completes */ 4554 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4555 "0110 ELS response tag x%x completes " 4556 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 4557 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 4558 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 4559 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4560 ndlp->nlp_rpi); 4561 if (mbox) { 4562 if ((rspiocb->iocb.ulpStatus == 0) && 4563 (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 4564 if (!lpfc_unreg_rpi(vport, ndlp) && 4565 (!(vport->fc_flag & FC_PT2PT))) { 4566 if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 4567 lpfc_printf_vlog(vport, KERN_INFO, 4568 LOG_DISCOVERY, 4569 "0314 PLOGI recov " 4570 "DID x%x " 4571 "Data: x%x x%x x%x\n", 4572 ndlp->nlp_DID, 4573 ndlp->nlp_state, 4574 ndlp->nlp_rpi, 4575 ndlp->nlp_flag); 4576 mp = mbox->ctx_buf; 4577 if (mp) { 4578 lpfc_mbuf_free(phba, mp->virt, 4579 mp->phys); 4580 kfree(mp); 4581 } 4582 mempool_free(mbox, phba->mbox_mem_pool); 4583 goto out; 4584 } 4585 } 4586 4587 /* Increment reference count to ndlp to hold the 4588 * reference to ndlp for the callback function. 4589 */ 4590 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 4591 if (!mbox->ctx_ndlp) 4592 goto out; 4593 4594 mbox->vport = vport; 4595 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 4596 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 4597 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 4598 } 4599 else { 4600 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 4601 ndlp->nlp_prev_state = ndlp->nlp_state; 4602 lpfc_nlp_set_state(vport, ndlp, 4603 NLP_STE_REG_LOGIN_ISSUE); 4604 } 4605 4606 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 4607 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 4608 != MBX_NOT_FINISHED) 4609 goto out; 4610 4611 /* Decrement the ndlp reference count we 4612 * set for this failed mailbox command. 4613 */ 4614 lpfc_nlp_put(ndlp); 4615 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4616 4617 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 4618 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4619 "0138 ELS rsp: Cannot issue reg_login for x%x " 4620 "Data: x%x x%x x%x\n", 4621 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4622 ndlp->nlp_rpi); 4623 4624 if (lpfc_nlp_not_used(ndlp)) { 4625 ndlp = NULL; 4626 /* Indicate node has already been released, 4627 * should not reference to it from within 4628 * the routine lpfc_els_free_iocb. 4629 */ 4630 cmdiocb->context1 = NULL; 4631 } 4632 } else { 4633 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 4634 if (!lpfc_error_lost_link(irsp) && 4635 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 4636 if (lpfc_nlp_not_used(ndlp)) { 4637 ndlp = NULL; 4638 /* Indicate node has already been 4639 * released, should not reference 4640 * to it from within the routine 4641 * lpfc_els_free_iocb. 
4642 */ 4643 cmdiocb->context1 = NULL; 4644 } 4645 } 4646 } 4647 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 4648 if (mp) { 4649 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4650 kfree(mp); 4651 } 4652 mempool_free(mbox, phba->mbox_mem_pool); 4653 } 4654 out: 4655 if (ndlp && shost) { 4656 spin_lock_irq(&ndlp->lock); 4657 if (mbox) 4658 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 4659 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 4660 spin_unlock_irq(&ndlp->lock); 4661 4662 /* If the node is not being used by another discovery thread, 4663 * and we are sending a reject, we are done with it. 4664 * Release driver reference count here and free associated 4665 * resources. 4666 */ 4667 if (ls_rjt) 4668 if (lpfc_nlp_not_used(ndlp)) 4669 /* Indicate node has already been released, 4670 * should not reference to it from within 4671 * the routine lpfc_els_free_iocb. 4672 */ 4673 cmdiocb->context1 = NULL; 4674 } 4675 4676 /* Release the originating I/O reference. */ 4677 lpfc_els_free_iocb(phba, cmdiocb); 4678 lpfc_nlp_put(ndlp); 4679 return; 4680 } 4681 4682 /** 4683 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 4684 * @vport: pointer to a host virtual N_Port data structure. 4685 * @flag: the els command code to be accepted. 4686 * @oldiocb: pointer to the original lpfc command iocb data structure. 4687 * @ndlp: pointer to a node-list data structure. 4688 * @mbox: pointer to the driver internal queue element for mailbox command. 4689 * 4690 * This routine prepares and issues an Accept (ACC) response IOCB 4691 * command. It uses the @flag to properly set up the IOCB field for the 4692 * specific ACC response command to be issued and invokes the 4693 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 4694 * @mbox pointer is passed in, it will be put into the context_un.mbox 4695 * field of the IOCB for the completion callback function to issue the 4696 * mailbox command to the HBA later when callback is invoked. 4697 * 4698 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4699 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4700 * will be stored into the context1 field of the IOCB for the completion 4701 * callback function to the corresponding response ELS IOCB command. 
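* If the ACC is being sent in response to a LOGO (NLP_LOGO_ACC set on the ndlp), the lpfc_cmpl_els_logo_acc() completion handler is used; otherwise the response completes through lpfc_cmpl_els_rsp().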
4702 * 4703 * Return code 4704 * 0 - Successfully issued acc response 4705 * 1 - Failed to issue acc response 4706 **/ 4707 int 4708 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 4709 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4710 LPFC_MBOXQ_t *mbox) 4711 { 4712 struct lpfc_hba *phba = vport->phba; 4713 IOCB_t *icmd; 4714 IOCB_t *oldcmd; 4715 struct lpfc_iocbq *elsiocb; 4716 uint8_t *pcmd; 4717 struct serv_parm *sp; 4718 uint16_t cmdsize; 4719 int rc; 4720 ELS_PKT *els_pkt_ptr; 4721 4722 oldcmd = &oldiocb->iocb; 4723 4724 switch (flag) { 4725 case ELS_CMD_ACC: 4726 cmdsize = sizeof(uint32_t); 4727 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4728 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4729 if (!elsiocb) { 4730 spin_lock_irq(&ndlp->lock); 4731 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4732 spin_unlock_irq(&ndlp->lock); 4733 return 1; 4734 } 4735 4736 icmd = &elsiocb->iocb; 4737 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4738 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4739 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4740 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4741 pcmd += sizeof(uint32_t); 4742 4743 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4744 "Issue ACC: did:x%x flg:x%x", 4745 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4746 break; 4747 case ELS_CMD_FLOGI: 4748 case ELS_CMD_PLOGI: 4749 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 4750 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4751 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4752 if (!elsiocb) 4753 return 1; 4754 4755 icmd = &elsiocb->iocb; 4756 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4757 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4758 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4759 4760 if (mbox) 4761 elsiocb->context_un.mbox = mbox; 4762 4763 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4764 pcmd += sizeof(uint32_t); 4765 sp = (struct serv_parm *)pcmd; 4766 4767 if (flag == ELS_CMD_FLOGI) { 4768 /* Copy the received service parameters back */ 4769 memcpy(sp, &phba->fc_fabparam, 4770 sizeof(struct serv_parm)); 4771 4772 /* Clear the F_Port bit */ 4773 sp->cmn.fPort = 0; 4774 4775 /* Mark all class service parameters as invalid */ 4776 sp->cls1.classValid = 0; 4777 sp->cls2.classValid = 0; 4778 sp->cls3.classValid = 0; 4779 sp->cls4.classValid = 0; 4780 4781 /* Copy our worldwide names */ 4782 memcpy(&sp->portName, &vport->fc_sparam.portName, 4783 sizeof(struct lpfc_name)); 4784 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 4785 sizeof(struct lpfc_name)); 4786 } else { 4787 memcpy(pcmd, &vport->fc_sparam, 4788 sizeof(struct serv_parm)); 4789 4790 sp->cmn.valid_vendor_ver_level = 0; 4791 memset(sp->un.vendorVersion, 0, 4792 sizeof(sp->un.vendorVersion)); 4793 sp->cmn.bbRcvSizeMsb &= 0xF; 4794 4795 /* If our firmware supports this feature, convey that 4796 * info to the target using the vendor specific field. 
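* The capability is conveyed by setting the Emulex vendor ID (LPFC_VV_EMLX_ID) and the LPFC_VV_SUPPRESS_RSP flag in the vendor version level words of the service parameters.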
4797 */ 4798 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 4799 sp->cmn.valid_vendor_ver_level = 1; 4800 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 4801 sp->un.vv.flags = 4802 cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 4803 } 4804 } 4805 4806 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4807 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", 4808 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4809 break; 4810 case ELS_CMD_PRLO: 4811 cmdsize = sizeof(uint32_t) + sizeof(PRLO); 4812 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4813 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); 4814 if (!elsiocb) 4815 return 1; 4816 4817 icmd = &elsiocb->iocb; 4818 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4819 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4820 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4821 4822 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, 4823 sizeof(uint32_t) + sizeof(PRLO)); 4824 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; 4825 els_pkt_ptr = (ELS_PKT *) pcmd; 4826 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; 4827 4828 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4829 "Issue ACC PRLO: did:x%x flg:x%x", 4830 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4831 break; 4832 default: 4833 return 1; 4834 } 4835 if (ndlp->nlp_flag & NLP_LOGO_ACC) { 4836 spin_lock_irq(&ndlp->lock); 4837 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || 4838 ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) 4839 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4840 spin_unlock_irq(&ndlp->lock); 4841 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc; 4842 } else { 4843 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4844 } 4845 4846 phba->fc_stat.elsXmitACC++; 4847 elsiocb->context1 = lpfc_nlp_get(ndlp); 4848 if (!elsiocb->context1) 4849 goto node_err; 4850 4851 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4852 if (rc == IOCB_ERROR) 4853 goto io_err; 4854 4855 /* Xmit ELS ACC response tag <ulpIoTag> */ 4856 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4857 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 4858 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 4859 "RPI: x%x, fc_flag x%x\n", 4860 rc, elsiocb->iotag, elsiocb->sli4_xritag, 4861 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4862 ndlp->nlp_rpi, vport->fc_flag); 4863 return 0; 4864 4865 io_err: 4866 lpfc_nlp_put(ndlp); 4867 node_err: 4868 lpfc_els_free_iocb(phba, elsiocb); 4869 return 1; 4870 } 4871 4872 /** 4873 * lpfc_els_rsp_reject - Propare and issue a rjt response iocb command 4874 * @vport: pointer to a virtual N_Port data structure. 4875 * @rejectError: reject response to issue 4876 * @oldiocb: pointer to the original lpfc command iocb data structure. 4877 * @ndlp: pointer to a node-list data structure. 4878 * @mbox: pointer to the driver internal queue element for mailbox command. 4879 * 4880 * This routine prepares and issue an Reject (RJT) response IOCB 4881 * command. If a @mbox pointer is passed in, it will be put into the 4882 * context_un.mbox field of the IOCB for the completion callback function 4883 * to issue to the HBA later. 4884 * 4885 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4886 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4887 * will be stored into the context1 field of the IOCB for the completion 4888 * callback function to the reject response ELS IOCB command. 
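* The LS_RJT payload is two words: the LS_RJT command code followed by the reason/explanation word supplied in @rejectError.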
4889 * 4890 * Return code 4891 * 0 - Successfully issued reject response 4892 * 1 - Failed to issue reject response 4893 **/ 4894 int 4895 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 4896 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4897 LPFC_MBOXQ_t *mbox) 4898 { 4899 int rc; 4900 struct lpfc_hba *phba = vport->phba; 4901 IOCB_t *icmd; 4902 IOCB_t *oldcmd; 4903 struct lpfc_iocbq *elsiocb; 4904 uint8_t *pcmd; 4905 uint16_t cmdsize; 4906 4907 cmdsize = 2 * sizeof(uint32_t); 4908 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4909 ndlp->nlp_DID, ELS_CMD_LS_RJT); 4910 if (!elsiocb) 4911 return 1; 4912 4913 icmd = &elsiocb->iocb; 4914 oldcmd = &oldiocb->iocb; 4915 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4916 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4917 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4918 4919 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 4920 pcmd += sizeof(uint32_t); 4921 *((uint32_t *) (pcmd)) = rejectError; 4922 4923 if (mbox) 4924 elsiocb->context_un.mbox = mbox; 4925 4926 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 4927 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4928 "0129 Xmit ELS RJT x%x response tag x%x " 4929 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 4930 "rpi x%x\n", 4931 rejectError, elsiocb->iotag, 4932 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 4933 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 4934 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4935 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 4936 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 4937 4938 phba->fc_stat.elsXmitLSRJT++; 4939 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4940 elsiocb->context1 = lpfc_nlp_get(ndlp); 4941 if (!elsiocb->context1) 4942 goto node_err; 4943 4944 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4945 if (rc == IOCB_ERROR) 4946 goto io_err; 4947 4948 return 0; 4949 4950 io_err: 4951 lpfc_nlp_put(ndlp); 4952 node_err: 4953 lpfc_els_free_iocb(phba, elsiocb); 4954 return 1; 4955 } 4956 4957 /** 4958 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 4959 * @vport: pointer to a virtual N_Port data structure. 4960 * @oldiocb: pointer to the original lpfc command iocb data structure. 4961 * @ndlp: pointer to a node-list data structure. 4962 * 4963 * This routine prepares and issues an Accept (ACC) response to Address 4964 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 4965 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4966 * 4967 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4968 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4969 * will be stored into the context1 field of the IOCB for the completion 4970 * callback function to the ADISC Accept response ELS IOCB command. 
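* The ACC payload returns the preferred hard AL_PA along with this vport's port name, node name and N_Port ID.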
4971 * 4972 * Return code 4973 * 0 - Successfully issued acc adisc response 4974 * 1 - Failed to issue adisc acc response 4975 **/ 4976 int 4977 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4978 struct lpfc_nodelist *ndlp) 4979 { 4980 struct lpfc_hba *phba = vport->phba; 4981 ADISC *ap; 4982 IOCB_t *icmd, *oldcmd; 4983 struct lpfc_iocbq *elsiocb; 4984 uint8_t *pcmd; 4985 uint16_t cmdsize; 4986 int rc; 4987 4988 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 4989 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4990 ndlp->nlp_DID, ELS_CMD_ACC); 4991 if (!elsiocb) 4992 return 1; 4993 4994 icmd = &elsiocb->iocb; 4995 oldcmd = &oldiocb->iocb; 4996 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4997 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4998 4999 /* Xmit ADISC ACC response tag <ulpIoTag> */ 5000 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5001 "0130 Xmit ADISC ACC response iotag x%x xri: " 5002 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 5003 elsiocb->iotag, elsiocb->iocb.ulpContext, 5004 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5005 ndlp->nlp_rpi); 5006 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5007 5008 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5009 pcmd += sizeof(uint32_t); 5010 5011 ap = (ADISC *) (pcmd); 5012 ap->hardAL_PA = phba->fc_pref_ALPA; 5013 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5014 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5015 ap->DID = be32_to_cpu(vport->fc_myDID); 5016 5017 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5018 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5019 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5020 5021 phba->fc_stat.elsXmitACC++; 5022 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5023 elsiocb->context1 = lpfc_nlp_get(ndlp); 5024 if (!elsiocb->context1) 5025 goto node_err; 5026 5027 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5028 if (rc == IOCB_ERROR) 5029 goto io_err; 5030 5031 /* Xmit ELS ACC response tag <ulpIoTag> */ 5032 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5033 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5034 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5035 "RPI: x%x, fc_flag x%x\n", 5036 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5037 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5038 ndlp->nlp_rpi, vport->fc_flag); 5039 return 0; 5040 5041 io_err: 5042 lpfc_nlp_put(ndlp); 5043 node_err: 5044 lpfc_els_free_iocb(phba, elsiocb); 5045 return 1; 5046 } 5047 5048 /** 5049 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 5050 * @vport: pointer to a virtual N_Port data structure. 5051 * @oldiocb: pointer to the original lpfc command iocb data structure. 5052 * @ndlp: pointer to a node-list data structure. 5053 * 5054 * This routine prepares and issues an Accept (ACC) response to Process 5055 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 5056 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5057 * 5058 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5059 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5060 * will be stored into the context1 field of the IOCB for the completion 5061 * callback function to the PRLI Accept response ELS IOCB command. 
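* The ACC is built as either an FCP or an NVME PRLI parameter page, selected by the FC4 type carried in the incoming PRLI request.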
5062 * 5063 * Return code 5064 * 0 - Successfully issued acc prli response 5065 * 1 - Failed to issue acc prli response 5066 **/ 5067 int 5068 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5069 struct lpfc_nodelist *ndlp) 5070 { 5071 struct lpfc_hba *phba = vport->phba; 5072 PRLI *npr; 5073 struct lpfc_nvme_prli *npr_nvme; 5074 lpfc_vpd_t *vpd; 5075 IOCB_t *icmd; 5076 IOCB_t *oldcmd; 5077 struct lpfc_iocbq *elsiocb; 5078 uint8_t *pcmd; 5079 uint16_t cmdsize; 5080 uint32_t prli_fc4_req, *req_payload; 5081 struct lpfc_dmabuf *req_buf; 5082 int rc; 5083 u32 elsrspcmd; 5084 5085 /* Need the incoming PRLI payload to determine if the ACC is for an 5086 * FC4 or NVME PRLI type. The PRLI type is at word 1. 5087 */ 5088 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 5089 req_payload = (((uint32_t *)req_buf->virt) + 1); 5090 5091 /* PRLI type payload is at byte 3 for FCP or NVME. */ 5092 prli_fc4_req = be32_to_cpu(*req_payload); 5093 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 5094 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5095 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 5096 prli_fc4_req, *((uint32_t *)req_payload)); 5097 5098 if (prli_fc4_req == PRLI_FCP_TYPE) { 5099 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 5100 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 5101 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5102 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 5103 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 5104 } else { 5105 return 1; 5106 } 5107 5108 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5109 ndlp->nlp_DID, elsrspcmd); 5110 if (!elsiocb) 5111 return 1; 5112 5113 icmd = &elsiocb->iocb; 5114 oldcmd = &oldiocb->iocb; 5115 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5116 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5117 5118 /* Xmit PRLI ACC response tag <ulpIoTag> */ 5119 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5120 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 5121 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5122 elsiocb->iotag, elsiocb->iocb.ulpContext, 5123 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5124 ndlp->nlp_rpi); 5125 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5126 memset(pcmd, 0, cmdsize); 5127 5128 *((uint32_t *)(pcmd)) = elsrspcmd; 5129 pcmd += sizeof(uint32_t); 5130 5131 /* For PRLI, remainder of payload is PRLI parameter page */ 5132 vpd = &phba->vpd; 5133 5134 if (prli_fc4_req == PRLI_FCP_TYPE) { 5135 /* 5136 * If the remote port is a target and our firmware version 5137 * is 3.20 or later, set the following bits for FC-TAPE 5138 * support. 
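* (confirmed completion, retry and task retry identification request are enabled).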
5139 */ 5140 npr = (PRLI *) pcmd; 5141 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5142 (vpd->rev.feaLevelHigh >= 0x02)) { 5143 npr->ConfmComplAllowed = 1; 5144 npr->Retry = 1; 5145 npr->TaskRetryIdReq = 1; 5146 } 5147 npr->acceptRspCode = PRLI_REQ_EXECUTED; 5148 npr->estabImagePair = 1; 5149 npr->readXferRdyDis = 1; 5150 npr->ConfmComplAllowed = 1; 5151 npr->prliType = PRLI_FCP_TYPE; 5152 npr->initiatorFunc = 1; 5153 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5154 /* Respond with an NVME PRLI Type */ 5155 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 5156 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 5157 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 5158 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 5159 if (phba->nvmet_support) { 5160 bf_set(prli_tgt, npr_nvme, 1); 5161 bf_set(prli_disc, npr_nvme, 1); 5162 if (phba->cfg_nvme_enable_fb) { 5163 bf_set(prli_fba, npr_nvme, 1); 5164 5165 /* TBD. Target mode needs to post buffers 5166 * that support the configured first burst 5167 * byte size. 5168 */ 5169 bf_set(prli_fb_sz, npr_nvme, 5170 phba->cfg_nvmet_fb_size); 5171 } 5172 } else { 5173 bf_set(prli_init, npr_nvme, 1); 5174 } 5175 5176 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 5177 "6015 NVME issue PRLI ACC word1 x%08x " 5178 "word4 x%08x word5 x%08x flag x%x, " 5179 "fcp_info x%x nlp_type x%x\n", 5180 npr_nvme->word1, npr_nvme->word4, 5181 npr_nvme->word5, ndlp->nlp_flag, 5182 ndlp->nlp_fcp_info, ndlp->nlp_type); 5183 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 5184 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 5185 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 5186 } else 5187 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5188 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 5189 prli_fc4_req, ndlp->nlp_fc4_type, 5190 ndlp->nlp_DID); 5191 5192 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5193 "Issue ACC PRLI: did:x%x flg:x%x", 5194 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5195 5196 phba->fc_stat.elsXmitACC++; 5197 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5198 elsiocb->context1 = lpfc_nlp_get(ndlp); 5199 if (!elsiocb->context1) 5200 goto node_err; 5201 5202 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5203 if (rc == IOCB_ERROR) 5204 goto io_err; 5205 return 0; 5206 5207 io_err: 5208 lpfc_nlp_put(ndlp); 5209 node_err: 5210 lpfc_els_free_iocb(phba, elsiocb); 5211 return 1; 5212 } 5213 5214 /** 5215 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 5216 * @vport: pointer to a virtual N_Port data structure. 5217 * @format: rnid command format. 5218 * @oldiocb: pointer to the original lpfc command iocb data structure. 5219 * @ndlp: pointer to a node-list data structure. 5220 * 5221 * This routine issues a Request Node Identification Data (RNID) Accept 5222 * (ACC) response. It constructs the RNID ACC response command according to 5223 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 5224 * issue the response. Note that this command does not need to hold the ndlp 5225 * reference count for the callback. So, the ndlp reference count taken by 5226 * the lpfc_prep_els_iocb() routine is put back and the context1 field of 5227 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that 5228 * there is no ndlp reference available. 
5229 * 5230 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5231 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5232 * will be stored into the context1 field of the IOCB for the completion 5233 * callback function. However, for the RNID Accept Response ELS command, 5234 * this is undone later by this routine after the IOCB is allocated. 5235 * 5236 * Return code 5237 * 0 - Successfully issued acc rnid response 5238 * 1 - Failed to issue acc rnid response 5239 **/ 5240 static int 5241 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 5242 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5243 { 5244 struct lpfc_hba *phba = vport->phba; 5245 RNID *rn; 5246 IOCB_t *icmd, *oldcmd; 5247 struct lpfc_iocbq *elsiocb; 5248 uint8_t *pcmd; 5249 uint16_t cmdsize; 5250 int rc; 5251 5252 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 5253 + (2 * sizeof(struct lpfc_name)); 5254 if (format) 5255 cmdsize += sizeof(RNID_TOP_DISC); 5256 5257 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5258 ndlp->nlp_DID, ELS_CMD_ACC); 5259 if (!elsiocb) 5260 return 1; 5261 5262 icmd = &elsiocb->iocb; 5263 oldcmd = &oldiocb->iocb; 5264 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5265 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5266 5267 /* Xmit RNID ACC response tag <ulpIoTag> */ 5268 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5269 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 5270 elsiocb->iotag, elsiocb->iocb.ulpContext); 5271 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5272 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5273 pcmd += sizeof(uint32_t); 5274 5275 memset(pcmd, 0, sizeof(RNID)); 5276 rn = (RNID *) (pcmd); 5277 rn->Format = format; 5278 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 5279 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5280 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5281 switch (format) { 5282 case 0: 5283 rn->SpecificLen = 0; 5284 break; 5285 case RNID_TOPOLOGY_DISC: 5286 rn->SpecificLen = sizeof(RNID_TOP_DISC); 5287 memcpy(&rn->un.topologyDisc.portName, 5288 &vport->fc_portname, sizeof(struct lpfc_name)); 5289 rn->un.topologyDisc.unitType = RNID_HBA; 5290 rn->un.topologyDisc.physPort = 0; 5291 rn->un.topologyDisc.attachedNodes = 0; 5292 break; 5293 default: 5294 rn->CommonLen = 0; 5295 rn->SpecificLen = 0; 5296 break; 5297 } 5298 5299 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5300 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 5301 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5302 5303 phba->fc_stat.elsXmitACC++; 5304 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5305 elsiocb->context1 = lpfc_nlp_get(ndlp); 5306 if (!elsiocb->context1) 5307 goto node_err; 5308 5309 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5310 if (rc == IOCB_ERROR) 5311 goto io_err; 5312 5313 return 0; 5314 5315 io_err: 5316 lpfc_nlp_put(ndlp); 5317 node_err: 5318 lpfc_els_free_iocb(phba, elsiocb); 5319 return 1; 5320 } 5321 5322 /** 5323 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 5324 * @vport: pointer to a virtual N_Port data structure. 5325 * @iocb: pointer to the lpfc command iocb data structure. 5326 * @ndlp: pointer to a node-list data structure. 
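* * This routine parses the RRQ payload of @iocb to find the exchange being released: the OX_ID is used when the local port originated the exchange, otherwise the RX_ID is used, and any matching active RRQ held for @ndlp is cleared via lpfc_clr_rrq_active().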
5327 * 5328 * Return 5329 **/ 5330 static void 5331 lpfc_els_clear_rrq(struct lpfc_vport *vport, 5332 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 5333 { 5334 struct lpfc_hba *phba = vport->phba; 5335 uint8_t *pcmd; 5336 struct RRQ *rrq; 5337 uint16_t rxid; 5338 uint16_t xri; 5339 struct lpfc_node_rrq *prrq; 5340 5341 5342 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 5343 pcmd += sizeof(uint32_t); 5344 rrq = (struct RRQ *)pcmd; 5345 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 5346 rxid = bf_get(rrq_rxid, rrq); 5347 5348 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5349 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 5350 " x%x x%x\n", 5351 be32_to_cpu(bf_get(rrq_did, rrq)), 5352 bf_get(rrq_oxid, rrq), 5353 rxid, 5354 iocb->iotag, iocb->iocb.ulpContext); 5355 5356 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5357 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 5358 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 5359 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 5360 xri = bf_get(rrq_oxid, rrq); 5361 else 5362 xri = rxid; 5363 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 5364 if (prrq) 5365 lpfc_clr_rrq_active(phba, xri, prrq); 5366 return; 5367 } 5368 5369 /** 5370 * lpfc_els_rsp_echo_acc - Issue echo acc response 5371 * @vport: pointer to a virtual N_Port data structure. 5372 * @data: pointer to echo data to return in the accept. 5373 * @oldiocb: pointer to the original lpfc command iocb data structure. 5374 * @ndlp: pointer to a node-list data structure. 5375 * 5376 * Return code 5377 * 0 - Successfully issued acc echo response 5378 * 1 - Failed to issue acc echo response 5379 **/ 5380 static int 5381 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 5382 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5383 { 5384 struct lpfc_hba *phba = vport->phba; 5385 struct lpfc_iocbq *elsiocb; 5386 uint8_t *pcmd; 5387 uint16_t cmdsize; 5388 int rc; 5389 5390 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 5391 5392 /* The accumulated length can exceed the BPL_SIZE. 
For 5393 * now, use this as the limit 5394 */ 5395 if (cmdsize > LPFC_BPL_SIZE) 5396 cmdsize = LPFC_BPL_SIZE; 5397 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5398 ndlp->nlp_DID, ELS_CMD_ACC); 5399 if (!elsiocb) 5400 return 1; 5401 5402 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 5403 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 5404 5405 /* Xmit ECHO ACC response tag <ulpIoTag> */ 5406 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5407 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 5408 elsiocb->iotag, elsiocb->iocb.ulpContext); 5409 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5410 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5411 pcmd += sizeof(uint32_t); 5412 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 5413 5414 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5415 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", 5416 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5417 5418 phba->fc_stat.elsXmitACC++; 5419 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5420 elsiocb->context1 = lpfc_nlp_get(ndlp); 5421 if (!elsiocb->context1) 5422 goto node_err; 5423 5424 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5425 if (rc == IOCB_ERROR) 5426 goto io_err; 5427 return 0; 5428 5429 io_err: 5430 lpfc_nlp_put(ndlp); 5431 node_err: 5432 lpfc_els_free_iocb(phba, elsiocb); 5433 return 1; 5434 } 5435 5436 /** 5437 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 5438 * @vport: pointer to a host virtual N_Port data structure. 5439 * 5440 * This routine issues Address Discover (ADISC) ELS commands to those 5441 * N_Ports of the @vport which are in node port recovery state and for which 5442 * ADISC has not yet been issued. Each time an ELS ADISC IOCB is issued by 5443 * invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery count 5444 * (num_disc_nodes) shall be incremented. If num_disc_nodes reaches a 5445 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will 5446 * be marked with the FC_NLP_MORE bit and issuing of the remaining ADISC 5447 * IOCBs quits, to be picked up later. On the other hand, after walking through 5448 * all the ndlps on the @vport without issuing any ADISC IOCB, the 5449 * @vport fc_flag shall have the FC_NLP_MORE bit cleared, indicating there are 5450 * no more ADISCs to be sent. 5451 * 5452 * Return code 5453 * The number of N_Ports with adisc issued.
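* (A zero return means no eligible NPR node was found, in which case the FC_NLP_MORE bit is cleared.)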
5454 **/ 5455 int 5456 lpfc_els_disc_adisc(struct lpfc_vport *vport) 5457 { 5458 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5459 struct lpfc_nodelist *ndlp, *next_ndlp; 5460 int sentadisc = 0; 5461 5462 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 5463 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5464 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 5465 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 5466 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { 5467 spin_lock_irq(&ndlp->lock); 5468 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 5469 spin_unlock_irq(&ndlp->lock); 5470 ndlp->nlp_prev_state = ndlp->nlp_state; 5471 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 5472 lpfc_issue_els_adisc(vport, ndlp, 0); 5473 sentadisc++; 5474 vport->num_disc_nodes++; 5475 if (vport->num_disc_nodes >= 5476 vport->cfg_discovery_threads) { 5477 spin_lock_irq(shost->host_lock); 5478 vport->fc_flag |= FC_NLP_MORE; 5479 spin_unlock_irq(shost->host_lock); 5480 break; 5481 } 5482 } 5483 } 5484 if (sentadisc == 0) { 5485 spin_lock_irq(shost->host_lock); 5486 vport->fc_flag &= ~FC_NLP_MORE; 5487 spin_unlock_irq(shost->host_lock); 5488 } 5489 return sentadisc; 5490 } 5491 5492 /** 5493 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc 5494 * @vport: pointer to a host virtual N_Port data structure. 5495 * 5496 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports 5497 * which are in node port recovery state on a @vport. Each time an ELS 5498 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, 5499 * the per-@vport discovery count (num_disc_nodes) shall be 5500 * incremented. If num_disc_nodes reaches a pre-configured threshold 5501 * (cfg_discovery_threads), the @vport fc_flag will be marked with the 5502 * FC_NLP_MORE bit and issuing of the remaining PLOGI IOCBs quits, to be 5503 * picked up later. On the other hand, after walking through all the ndlps 5504 * on the @vport without issuing any PLOGI IOCB, the @vport fc_flag 5505 * shall have the FC_NLP_MORE bit cleared, indicating there are no more 5506 * PLOGIs to be sent. 5507 * 5508 * Return code 5509 * The number of N_Ports with plogi issued.
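* (Nodes still waiting out a retry delay (NLP_DELAY_TMO) or still eligible for ADISC (NLP_NPR_ADISC) are skipped by this routine.)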
5510 **/ 5511 int 5512 lpfc_els_disc_plogi(struct lpfc_vport *vport) 5513 { 5514 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5515 struct lpfc_nodelist *ndlp, *next_ndlp; 5516 int sentplogi = 0; 5517 5518 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 5519 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5520 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 5521 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 5522 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 5523 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 5524 ndlp->nlp_prev_state = ndlp->nlp_state; 5525 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 5526 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5527 sentplogi++; 5528 vport->num_disc_nodes++; 5529 if (vport->num_disc_nodes >= 5530 vport->cfg_discovery_threads) { 5531 spin_lock_irq(shost->host_lock); 5532 vport->fc_flag |= FC_NLP_MORE; 5533 spin_unlock_irq(shost->host_lock); 5534 break; 5535 } 5536 } 5537 } 5538 5539 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5540 "6452 Discover PLOGI %d flag x%x\n", 5541 sentplogi, vport->fc_flag); 5542 5543 if (sentplogi) { 5544 lpfc_set_disctmo(vport); 5545 } 5546 else { 5547 spin_lock_irq(shost->host_lock); 5548 vport->fc_flag &= ~FC_NLP_MORE; 5549 spin_unlock_irq(shost->host_lock); 5550 } 5551 return sentplogi; 5552 } 5553 5554 static uint32_t 5555 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 5556 uint32_t word0) 5557 { 5558 5559 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 5560 desc->payload.els_req = word0; 5561 desc->length = cpu_to_be32(sizeof(desc->payload)); 5562 5563 return sizeof(struct fc_rdp_link_service_desc); 5564 } 5565 5566 static uint32_t 5567 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 5568 uint8_t *page_a0, uint8_t *page_a2) 5569 { 5570 uint16_t wavelength; 5571 uint16_t temperature; 5572 uint16_t rx_power; 5573 uint16_t tx_bias; 5574 uint16_t tx_power; 5575 uint16_t vcc; 5576 uint16_t flag = 0; 5577 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 5578 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 5579 5580 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 5581 5582 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 5583 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 5584 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 5585 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 5586 5587 if ((trasn_code_byte4->fc_sw_laser) || 5588 (trasn_code_byte5->fc_sw_laser_sl) || 5589 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 5590 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 5591 } else if (trasn_code_byte4->fc_lw_laser) { 5592 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 5593 page_a0[SSF_WAVELENGTH_B0]; 5594 if (wavelength == SFP_WAVELENGTH_LC1310) 5595 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 5596 if (wavelength == SFP_WAVELENGTH_LL1550) 5597 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 5598 } 5599 /* check if its SFP+ */ 5600 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 5601 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 5602 << SFP_FLAG_CT_SHIFT; 5603 5604 /* check if its OPTICAL */ 5605 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
5606 SFP_FLAG_IS_OPTICAL_PORT : 0) 5607 << SFP_FLAG_IS_OPTICAL_SHIFT; 5608 5609 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 5610 page_a2[SFF_TEMPERATURE_B0]); 5611 vcc = (page_a2[SFF_VCC_B1] << 8 | 5612 page_a2[SFF_VCC_B0]); 5613 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 5614 page_a2[SFF_TXPOWER_B0]); 5615 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 5616 page_a2[SFF_TX_BIAS_CURRENT_B0]); 5617 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 5618 page_a2[SFF_RXPOWER_B0]); 5619 desc->sfp_info.temperature = cpu_to_be16(temperature); 5620 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 5621 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 5622 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 5623 desc->sfp_info.vcc = cpu_to_be16(vcc); 5624 5625 desc->sfp_info.flags = cpu_to_be16(flag); 5626 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 5627 5628 return sizeof(struct fc_rdp_sfp_desc); 5629 } 5630 5631 static uint32_t 5632 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 5633 READ_LNK_VAR *stat) 5634 { 5635 uint32_t type; 5636 5637 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 5638 5639 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 5640 5641 desc->info.port_type = cpu_to_be32(type); 5642 5643 desc->info.link_status.link_failure_cnt = 5644 cpu_to_be32(stat->linkFailureCnt); 5645 desc->info.link_status.loss_of_synch_cnt = 5646 cpu_to_be32(stat->lossSyncCnt); 5647 desc->info.link_status.loss_of_signal_cnt = 5648 cpu_to_be32(stat->lossSignalCnt); 5649 desc->info.link_status.primitive_seq_proto_err = 5650 cpu_to_be32(stat->primSeqErrCnt); 5651 desc->info.link_status.invalid_trans_word = 5652 cpu_to_be32(stat->invalidXmitWord); 5653 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 5654 5655 desc->length = cpu_to_be32(sizeof(desc->info)); 5656 5657 return sizeof(struct fc_rdp_link_error_status_desc); 5658 } 5659 5660 static uint32_t 5661 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 5662 struct lpfc_vport *vport) 5663 { 5664 uint32_t bbCredit; 5665 5666 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 5667 5668 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 5669 (vport->fc_sparam.cmn.bbCreditMsb << 8); 5670 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 5671 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 5672 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 5673 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 5674 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 5675 } else { 5676 desc->bbc_info.attached_port_bbc = 0; 5677 } 5678 5679 desc->bbc_info.rtt = 0; 5680 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 5681 5682 return sizeof(struct fc_rdp_bbc_desc); 5683 } 5684 5685 static uint32_t 5686 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 5687 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 5688 { 5689 uint32_t flags = 0; 5690 5691 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5692 5693 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 5694 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 5695 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 5696 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 5697 5698 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5699 flags |= RDP_OET_HIGH_ALARM; 5700 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5701 flags |= RDP_OET_LOW_ALARM; 5702 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5703 flags |= RDP_OET_HIGH_WARNING; 5704 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5705 
flags |= RDP_OET_LOW_WARNING; 5706 5707 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 5708 desc->oed_info.function_flags = cpu_to_be32(flags); 5709 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5710 return sizeof(struct fc_rdp_oed_sfp_desc); 5711 } 5712 5713 static uint32_t 5714 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 5715 struct fc_rdp_oed_sfp_desc *desc, 5716 uint8_t *page_a2) 5717 { 5718 uint32_t flags = 0; 5719 5720 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5721 5722 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 5723 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 5724 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 5725 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 5726 5727 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5728 flags |= RDP_OET_HIGH_ALARM; 5729 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5730 flags |= RDP_OET_LOW_ALARM; 5731 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5732 flags |= RDP_OET_HIGH_WARNING; 5733 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5734 flags |= RDP_OET_LOW_WARNING; 5735 5736 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 5737 desc->oed_info.function_flags = cpu_to_be32(flags); 5738 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5739 return sizeof(struct fc_rdp_oed_sfp_desc); 5740 } 5741 5742 static uint32_t 5743 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 5744 struct fc_rdp_oed_sfp_desc *desc, 5745 uint8_t *page_a2) 5746 { 5747 uint32_t flags = 0; 5748 5749 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5750 5751 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 5752 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 5753 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 5754 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 5755 5756 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5757 flags |= RDP_OET_HIGH_ALARM; 5758 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 5759 flags |= RDP_OET_LOW_ALARM; 5760 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5761 flags |= RDP_OET_HIGH_WARNING; 5762 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 5763 flags |= RDP_OET_LOW_WARNING; 5764 5765 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 5766 desc->oed_info.function_flags = cpu_to_be32(flags); 5767 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5768 return sizeof(struct fc_rdp_oed_sfp_desc); 5769 } 5770 5771 static uint32_t 5772 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 5773 struct fc_rdp_oed_sfp_desc *desc, 5774 uint8_t *page_a2) 5775 { 5776 uint32_t flags = 0; 5777 5778 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5779 5780 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 5781 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 5782 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 5783 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 5784 5785 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5786 flags |= RDP_OET_HIGH_ALARM; 5787 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 5788 flags |= RDP_OET_LOW_ALARM; 5789 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5790 flags |= RDP_OET_HIGH_WARNING; 5791 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 5792 flags |= RDP_OET_LOW_WARNING; 5793 5794 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 5795 desc->oed_info.function_flags = cpu_to_be32(flags); 5796 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 5797 return sizeof(struct fc_rdp_oed_sfp_desc); 5798 } 5799 5800 5801 static uint32_t 5802 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 5803 struct fc_rdp_oed_sfp_desc *desc, 5804 uint8_t *page_a2) 5805 { 5806 uint32_t flags = 0; 5807 5808 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5809 5810 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 5811 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 5812 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 5813 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 5814 5815 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5816 flags |= RDP_OET_HIGH_ALARM; 5817 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 5818 flags |= RDP_OET_LOW_ALARM; 5819 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5820 flags |= RDP_OET_HIGH_WARNING; 5821 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 5822 flags |= RDP_OET_LOW_WARNING; 5823 5824 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 5825 desc->oed_info.function_flags = cpu_to_be32(flags); 5826 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5827 return sizeof(struct fc_rdp_oed_sfp_desc); 5828 } 5829 5830 static uint32_t 5831 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 5832 uint8_t *page_a0, struct lpfc_vport *vport) 5833 { 5834 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 5835 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 5836 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 5837 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 5838 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 5839 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 5840 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 5841 return sizeof(struct fc_rdp_opd_sfp_desc); 5842 } 5843 5844 static uint32_t 5845 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 5846 { 5847 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 5848 return 0; 5849 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 5850 5851 desc->info.CorrectedBlocks = 5852 cpu_to_be32(stat->fecCorrBlkCount); 5853 desc->info.UncorrectableBlocks = 5854 cpu_to_be32(stat->fecUncorrBlkCount); 5855 5856 desc->length = cpu_to_be32(sizeof(desc->info)); 5857 5858 return sizeof(struct fc_fec_rdp_desc); 5859 } 5860 5861 static uint32_t 5862 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 5863 { 5864 uint16_t rdp_cap = 0; 5865 uint16_t rdp_speed; 5866 5867 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 5868 5869 switch (phba->fc_linkspeed) { 5870 case LPFC_LINK_SPEED_1GHZ: 5871 rdp_speed = RDP_PS_1GB; 5872 break; 5873 case LPFC_LINK_SPEED_2GHZ: 5874 rdp_speed = RDP_PS_2GB; 5875 break; 5876 case LPFC_LINK_SPEED_4GHZ: 5877 rdp_speed = RDP_PS_4GB; 5878 break; 5879 case LPFC_LINK_SPEED_8GHZ: 5880 rdp_speed = RDP_PS_8GB; 5881 break; 5882 case LPFC_LINK_SPEED_10GHZ: 5883 rdp_speed = RDP_PS_10GB; 5884 break; 5885 case LPFC_LINK_SPEED_16GHZ: 5886 rdp_speed = RDP_PS_16GB; 5887 break; 5888 case LPFC_LINK_SPEED_32GHZ: 5889 rdp_speed = RDP_PS_32GB; 5890 break; 5891 case LPFC_LINK_SPEED_64GHZ: 5892 rdp_speed = RDP_PS_64GB; 5893 break; 5894 default: 5895 rdp_speed = RDP_PS_UNKNOWN; 5896 break; 5897 } 5898 5899 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 5900 5901 if (phba->lmt & LMT_128Gb) 5902 rdp_cap |= RDP_PS_128GB; 5903 if (phba->lmt & LMT_64Gb) 5904 rdp_cap |= RDP_PS_64GB; 5905 if (phba->lmt & LMT_32Gb) 5906 rdp_cap |= RDP_PS_32GB; 5907 if 
(phba->lmt & LMT_16Gb) 5908 rdp_cap |= RDP_PS_16GB; 5909 if (phba->lmt & LMT_10Gb) 5910 rdp_cap |= RDP_PS_10GB; 5911 if (phba->lmt & LMT_8Gb) 5912 rdp_cap |= RDP_PS_8GB; 5913 if (phba->lmt & LMT_4Gb) 5914 rdp_cap |= RDP_PS_4GB; 5915 if (phba->lmt & LMT_2Gb) 5916 rdp_cap |= RDP_PS_2GB; 5917 if (phba->lmt & LMT_1Gb) 5918 rdp_cap |= RDP_PS_1GB; 5919 5920 if (rdp_cap == 0) 5921 rdp_cap = RDP_CAP_UNKNOWN; 5922 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 5923 rdp_cap |= RDP_CAP_USER_CONFIGURED; 5924 5925 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 5926 desc->length = cpu_to_be32(sizeof(desc->info)); 5927 return sizeof(struct fc_rdp_port_speed_desc); 5928 } 5929 5930 static uint32_t 5931 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 5932 struct lpfc_vport *vport) 5933 { 5934 5935 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5936 5937 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 5938 sizeof(desc->port_names.wwnn)); 5939 5940 memcpy(desc->port_names.wwpn, &vport->fc_portname, 5941 sizeof(desc->port_names.wwpn)); 5942 5943 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5944 return sizeof(struct fc_rdp_port_name_desc); 5945 } 5946 5947 static uint32_t 5948 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 5949 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5950 { 5951 5952 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5953 if (vport->fc_flag & FC_FABRIC) { 5954 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 5955 sizeof(desc->port_names.wwnn)); 5956 5957 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 5958 sizeof(desc->port_names.wwpn)); 5959 } else { /* Point to Point */ 5960 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 5961 sizeof(desc->port_names.wwnn)); 5962 5963 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 5964 sizeof(desc->port_names.wwpn)); 5965 } 5966 5967 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5968 return sizeof(struct fc_rdp_port_name_desc); 5969 } 5970 5971 static void 5972 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 5973 int status) 5974 { 5975 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 5976 struct lpfc_vport *vport = ndlp->vport; 5977 struct lpfc_iocbq *elsiocb; 5978 struct ulp_bde64 *bpl; 5979 IOCB_t *icmd; 5980 uint8_t *pcmd; 5981 struct ls_rjt *stat; 5982 struct fc_rdp_res_frame *rdp_res; 5983 uint32_t cmdsize, len; 5984 uint16_t *flag_ptr; 5985 int rc; 5986 5987 if (status != SUCCESS) 5988 goto error; 5989 5990 /* This will change once we know the true size of the RDP payload */ 5991 cmdsize = sizeof(struct fc_rdp_res_frame); 5992 5993 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 5994 lpfc_max_els_tries, rdp_context->ndlp, 5995 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 5996 if (!elsiocb) 5997 goto free_rdp_context; 5998 5999 icmd = &elsiocb->iocb; 6000 icmd->ulpContext = rdp_context->rx_id; 6001 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6002 6003 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6004 "2171 Xmit RDP response tag x%x xri x%x, " 6005 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 6006 elsiocb->iotag, elsiocb->iocb.ulpContext, 6007 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6008 ndlp->nlp_rpi); 6009 rdp_res = (struct fc_rdp_res_frame *) 6010 (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6011 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6012 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 6013 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6014 6015 /* Update Alarm 
and Warning */ 6016 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 6017 phba->sfp_alarm |= *flag_ptr; 6018 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 6019 phba->sfp_warning |= *flag_ptr; 6020 6021 /* For RDP payload */ 6022 len = 8; 6023 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 6024 (len + pcmd), ELS_CMD_RDP); 6025 6026 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 6027 rdp_context->page_a0, rdp_context->page_a2); 6028 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 6029 phba); 6030 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 6031 (len + pcmd), &rdp_context->link_stat); 6032 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 6033 (len + pcmd), vport); 6034 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 6035 (len + pcmd), vport, ndlp); 6036 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 6037 &rdp_context->link_stat); 6038 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 6039 &rdp_context->link_stat, vport); 6040 len += lpfc_rdp_res_oed_temp_desc(phba, 6041 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6042 rdp_context->page_a2); 6043 len += lpfc_rdp_res_oed_voltage_desc(phba, 6044 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6045 rdp_context->page_a2); 6046 len += lpfc_rdp_res_oed_txbias_desc(phba, 6047 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6048 rdp_context->page_a2); 6049 len += lpfc_rdp_res_oed_txpower_desc(phba, 6050 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6051 rdp_context->page_a2); 6052 len += lpfc_rdp_res_oed_rxpower_desc(phba, 6053 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6054 rdp_context->page_a2); 6055 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 6056 rdp_context->page_a0, vport); 6057 6058 rdp_res->length = cpu_to_be32(len - 8); 6059 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6060 6061 /* Now that we know the true size of the payload, update the BPL */ 6062 bpl = (struct ulp_bde64 *) 6063 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 6064 bpl->tus.f.bdeSize = len; 6065 bpl->tus.f.bdeFlags = 0; 6066 bpl->tus.w = le32_to_cpu(bpl->tus.w); 6067 6068 phba->fc_stat.elsXmitACC++; 6069 elsiocb->context1 = lpfc_nlp_get(ndlp); 6070 if (!elsiocb->context1) { 6071 lpfc_els_free_iocb(phba, elsiocb); 6072 goto free_rdp_context; 6073 } 6074 6075 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6076 if (rc == IOCB_ERROR) { 6077 lpfc_nlp_put(ndlp); 6078 lpfc_els_free_iocb(phba, elsiocb); 6079 } 6080 6081 goto free_rdp_context; 6082 6083 error: 6084 cmdsize = 2 * sizeof(uint32_t); 6085 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 6086 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 6087 if (!elsiocb) 6088 goto free_rdp_context; 6089 6090 icmd = &elsiocb->iocb; 6091 icmd->ulpContext = rdp_context->rx_id; 6092 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6093 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6094 6095 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 6096 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6097 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6098 6099 phba->fc_stat.elsXmitLSRJT++; 6100 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6101 elsiocb->context1 = lpfc_nlp_get(ndlp); 6102 if (!elsiocb->context1) { 6103 lpfc_els_free_iocb(phba, elsiocb); 6104 goto free_rdp_context; 6105 } 6106 6107 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6108 if (rc == IOCB_ERROR) 
{ 6109 lpfc_nlp_put(ndlp); 6110 lpfc_els_free_iocb(phba, elsiocb); 6111 } 6112 6113 free_rdp_context: 6114 /* This reference put is for the original unsolicited RDP. If the 6115 * iocb prep failed, there is no reference to remove. 6116 */ 6117 lpfc_nlp_put(ndlp); 6118 kfree(rdp_context); 6119 } 6120 6121 static int 6122 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 6123 { 6124 LPFC_MBOXQ_t *mbox = NULL; 6125 int rc; 6126 6127 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6128 if (!mbox) { 6129 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 6130 "7105 failed to allocate mailbox memory\n"); 6131 return 1; 6132 } 6133 6134 if (lpfc_sli4_dump_page_a0(phba, mbox)) 6135 goto prep_mbox_fail; 6136 mbox->vport = rdp_context->ndlp->vport; 6137 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 6138 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 6139 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6140 if (rc == MBX_NOT_FINISHED) 6141 goto issue_mbox_fail; 6142 6143 return 0; 6144 6145 prep_mbox_fail: 6146 issue_mbox_fail: 6147 mempool_free(mbox, phba->mbox_mem_pool); 6148 return 1; 6149 } 6150 6151 /* 6152 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 6153 * @vport: pointer to a host virtual N_Port data structure. 6154 * @cmdiocb: pointer to lpfc command iocb data structure. 6155 * @ndlp: pointer to a node-list data structure. 6156 * 6157 * This routine processes an unsolicited RDP (Read Diagnostic Parameters) 6158 * IOCB. First, the payload of the unsolicited RDP is checked. 6159 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 6160 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 6161 * (3) send MBX_READ_LNK_STAT to get the link statistics, and (4) call 6162 * lpfc_els_rdp_cmpl() to gather all the data and send the RDP response. 6163 * 6164 * Return code 6165 * 0 - Sent the acc response 6166 * 1 - Sent the reject response.
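* (The request is rejected with a logical error unless it carries exactly one RDP N_Port ID descriptor with the expected tag and length.)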
6167 */ 6168 static int 6169 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6170 struct lpfc_nodelist *ndlp) 6171 { 6172 struct lpfc_hba *phba = vport->phba; 6173 struct lpfc_dmabuf *pcmd; 6174 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 6175 struct fc_rdp_req_frame *rdp_req; 6176 struct lpfc_rdp_context *rdp_context; 6177 IOCB_t *cmd = NULL; 6178 struct ls_rjt stat; 6179 6180 if (phba->sli_rev < LPFC_SLI_REV4 || 6181 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6182 LPFC_SLI_INTF_IF_TYPE_2) { 6183 rjt_err = LSRJT_UNABLE_TPC; 6184 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6185 goto error; 6186 } 6187 6188 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 6189 rjt_err = LSRJT_UNABLE_TPC; 6190 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6191 goto error; 6192 } 6193 6194 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6195 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 6196 6197 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6198 "2422 ELS RDP Request " 6199 "dec len %d tag x%x port_id %d len %d\n", 6200 be32_to_cpu(rdp_req->rdp_des_length), 6201 be32_to_cpu(rdp_req->nport_id_desc.tag), 6202 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 6203 be32_to_cpu(rdp_req->nport_id_desc.length)); 6204 6205 if (sizeof(struct fc_rdp_nport_desc) != 6206 be32_to_cpu(rdp_req->rdp_des_length)) 6207 goto rjt_logerr; 6208 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 6209 goto rjt_logerr; 6210 if (RDP_NPORT_ID_SIZE != 6211 be32_to_cpu(rdp_req->nport_id_desc.length)) 6212 goto rjt_logerr; 6213 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 6214 if (!rdp_context) { 6215 rjt_err = LSRJT_UNABLE_TPC; 6216 goto error; 6217 } 6218 6219 cmd = &cmdiocb->iocb; 6220 rdp_context->ndlp = lpfc_nlp_get(ndlp); 6221 if (!rdp_context->ndlp) { 6222 kfree(rdp_context); 6223 rjt_err = LSRJT_UNABLE_TPC; 6224 goto error; 6225 } 6226 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 6227 rdp_context->rx_id = cmd->ulpContext; 6228 rdp_context->cmpl = lpfc_els_rdp_cmpl; 6229 if (lpfc_get_rdp_info(phba, rdp_context)) { 6230 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 6231 "2423 Unable to send mailbox"); 6232 kfree(rdp_context); 6233 rjt_err = LSRJT_UNABLE_TPC; 6234 lpfc_nlp_put(ndlp); 6235 goto error; 6236 } 6237 6238 return 0; 6239 6240 rjt_logerr: 6241 rjt_err = LSRJT_LOGICAL_ERR; 6242 6243 error: 6244 memset(&stat, 0, sizeof(stat)); 6245 stat.un.b.lsRjtRsnCode = rjt_err; 6246 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 6247 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6248 return 1; 6249 } 6250 6251 6252 static void 6253 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6254 { 6255 MAILBOX_t *mb; 6256 IOCB_t *icmd; 6257 uint8_t *pcmd; 6258 struct lpfc_iocbq *elsiocb; 6259 struct lpfc_nodelist *ndlp; 6260 struct ls_rjt *stat; 6261 union lpfc_sli4_cfg_shdr *shdr; 6262 struct lpfc_lcb_context *lcb_context; 6263 struct fc_lcb_res_frame *lcb_res; 6264 uint32_t cmdsize, shdr_status, shdr_add_status; 6265 int rc; 6266 6267 mb = &pmb->u.mb; 6268 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 6269 ndlp = lcb_context->ndlp; 6270 pmb->ctx_ndlp = NULL; 6271 pmb->ctx_buf = NULL; 6272 6273 shdr = (union lpfc_sli4_cfg_shdr *) 6274 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 6275 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6276 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6277 6278 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 6279 "0194 SET_BEACON_CONFIG mailbox " 6280 "completed 
with status x%x add_status x%x," 6281 " mbx status x%x\n", 6282 shdr_status, shdr_add_status, mb->mbxStatus); 6283 6284 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 6285 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 6286 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 6287 mempool_free(pmb, phba->mbox_mem_pool); 6288 goto error; 6289 } 6290 6291 mempool_free(pmb, phba->mbox_mem_pool); 6292 cmdsize = sizeof(struct fc_lcb_res_frame); 6293 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6294 lpfc_max_els_tries, ndlp, 6295 ndlp->nlp_DID, ELS_CMD_ACC); 6296 6297 /* Decrement the ndlp reference count from previous mbox command */ 6298 lpfc_nlp_put(ndlp); 6299 6300 if (!elsiocb) 6301 goto free_lcb_context; 6302 6303 lcb_res = (struct fc_lcb_res_frame *) 6304 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6305 6306 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 6307 icmd = &elsiocb->iocb; 6308 icmd->ulpContext = lcb_context->rx_id; 6309 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 6310 6311 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6312 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 6313 lcb_res->lcb_sub_command = lcb_context->sub_command; 6314 lcb_res->lcb_type = lcb_context->type; 6315 lcb_res->capability = lcb_context->capability; 6316 lcb_res->lcb_frequency = lcb_context->frequency; 6317 lcb_res->lcb_duration = lcb_context->duration; 6318 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6319 phba->fc_stat.elsXmitACC++; 6320 6321 elsiocb->context1 = lpfc_nlp_get(ndlp); 6322 if (!elsiocb->context1) 6323 goto node_err; 6324 6325 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6326 if (!rc) 6327 goto out; 6328 6329 lpfc_nlp_put(ndlp); 6330 node_err: 6331 lpfc_els_free_iocb(phba, elsiocb); 6332 out: 6333 kfree(lcb_context); 6334 return; 6335 6336 error: 6337 cmdsize = sizeof(struct fc_lcb_res_frame); 6338 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6339 lpfc_max_els_tries, ndlp, 6340 ndlp->nlp_DID, ELS_CMD_LS_RJT); 6341 lpfc_nlp_put(ndlp); 6342 if (!elsiocb) 6343 goto free_lcb_context; 6344 6345 icmd = &elsiocb->iocb; 6346 icmd->ulpContext = lcb_context->rx_id; 6347 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 6348 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6349 6350 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 6351 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6352 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6353 6354 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 6355 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 6356 6357 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6358 phba->fc_stat.elsXmitLSRJT++; 6359 elsiocb->context1 = lpfc_nlp_get(ndlp); 6360 if (!elsiocb->context1) { 6361 lpfc_els_free_iocb(phba, elsiocb); 6362 goto free_lcb_context; 6363 } 6364 6365 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6366 if (rc == IOCB_ERROR) { 6367 lpfc_nlp_put(ndlp); 6368 lpfc_els_free_iocb(phba, elsiocb); 6369 } 6370 free_lcb_context: 6371 kfree(lcb_context); 6372 } 6373 6374 static int 6375 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 6376 struct lpfc_lcb_context *lcb_context, 6377 uint32_t beacon_state) 6378 { 6379 struct lpfc_hba *phba = vport->phba; 6380 union lpfc_sli4_cfg_shdr *cfg_shdr; 6381 LPFC_MBOXQ_t *mbox = NULL; 6382 uint32_t len; 6383 int rc; 6384 6385 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6386 if (!mbox) 6387 return 1; 6388 6389 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 6390 len = sizeof(struct lpfc_mbx_set_beacon_config) - 6391 
sizeof(struct lpfc_sli4_cfg_mhdr); 6392 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6393 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 6394 LPFC_SLI4_MBX_EMBED); 6395 mbox->ctx_ndlp = (void *)lcb_context; 6396 mbox->vport = phba->pport; 6397 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 6398 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 6399 phba->sli4_hba.physical_port); 6400 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 6401 beacon_state); 6402 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 6403 6404 /* 6405 * Check bv1s bit before issuing the mailbox 6406 * if bv1s == 1, LCB V1 supported 6407 * else, LCB V0 supported 6408 */ 6409 6410 if (phba->sli4_hba.pc_sli4_params.bv1s) { 6411 /* COMMON_SET_BEACON_CONFIG_V1 */ 6412 cfg_shdr->request.word9 = BEACON_VERSION_V1; 6413 lcb_context->capability |= LCB_CAPABILITY_DURATION; 6414 bf_set(lpfc_mbx_set_beacon_port_type, 6415 &mbox->u.mqe.un.beacon_config, 0); 6416 bf_set(lpfc_mbx_set_beacon_duration_v1, 6417 &mbox->u.mqe.un.beacon_config, 6418 be16_to_cpu(lcb_context->duration)); 6419 } else { 6420 /* COMMON_SET_BEACON_CONFIG_V0 */ 6421 if (be16_to_cpu(lcb_context->duration) != 0) { 6422 mempool_free(mbox, phba->mbox_mem_pool); 6423 return 1; 6424 } 6425 cfg_shdr->request.word9 = BEACON_VERSION_V0; 6426 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 6427 bf_set(lpfc_mbx_set_beacon_state, 6428 &mbox->u.mqe.un.beacon_config, beacon_state); 6429 bf_set(lpfc_mbx_set_beacon_port_type, 6430 &mbox->u.mqe.un.beacon_config, 1); 6431 bf_set(lpfc_mbx_set_beacon_duration, 6432 &mbox->u.mqe.un.beacon_config, 6433 be16_to_cpu(lcb_context->duration)); 6434 } 6435 6436 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6437 if (rc == MBX_NOT_FINISHED) { 6438 mempool_free(mbox, phba->mbox_mem_pool); 6439 return 1; 6440 } 6441 6442 return 0; 6443 } 6444 6445 6446 /** 6447 * lpfc_els_rcv_lcb - Process an unsolicited LCB 6448 * @vport: pointer to a host virtual N_Port data structure. 6449 * @cmdiocb: pointer to lpfc command iocb data structure. 6450 * @ndlp: pointer to a node-list data structure. 6451 * 6452 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 6453 * First, the payload of the unsolicited LCB is checked. 6454 * Then based on Subcommand beacon will either turn on or off. 6455 * 6456 * Return code 6457 * 0 - Sent the acc response 6458 * 1 - Sent the reject response. 
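* (LCB is honored only on SLI-4, non-FCoE adapters with interface type 2 or later; a non-zero beacon duration additionally requires LCB version 1 support (bv1s) in the firmware.)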
6459 **/ 6460 static int 6461 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6462 struct lpfc_nodelist *ndlp) 6463 { 6464 struct lpfc_hba *phba = vport->phba; 6465 struct lpfc_dmabuf *pcmd; 6466 uint8_t *lp; 6467 struct fc_lcb_request_frame *beacon; 6468 struct lpfc_lcb_context *lcb_context; 6469 u8 state, rjt_err = 0; 6470 struct ls_rjt stat; 6471 6472 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 6473 lp = (uint8_t *)pcmd->virt; 6474 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 6475 6476 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6477 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 6478 "type x%x frequency %x duration x%x\n", 6479 lp[0], lp[1], lp[2], 6480 beacon->lcb_command, 6481 beacon->lcb_sub_command, 6482 beacon->lcb_type, 6483 beacon->lcb_frequency, 6484 be16_to_cpu(beacon->lcb_duration)); 6485 6486 if (beacon->lcb_sub_command != LPFC_LCB_ON && 6487 beacon->lcb_sub_command != LPFC_LCB_OFF) { 6488 rjt_err = LSRJT_CMD_UNSUPPORTED; 6489 goto rjt; 6490 } 6491 6492 if (phba->sli_rev < LPFC_SLI_REV4 || 6493 phba->hba_flag & HBA_FCOE_MODE || 6494 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6495 LPFC_SLI_INTF_IF_TYPE_2)) { 6496 rjt_err = LSRJT_CMD_UNSUPPORTED; 6497 goto rjt; 6498 } 6499 6500 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 6501 if (!lcb_context) { 6502 rjt_err = LSRJT_UNABLE_TPC; 6503 goto rjt; 6504 } 6505 6506 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 6507 lcb_context->sub_command = beacon->lcb_sub_command; 6508 lcb_context->capability = 0; 6509 lcb_context->type = beacon->lcb_type; 6510 lcb_context->frequency = beacon->lcb_frequency; 6511 lcb_context->duration = beacon->lcb_duration; 6512 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 6513 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 6514 lcb_context->ndlp = lpfc_nlp_get(ndlp); 6515 if (!lcb_context->ndlp) { 6516 rjt_err = LSRJT_UNABLE_TPC; 6517 goto rjt_free; 6518 } 6519 6520 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 6521 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 6522 "0193 failed to send mail box"); 6523 lpfc_nlp_put(ndlp); 6524 rjt_err = LSRJT_UNABLE_TPC; 6525 goto rjt_free; 6526 } 6527 return 0; 6528 6529 rjt_free: 6530 kfree(lcb_context); 6531 rjt: 6532 memset(&stat, 0, sizeof(stat)); 6533 stat.un.b.lsRjtRsnCode = rjt_err; 6534 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6535 return 1; 6536 } 6537 6538 6539 /** 6540 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 6541 * @vport: pointer to a host virtual N_Port data structure. 6542 * 6543 * This routine cleans up any Registration State Change Notification 6544 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 6545 * @vport together with the host_lock is used to prevent multiple thread 6546 * trying to access the RSCN array on a same @vport at the same time. 
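* Each saved RSCN payload buffer on the fc_rscn_id_list is freed, fc_rscn_id_cnt is reset to zero, and the FC_RSCN_MODE and FC_RSCN_DISCOVERY flags are cleared before the flush token is released.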
6547 **/ 6548 void 6549 lpfc_els_flush_rscn(struct lpfc_vport *vport) 6550 { 6551 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6552 struct lpfc_hba *phba = vport->phba; 6553 int i; 6554 6555 spin_lock_irq(shost->host_lock); 6556 if (vport->fc_rscn_flush) { 6557 /* Another thread is walking fc_rscn_id_list on this vport */ 6558 spin_unlock_irq(shost->host_lock); 6559 return; 6560 } 6561 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 6562 vport->fc_rscn_flush = 1; 6563 spin_unlock_irq(shost->host_lock); 6564 6565 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 6566 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 6567 vport->fc_rscn_id_list[i] = NULL; 6568 } 6569 spin_lock_irq(shost->host_lock); 6570 vport->fc_rscn_id_cnt = 0; 6571 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 6572 spin_unlock_irq(shost->host_lock); 6573 lpfc_can_disctmo(vport); 6574 /* Indicate we are done walking this fc_rscn_id_list */ 6575 vport->fc_rscn_flush = 0; 6576 } 6577 6578 /** 6579 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 6580 * @vport: pointer to a host virtual N_Port data structure. 6581 * @did: remote destination port identifier. 6582 * 6583 * This routine checks whether there is any pending Registration State 6584 * Configuration Notification (RSCN) to a @did on @vport. 6585 * 6586 * Return code 6587 * None zero - The @did matched with a pending rscn 6588 * 0 - not able to match @did with a pending rscn 6589 **/ 6590 int 6591 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 6592 { 6593 D_ID ns_did; 6594 D_ID rscn_did; 6595 uint32_t *lp; 6596 uint32_t payload_len, i; 6597 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6598 6599 ns_did.un.word = did; 6600 6601 /* Never match fabric nodes for RSCNs */ 6602 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6603 return 0; 6604 6605 /* If we are doing a FULL RSCN rediscovery, match everything */ 6606 if (vport->fc_flag & FC_RSCN_DISCOVERY) 6607 return did; 6608 6609 spin_lock_irq(shost->host_lock); 6610 if (vport->fc_rscn_flush) { 6611 /* Another thread is walking fc_rscn_id_list on this vport */ 6612 spin_unlock_irq(shost->host_lock); 6613 return 0; 6614 } 6615 /* Indicate we are walking fc_rscn_id_list on this vport */ 6616 vport->fc_rscn_flush = 1; 6617 spin_unlock_irq(shost->host_lock); 6618 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 6619 lp = vport->fc_rscn_id_list[i]->virt; 6620 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6621 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6622 while (payload_len) { 6623 rscn_did.un.word = be32_to_cpu(*lp++); 6624 payload_len -= sizeof(uint32_t); 6625 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 6626 case RSCN_ADDRESS_FORMAT_PORT: 6627 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 6628 && (ns_did.un.b.area == rscn_did.un.b.area) 6629 && (ns_did.un.b.id == rscn_did.un.b.id)) 6630 goto return_did_out; 6631 break; 6632 case RSCN_ADDRESS_FORMAT_AREA: 6633 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 6634 && (ns_did.un.b.area == rscn_did.un.b.area)) 6635 goto return_did_out; 6636 break; 6637 case RSCN_ADDRESS_FORMAT_DOMAIN: 6638 if (ns_did.un.b.domain == rscn_did.un.b.domain) 6639 goto return_did_out; 6640 break; 6641 case RSCN_ADDRESS_FORMAT_FABRIC: 6642 goto return_did_out; 6643 } 6644 } 6645 } 6646 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 6647 vport->fc_rscn_flush = 0; 6648 return 0; 6649 return_did_out: 6650 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 6651 vport->fc_rscn_flush = 0; 6652 return did; 6653 } 6654 6655 /** 6656 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 6657 * @vport: pointer to a host virtual N_Port data structure. 6658 * 6659 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 6660 * state machine for a @vport's nodes that are with pending RSCN (Registration 6661 * State Change Notification). 6662 * 6663 * Return code 6664 * 0 - Successful (currently alway return 0) 6665 **/ 6666 static int 6667 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 6668 { 6669 struct lpfc_nodelist *ndlp = NULL; 6670 6671 /* Move all affected nodes by pending RSCNs to NPR state. */ 6672 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6673 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 6674 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 6675 continue; 6676 6677 /* NVME Target mode does not do RSCN Recovery. */ 6678 if (vport->phba->nvmet_support) 6679 continue; 6680 6681 /* If we are in the process of doing discovery on this 6682 * NPort, let it continue on its own. 6683 */ 6684 switch (ndlp->nlp_state) { 6685 case NLP_STE_PLOGI_ISSUE: 6686 case NLP_STE_ADISC_ISSUE: 6687 case NLP_STE_REG_LOGIN_ISSUE: 6688 case NLP_STE_PRLI_ISSUE: 6689 case NLP_STE_LOGO_ISSUE: 6690 continue; 6691 } 6692 6693 /* Check to see if we need to NVME rescan this target 6694 * remoteport. 6695 */ 6696 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 6697 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 6698 lpfc_nvme_rescan_port(vport, ndlp); 6699 6700 lpfc_disc_state_machine(vport, ndlp, NULL, 6701 NLP_EVT_DEVICE_RECOVERY); 6702 lpfc_cancel_retry_delay_tmo(vport, ndlp); 6703 } 6704 return 0; 6705 } 6706 6707 /** 6708 * lpfc_send_rscn_event - Send an RSCN event to management application 6709 * @vport: pointer to a host virtual N_Port data structure. 6710 * @cmdiocb: pointer to lpfc command iocb data structure. 6711 * 6712 * lpfc_send_rscn_event sends an RSCN netlink event to management 6713 * applications. 6714 */ 6715 static void 6716 lpfc_send_rscn_event(struct lpfc_vport *vport, 6717 struct lpfc_iocbq *cmdiocb) 6718 { 6719 struct lpfc_dmabuf *pcmd; 6720 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6721 uint32_t *payload_ptr; 6722 uint32_t payload_len; 6723 struct lpfc_rscn_event_header *rscn_event_data; 6724 6725 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6726 payload_ptr = (uint32_t *) pcmd->virt; 6727 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 6728 6729 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 6730 payload_len, GFP_KERNEL); 6731 if (!rscn_event_data) { 6732 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6733 "0147 Failed to allocate memory for RSCN event\n"); 6734 return; 6735 } 6736 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 6737 rscn_event_data->payload_length = payload_len; 6738 memcpy(rscn_event_data->rscn_payload, payload_ptr, 6739 payload_len); 6740 6741 fc_host_post_vendor_event(shost, 6742 fc_get_event_number(), 6743 sizeof(struct lpfc_rscn_event_header) + payload_len, 6744 (char *)rscn_event_data, 6745 LPFC_NL_VENDOR_ID); 6746 6747 kfree(rscn_event_data); 6748 } 6749 6750 /** 6751 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 6752 * @vport: pointer to a host virtual N_Port data structure. 6753 * @cmdiocb: pointer to lpfc command iocb data structure. 6754 * @ndlp: pointer to a node-list data structure. 6755 * 6756 * This routine processes an unsolicited RSCN (Registration State Change 6757 * Notification) IOCB. 
First, the payload of the unsolicited RSCN is walked 6758 * to invoke fc_host_post_event() routine to the FC transport layer. If the 6759 * discover state machine is about to begin discovery, it just accepts the 6760 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 6761 * contains N_Port IDs for other vports on this HBA, it just accepts the 6762 * RSCN and ignore processing it. If the state machine is in the recovery 6763 * state, the fc_rscn_id_list of this @vport is walked and the 6764 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 6765 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 6766 * routine is invoked to handle the RSCN event. 6767 * 6768 * Return code 6769 * 0 - Just sent the acc response 6770 * 1 - Sent the acc response and waited for name server completion 6771 **/ 6772 static int 6773 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6774 struct lpfc_nodelist *ndlp) 6775 { 6776 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6777 struct lpfc_hba *phba = vport->phba; 6778 struct lpfc_dmabuf *pcmd; 6779 uint32_t *lp, *datap; 6780 uint32_t payload_len, length, nportid, *cmd; 6781 int rscn_cnt; 6782 int rscn_id = 0, hba_id = 0; 6783 int i, tmo; 6784 6785 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6786 lp = (uint32_t *) pcmd->virt; 6787 6788 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6789 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6790 /* RSCN received */ 6791 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6792 "0214 RSCN received Data: x%x x%x x%x x%x\n", 6793 vport->fc_flag, payload_len, *lp, 6794 vport->fc_rscn_id_cnt); 6795 6796 /* Send an RSCN event to the management application */ 6797 lpfc_send_rscn_event(vport, cmdiocb); 6798 6799 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 6800 fc_host_post_event(shost, fc_get_event_number(), 6801 FCH_EVT_RSCN, lp[i]); 6802 6803 /* Check if RSCN is coming from a direct-connected remote NPort */ 6804 if (vport->fc_flag & FC_PT2PT) { 6805 /* If so, just ACC it, no other action needed for now */ 6806 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6807 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 6808 *lp, vport->fc_flag, payload_len); 6809 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6810 6811 /* Check to see if we need to NVME rescan this target 6812 * remoteport. 6813 */ 6814 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 6815 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 6816 lpfc_nvme_rescan_port(vport, ndlp); 6817 return 0; 6818 } 6819 6820 /* If we are about to begin discovery, just ACC the RSCN. 6821 * Discovery processing will satisfy it. 6822 */ 6823 if (vport->port_state <= LPFC_NS_QRY) { 6824 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6825 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 6826 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6827 6828 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6829 return 0; 6830 } 6831 6832 /* If this RSCN just contains NPortIDs for other vports on this HBA, 6833 * just ACC and ignore it. 
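* (With NPIV enabled and cfg_peer_port_login disabled, such an RSCN typically reflects sibling vports on this same physical port logging in or out, so no discovery is required.)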
6834 */ 6835 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6836 !(vport->cfg_peer_port_login)) { 6837 i = payload_len; 6838 datap = lp; 6839 while (i > 0) { 6840 nportid = *datap++; 6841 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 6842 i -= sizeof(uint32_t); 6843 rscn_id++; 6844 if (lpfc_find_vport_by_did(phba, nportid)) 6845 hba_id++; 6846 } 6847 if (rscn_id == hba_id) { 6848 /* ALL NPortIDs in RSCN are on HBA */ 6849 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6850 "0219 Ignore RSCN " 6851 "Data: x%x x%x x%x x%x\n", 6852 vport->fc_flag, payload_len, 6853 *lp, vport->fc_rscn_id_cnt); 6854 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6855 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 6856 ndlp->nlp_DID, vport->port_state, 6857 ndlp->nlp_flag); 6858 6859 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 6860 ndlp, NULL); 6861 return 0; 6862 } 6863 } 6864 6865 spin_lock_irq(shost->host_lock); 6866 if (vport->fc_rscn_flush) { 6867 /* Another thread is walking fc_rscn_id_list on this vport */ 6868 vport->fc_flag |= FC_RSCN_DISCOVERY; 6869 spin_unlock_irq(shost->host_lock); 6870 /* Send back ACC */ 6871 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6872 return 0; 6873 } 6874 /* Indicate we are walking fc_rscn_id_list on this vport */ 6875 vport->fc_rscn_flush = 1; 6876 spin_unlock_irq(shost->host_lock); 6877 /* Get the array count after successfully have the token */ 6878 rscn_cnt = vport->fc_rscn_id_cnt; 6879 /* If we are already processing an RSCN, save the received 6880 * RSCN payload buffer, cmdiocb->context2 to process later. 6881 */ 6882 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 6883 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6884 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 6885 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6886 6887 spin_lock_irq(shost->host_lock); 6888 vport->fc_flag |= FC_RSCN_DEFERRED; 6889 6890 /* Restart disctmo if its already running */ 6891 if (vport->fc_flag & FC_DISC_TMO) { 6892 tmo = ((phba->fc_ratov * 3) + 3); 6893 mod_timer(&vport->fc_disctmo, 6894 jiffies + msecs_to_jiffies(1000 * tmo)); 6895 } 6896 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 6897 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 6898 vport->fc_flag |= FC_RSCN_MODE; 6899 spin_unlock_irq(shost->host_lock); 6900 if (rscn_cnt) { 6901 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 6902 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 6903 } 6904 if ((rscn_cnt) && 6905 (payload_len + length <= LPFC_BPL_SIZE)) { 6906 *cmd &= ELS_CMD_MASK; 6907 *cmd |= cpu_to_be32(payload_len + length); 6908 memcpy(((uint8_t *)cmd) + length, lp, 6909 payload_len); 6910 } else { 6911 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 6912 vport->fc_rscn_id_cnt++; 6913 /* If we zero, cmdiocb->context2, the calling 6914 * routine will not try to free it. 
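 * The payload buffer is now owned by fc_rscn_id_list and is released when
 * the deferred RSCN list is flushed.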
6915 */ 6916 cmdiocb->context2 = NULL; 6917 } 6918 /* Deferred RSCN */ 6919 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6920 "0235 Deferred RSCN " 6921 "Data: x%x x%x x%x\n", 6922 vport->fc_rscn_id_cnt, vport->fc_flag, 6923 vport->port_state); 6924 } else { 6925 vport->fc_flag |= FC_RSCN_DISCOVERY; 6926 spin_unlock_irq(shost->host_lock); 6927 /* ReDiscovery RSCN */ 6928 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6929 "0234 ReDiscovery RSCN " 6930 "Data: x%x x%x x%x\n", 6931 vport->fc_rscn_id_cnt, vport->fc_flag, 6932 vport->port_state); 6933 } 6934 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6935 vport->fc_rscn_flush = 0; 6936 /* Send back ACC */ 6937 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6938 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6939 lpfc_rscn_recovery_check(vport); 6940 return 0; 6941 } 6942 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6943 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 6944 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6945 6946 spin_lock_irq(shost->host_lock); 6947 vport->fc_flag |= FC_RSCN_MODE; 6948 spin_unlock_irq(shost->host_lock); 6949 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 6950 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6951 vport->fc_rscn_flush = 0; 6952 /* 6953 * If we zero, cmdiocb->context2, the calling routine will 6954 * not try to free it. 6955 */ 6956 cmdiocb->context2 = NULL; 6957 lpfc_set_disctmo(vport); 6958 /* Send back ACC */ 6959 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6960 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6961 lpfc_rscn_recovery_check(vport); 6962 return lpfc_els_handle_rscn(vport); 6963 } 6964 6965 /** 6966 * lpfc_els_handle_rscn - Handle rscn for a vport 6967 * @vport: pointer to a host virtual N_Port data structure. 6968 * 6969 * This routine handles the Registration State Configuration Notification 6970 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 6971 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 6972 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 6973 * NameServer shall be issued. If CT command to the NameServer fails to be 6974 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 6975 * RSCN activities with the @vport. 6976 * 6977 * Return code 6978 * 0 - Cleaned up rscn on the @vport 6979 * 1 - Wait for plogi to name server before proceed 6980 **/ 6981 int 6982 lpfc_els_handle_rscn(struct lpfc_vport *vport) 6983 { 6984 struct lpfc_nodelist *ndlp; 6985 struct lpfc_hba *phba = vport->phba; 6986 6987 /* Ignore RSCN if the port is being torn down. */ 6988 if (vport->load_flag & FC_UNLOADING) { 6989 lpfc_els_flush_rscn(vport); 6990 return 0; 6991 } 6992 6993 /* Start timer for RSCN processing */ 6994 lpfc_set_disctmo(vport); 6995 6996 /* RSCN processed */ 6997 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6998 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 6999 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 7000 vport->port_state, vport->num_disc_nodes, 7001 vport->gidft_inp); 7002 7003 /* To process RSCN, first compare RSCN data with NameServer */ 7004 vport->fc_ns_retry = 0; 7005 vport->num_disc_nodes = 0; 7006 7007 ndlp = lpfc_findnode_did(vport, NameServer_DID); 7008 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 7009 /* Good ndlp, issue CT Request to NameServer. Need to 7010 * know how many gidfts were issued. 
If none, then just
7011 * flush the RSCN. Otherwise, the outstanding requests
7012 * need to complete.
7013 */
7014 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) {
7015 if (lpfc_issue_gidft(vport) > 0)
7016 return 1;
7017 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) {
7018 if (lpfc_issue_gidpt(vport) > 0)
7019 return 1;
7020 } else {
7021 return 1;
7022 }
7023 } else {
7024 /* Nameserver login in question. Revalidate. */
7025 if (ndlp) {
7026 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
7027 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
7028 } else {
7029 ndlp = lpfc_nlp_init(vport, NameServer_DID);
7030 if (!ndlp) {
7031 lpfc_els_flush_rscn(vport);
7032 return 0;
7033 }
7034 ndlp->nlp_prev_state = ndlp->nlp_state;
7035 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
7036 }
7037 ndlp->nlp_type |= NLP_FABRIC;
7038 lpfc_issue_els_plogi(vport, NameServer_DID, 0);
7039 /* Wait for NameServer login cmpl before we can
7040 * continue
7041 */
7042 return 1;
7043 }
7044
7045 lpfc_els_flush_rscn(vport);
7046 return 0;
7047 }
7048
7049 /**
7050 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
7051 * @vport: pointer to a host virtual N_Port data structure.
7052 * @cmdiocb: pointer to lpfc command iocb data structure.
7053 * @ndlp: pointer to a node-list data structure.
7054 *
7055 * This routine processes a Fabric Login (FLOGI) IOCB received as an ELS
7056 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
7057 * point topology. Since an unsolicited FLOGI should never be received in
7058 * loop mode, any FLOGI received in loop mode is ignored. The
7059 * lpfc_check_sparm() routine is invoked to check the parameters in the
7060 * unsolicited FLOGI. If parameter validation fails, the routine
7061 * lpfc_els_rsp_reject() shall be called with the reject reason code
7062 * explanation set to LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise,
7063 * the Port WWN in the FLOGI is compared with the Port WWN of the @vport
7064 * to determine who will initiate PLOGI. The party with the
7065 * lexicographically higher value has higher priority (as the winning
7066 * port) and will initiate PLOGI and communicate Port_IDs (Addresses) for
7067 * both nodes in that PLOGI. The result is marked in the @vport fc_flag
7068 * field with FC_PT2PT_PLOGI and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
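 *
 * For example, if the local Port WWN begins with 0x20 and the remote Port
 * WWN begins with 0x10, memcmp() returns a positive value: FC_PT2PT_PLOGI
 * is set and the local port takes PT2PT_LocalID, later assigning the
 * remote port's address via PLOGI. With the lower WWPN it would instead
 * take PT2PT_RemoteID and wait for the remote port's PLOGI.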
7069 * 7070 * Return code 7071 * 0 - Successfully processed the unsolicited flogi 7072 * 1 - Failed to process the unsolicited flogi 7073 **/ 7074 static int 7075 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7076 struct lpfc_nodelist *ndlp) 7077 { 7078 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7079 struct lpfc_hba *phba = vport->phba; 7080 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7081 uint32_t *lp = (uint32_t *) pcmd->virt; 7082 IOCB_t *icmd = &cmdiocb->iocb; 7083 struct serv_parm *sp; 7084 LPFC_MBOXQ_t *mbox; 7085 uint32_t cmd, did; 7086 int rc; 7087 uint32_t fc_flag = 0; 7088 uint32_t port_state = 0; 7089 7090 cmd = *lp++; 7091 sp = (struct serv_parm *) lp; 7092 7093 /* FLOGI received */ 7094 7095 lpfc_set_disctmo(vport); 7096 7097 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7098 /* We should never receive a FLOGI in loop mode, ignore it */ 7099 did = icmd->un.elsreq64.remoteID; 7100 7101 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 7102 Loop Mode */ 7103 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7104 "0113 An FLOGI ELS command x%x was " 7105 "received from DID x%x in Loop Mode\n", 7106 cmd, did); 7107 return 1; 7108 } 7109 7110 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 7111 7112 /* 7113 * If our portname is greater than the remote portname, 7114 * then we initiate Nport login. 7115 */ 7116 7117 rc = memcmp(&vport->fc_portname, &sp->portName, 7118 sizeof(struct lpfc_name)); 7119 7120 if (!rc) { 7121 if (phba->sli_rev < LPFC_SLI_REV4) { 7122 mbox = mempool_alloc(phba->mbox_mem_pool, 7123 GFP_KERNEL); 7124 if (!mbox) 7125 return 1; 7126 lpfc_linkdown(phba); 7127 lpfc_init_link(phba, mbox, 7128 phba->cfg_topology, 7129 phba->cfg_link_speed); 7130 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 7131 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7132 mbox->vport = vport; 7133 rc = lpfc_sli_issue_mbox(phba, mbox, 7134 MBX_NOWAIT); 7135 lpfc_set_loopback_flag(phba); 7136 if (rc == MBX_NOT_FINISHED) 7137 mempool_free(mbox, phba->mbox_mem_pool); 7138 return 1; 7139 } 7140 7141 /* abort the flogi coming back to ourselves 7142 * due to external loopback on the port. 7143 */ 7144 lpfc_els_abort_flogi(phba); 7145 return 0; 7146 7147 } else if (rc > 0) { /* greater than */ 7148 spin_lock_irq(shost->host_lock); 7149 vport->fc_flag |= FC_PT2PT_PLOGI; 7150 spin_unlock_irq(shost->host_lock); 7151 7152 /* If we have the high WWPN we can assign our own 7153 * myDID; otherwise, we have to WAIT for a PLOGI 7154 * from the remote NPort to find out what it 7155 * will be. 7156 */ 7157 vport->fc_myDID = PT2PT_LocalID; 7158 } else { 7159 vport->fc_myDID = PT2PT_RemoteID; 7160 } 7161 7162 /* 7163 * The vport state should go to LPFC_FLOGI only 7164 * AFTER we issue a FLOGI, not receive one. 7165 */ 7166 spin_lock_irq(shost->host_lock); 7167 fc_flag = vport->fc_flag; 7168 port_state = vport->port_state; 7169 vport->fc_flag |= FC_PT2PT; 7170 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7171 7172 /* Acking an unsol FLOGI. Count 1 for link bounce 7173 * work-around. 7174 */ 7175 vport->rcv_flogi_cnt++; 7176 spin_unlock_irq(shost->host_lock); 7177 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7178 "3311 Rcv Flogi PS x%x new PS x%x " 7179 "fc_flag x%x new fc_flag x%x\n", 7180 port_state, vport->port_state, 7181 fc_flag, vport->fc_flag); 7182 7183 /* 7184 * We temporarily set fc_myDID to make it look like we are 7185 * a Fabric. This is done just so we end up with the right 7186 * did / sid on the FLOGI ACC rsp. 
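 * fc_myDID is restored below, both on the deferred-ACC path and right
 * after the ACC is sent.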
7187 */ 7188 did = vport->fc_myDID; 7189 vport->fc_myDID = Fabric_DID; 7190 7191 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 7192 7193 /* Defer ACC response until AFTER we issue a FLOGI */ 7194 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 7195 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext; 7196 phba->defer_flogi_acc_ox_id = 7197 cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7198 7199 vport->fc_myDID = did; 7200 7201 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7202 "3344 Deferring FLOGI ACC: rx_id: x%x," 7203 " ox_id: x%x, hba_flag x%x\n", 7204 phba->defer_flogi_acc_rx_id, 7205 phba->defer_flogi_acc_ox_id, phba->hba_flag); 7206 7207 phba->defer_flogi_acc_flag = true; 7208 7209 return 0; 7210 } 7211 7212 /* Send back ACC */ 7213 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 7214 7215 /* Now lets put fc_myDID back to what its supposed to be */ 7216 vport->fc_myDID = did; 7217 7218 return 0; 7219 } 7220 7221 /** 7222 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 7223 * @vport: pointer to a host virtual N_Port data structure. 7224 * @cmdiocb: pointer to lpfc command iocb data structure. 7225 * @ndlp: pointer to a node-list data structure. 7226 * 7227 * This routine processes Request Node Identification Data (RNID) IOCB 7228 * received as an ELS unsolicited event. Only when the RNID specified format 7229 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 7230 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 7231 * Accept (ACC) the RNID ELS command. All the other RNID formats are 7232 * rejected by invoking the lpfc_els_rsp_reject() routine. 7233 * 7234 * Return code 7235 * 0 - Successfully processed rnid iocb (currently always return 0) 7236 **/ 7237 static int 7238 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7239 struct lpfc_nodelist *ndlp) 7240 { 7241 struct lpfc_dmabuf *pcmd; 7242 uint32_t *lp; 7243 RNID *rn; 7244 struct ls_rjt stat; 7245 7246 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7247 lp = (uint32_t *) pcmd->virt; 7248 7249 lp++; 7250 rn = (RNID *) lp; 7251 7252 /* RNID received */ 7253 7254 switch (rn->Format) { 7255 case 0: 7256 case RNID_TOPOLOGY_DISC: 7257 /* Send back ACC */ 7258 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 7259 break; 7260 default: 7261 /* Reject this request because format not supported */ 7262 stat.un.b.lsRjtRsvd0 = 0; 7263 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7264 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7265 stat.un.b.vendorUnique = 0; 7266 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7267 NULL); 7268 } 7269 return 0; 7270 } 7271 7272 /** 7273 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 7274 * @vport: pointer to a host virtual N_Port data structure. 7275 * @cmdiocb: pointer to lpfc command iocb data structure. 7276 * @ndlp: pointer to a node-list data structure. 
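 *
 * This routine processes an ECHO IOCB received as an ELS unsolicited
 * event. It skips the first word (the ECHO command) of the payload and
 * passes the remaining echo data to lpfc_els_rsp_echo_acc() so it is
 * returned to the sender unchanged in the Accept (ACC).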
7277 * 7278 * Return code 7279 * 0 - Successfully processed echo iocb (currently always return 0) 7280 **/ 7281 static int 7282 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7283 struct lpfc_nodelist *ndlp) 7284 { 7285 uint8_t *pcmd; 7286 7287 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 7288 7289 /* skip over first word of echo command to find echo data */ 7290 pcmd += sizeof(uint32_t); 7291 7292 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 7293 return 0; 7294 } 7295 7296 /** 7297 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 7298 * @vport: pointer to a host virtual N_Port data structure. 7299 * @cmdiocb: pointer to lpfc command iocb data structure. 7300 * @ndlp: pointer to a node-list data structure. 7301 * 7302 * This routine processes a Link Incident Report Registration(LIRR) IOCB 7303 * received as an ELS unsolicited event. Currently, this function just invokes 7304 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 7305 * 7306 * Return code 7307 * 0 - Successfully processed lirr iocb (currently always return 0) 7308 **/ 7309 static int 7310 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7311 struct lpfc_nodelist *ndlp) 7312 { 7313 struct ls_rjt stat; 7314 7315 /* For now, unconditionally reject this command */ 7316 stat.un.b.lsRjtRsvd0 = 0; 7317 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7318 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7319 stat.un.b.vendorUnique = 0; 7320 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7321 return 0; 7322 } 7323 7324 /** 7325 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 7326 * @vport: pointer to a host virtual N_Port data structure. 7327 * @cmdiocb: pointer to lpfc command iocb data structure. 7328 * @ndlp: pointer to a node-list data structure. 7329 * 7330 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 7331 * received as an ELS unsolicited event. A request to RRQ shall only 7332 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 7333 * Nx_Port N_Port_ID of the target Exchange is the same as the 7334 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 7335 * not accepted, an LS_RJT with reason code "Unable to perform 7336 * command request" and reason code explanation "Invalid Originator 7337 * S_ID" shall be returned. For now, we just unconditionally accept 7338 * RRQ from the target. 7339 **/ 7340 static void 7341 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7342 struct lpfc_nodelist *ndlp) 7343 { 7344 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7345 if (vport->phba->sli_rev == LPFC_SLI_REV4) 7346 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 7347 } 7348 7349 /** 7350 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 7351 * @phba: pointer to lpfc hba data structure. 7352 * @pmb: pointer to the driver internal queue element for mailbox command. 7353 * 7354 * This routine is the completion callback function for the MBX_READ_LNK_STAT 7355 * mailbox command. This callback function is to actually send the Accept 7356 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 7357 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 7358 * mailbox command, constructs the RPS response with the link statistics 7359 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 7360 * response to the RPS. 
7361 * 7362 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7363 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7364 * will be stored into the context1 field of the IOCB for the completion 7365 * callback function to the RPS Accept Response ELS IOCB command. 7366 * 7367 **/ 7368 static void 7369 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7370 { 7371 int rc = 0; 7372 MAILBOX_t *mb; 7373 IOCB_t *icmd; 7374 struct RLS_RSP *rls_rsp; 7375 uint8_t *pcmd; 7376 struct lpfc_iocbq *elsiocb; 7377 struct lpfc_nodelist *ndlp; 7378 uint16_t oxid; 7379 uint16_t rxid; 7380 uint32_t cmdsize; 7381 7382 mb = &pmb->u.mb; 7383 7384 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 7385 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 7386 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 7387 pmb->ctx_buf = NULL; 7388 pmb->ctx_ndlp = NULL; 7389 7390 if (mb->mbxStatus) { 7391 mempool_free(pmb, phba->mbox_mem_pool); 7392 return; 7393 } 7394 7395 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 7396 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7397 lpfc_max_els_tries, ndlp, 7398 ndlp->nlp_DID, ELS_CMD_ACC); 7399 7400 /* Decrement the ndlp reference count from previous mbox command */ 7401 lpfc_nlp_put(ndlp); 7402 7403 if (!elsiocb) { 7404 mempool_free(pmb, phba->mbox_mem_pool); 7405 return; 7406 } 7407 7408 icmd = &elsiocb->iocb; 7409 icmd->ulpContext = rxid; 7410 icmd->unsli3.rcvsli3.ox_id = oxid; 7411 7412 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7413 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7414 pcmd += sizeof(uint32_t); /* Skip past command */ 7415 rls_rsp = (struct RLS_RSP *)pcmd; 7416 7417 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 7418 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 7419 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 7420 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 7421 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 7422 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 7423 mempool_free(pmb, phba->mbox_mem_pool); 7424 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 7425 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7426 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 7427 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 7428 elsiocb->iotag, elsiocb->iocb.ulpContext, 7429 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7430 ndlp->nlp_rpi); 7431 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7432 phba->fc_stat.elsXmitACC++; 7433 elsiocb->context1 = lpfc_nlp_get(ndlp); 7434 if (!elsiocb->context1) 7435 goto node_err; 7436 7437 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7438 if (rc == IOCB_ERROR) 7439 goto io_err; 7440 return; 7441 7442 io_err: 7443 lpfc_nlp_put(ndlp); 7444 node_err: 7445 lpfc_els_free_iocb(phba, elsiocb); 7446 } 7447 7448 /** 7449 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 7450 * @vport: pointer to a host virtual N_Port data structure. 7451 * @cmdiocb: pointer to lpfc command iocb data structure. 7452 * @ndlp: pointer to a node-list data structure. 7453 * 7454 * This routine processes Read Link Status (RLS) IOCB received as an 7455 * ELS unsolicited event. It first checks the remote port state. If the 7456 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7457 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 7458 * response. 
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
7459 * for reading the HBA link statistics. The callback function,
7460 * lpfc_els_rsp_rls_acc(), registered for the MBX_READ_LNK_STAT mailbox
7461 * command, then actually sends out the RLS Accept (ACC) response.
7462 *
7463 * Return codes
7464 * 0 - Successfully processed rls iocb (currently always return 0)
7465 **/
7466 static int
7467 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7468 struct lpfc_nodelist *ndlp)
7469 {
7470 struct lpfc_hba *phba = vport->phba;
7471 LPFC_MBOXQ_t *mbox;
7472 struct ls_rjt stat;
7473
7474 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7475 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7476 /* reject the unsolicited RLS request and done with it */
7477 goto reject_out;
7478
7479 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
7480 if (mbox) {
7481 lpfc_read_lnk_stat(phba, mbox);
7482 mbox->ctx_buf = (void *)((unsigned long)
7483 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
7484 cmdiocb->iocb.ulpContext)); /* rx_id */
7485 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
7486 if (!mbox->ctx_ndlp)
7487 goto node_err;
7488 mbox->vport = vport;
7489 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
7490 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7491 != MBX_NOT_FINISHED)
7492 /* Mbox completion will send ELS Response */
7493 return 0;
7494 /* Decrement reference count used for the failed mbox
7495 * command.
7496 */
7497 lpfc_nlp_put(ndlp);
7498 node_err:
7499 mempool_free(mbox, phba->mbox_mem_pool);
7500 }
7501 reject_out:
7502 /* issue rejection response */
7503 stat.un.b.lsRjtRsvd0 = 0;
7504 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7505 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7506 stat.un.b.vendorUnique = 0;
7507 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7508 return 0;
7509 }
7510
7511 /**
7512 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
7513 * @vport: pointer to a host virtual N_Port data structure.
7514 * @cmdiocb: pointer to lpfc command iocb data structure.
7515 * @ndlp: pointer to a node-list data structure.
7516 *
7517 * This routine processes a Read Timeout Value (RTV) IOCB received as an
7518 * ELS unsolicited event. It first checks the remote port state. If the
7519 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7520 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
7521 * response. Otherwise, it sends the Accept (ACC) response to the Read
7522 * Timeout Value (RTV) unsolicited IOCB event.
7523 *
7524 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7525 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7526 * will be stored into the context1 field of the IOCB for the completion
7527 * callback function to the RTV Accept Response ELS IOCB command.
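 *
 * The RTV ACC payload reports R_A_TOV in milliseconds (fc_ratov is kept
 * in seconds, hence the multiply by 1000), E_D_TOV as maintained by the
 * HBA, and a qtov word whose E_D_TOV-resolution bit, per FC-LS, indicates
 * that E_D_TOV is expressed in nanoseconds rather than milliseconds.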
7528 * 7529 * Return codes 7530 * 0 - Successfully processed rtv iocb (currently always return 0) 7531 **/ 7532 static int 7533 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7534 struct lpfc_nodelist *ndlp) 7535 { 7536 int rc = 0; 7537 struct lpfc_hba *phba = vport->phba; 7538 struct ls_rjt stat; 7539 struct RTV_RSP *rtv_rsp; 7540 uint8_t *pcmd; 7541 struct lpfc_iocbq *elsiocb; 7542 uint32_t cmdsize; 7543 7544 7545 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7546 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 7547 /* reject the unsolicited RTV request and done with it */ 7548 goto reject_out; 7549 7550 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 7551 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7552 lpfc_max_els_tries, ndlp, 7553 ndlp->nlp_DID, ELS_CMD_ACC); 7554 7555 if (!elsiocb) 7556 return 1; 7557 7558 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7559 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7560 pcmd += sizeof(uint32_t); /* Skip past command */ 7561 7562 /* use the command's xri in the response */ 7563 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 7564 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7565 7566 rtv_rsp = (struct RTV_RSP *)pcmd; 7567 7568 /* populate RTV payload */ 7569 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 7570 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 7571 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 7572 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 7573 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 7574 7575 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 7576 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7577 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 7578 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 7579 "Data: x%x x%x x%x\n", 7580 elsiocb->iotag, elsiocb->iocb.ulpContext, 7581 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7582 ndlp->nlp_rpi, 7583 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 7584 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7585 phba->fc_stat.elsXmitACC++; 7586 elsiocb->context1 = lpfc_nlp_get(ndlp); 7587 if (!elsiocb->context1) { 7588 lpfc_els_free_iocb(phba, elsiocb); 7589 return 0; 7590 } 7591 7592 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7593 if (rc == IOCB_ERROR) { 7594 lpfc_nlp_put(ndlp); 7595 lpfc_els_free_iocb(phba, elsiocb); 7596 } 7597 return 0; 7598 7599 reject_out: 7600 /* issue rejection response */ 7601 stat.un.b.lsRjtRsvd0 = 0; 7602 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7603 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7604 stat.un.b.vendorUnique = 0; 7605 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7606 return 0; 7607 } 7608 7609 /* lpfc_issue_els_rrq - Process an unsolicited rrq iocb 7610 * @vport: pointer to a host virtual N_Port data structure. 7611 * @ndlp: pointer to a node-list data structure. 7612 * @did: DID of the target. 7613 * @rrq: Pointer to the rrq struct. 7614 * 7615 * Build a ELS RRQ command and send it to the target. If the issue_iocb is 7616 * Successful the the completion handler will clear the RRQ. 7617 * 7618 * Return codes 7619 * 0 - Successfully sent rrq els iocb. 7620 * 1 - Failed to send rrq els iocb. 
7621 **/ 7622 static int 7623 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7624 uint32_t did, struct lpfc_node_rrq *rrq) 7625 { 7626 struct lpfc_hba *phba = vport->phba; 7627 struct RRQ *els_rrq; 7628 struct lpfc_iocbq *elsiocb; 7629 uint8_t *pcmd; 7630 uint16_t cmdsize; 7631 int ret; 7632 7633 7634 if (ndlp != rrq->ndlp) 7635 ndlp = rrq->ndlp; 7636 if (!ndlp) 7637 return 1; 7638 7639 /* If ndlp is not NULL, we will bump the reference count on it */ 7640 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 7641 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 7642 ELS_CMD_RRQ); 7643 if (!elsiocb) 7644 return 1; 7645 7646 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7647 7648 /* For RRQ request, remainder of payload is Exchange IDs */ 7649 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 7650 pcmd += sizeof(uint32_t); 7651 els_rrq = (struct RRQ *) pcmd; 7652 7653 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 7654 bf_set(rrq_rxid, els_rrq, rrq->rxid); 7655 bf_set(rrq_did, els_rrq, vport->fc_myDID); 7656 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 7657 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 7658 7659 7660 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7661 "Issue RRQ: did:x%x", 7662 did, rrq->xritag, rrq->rxid); 7663 elsiocb->context_un.rrq = rrq; 7664 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 7665 elsiocb->context1 = lpfc_nlp_get(ndlp); 7666 if (!elsiocb->context1) 7667 goto node_err; 7668 7669 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7670 if (ret == IOCB_ERROR) 7671 goto io_err; 7672 return 0; 7673 7674 io_err: 7675 lpfc_nlp_put(ndlp); 7676 node_err: 7677 lpfc_els_free_iocb(phba, elsiocb); 7678 return 1; 7679 } 7680 7681 /** 7682 * lpfc_send_rrq - Sends ELS RRQ if needed. 7683 * @phba: pointer to lpfc hba data structure. 7684 * @rrq: pointer to the active rrq. 7685 * 7686 * This routine will call the lpfc_issue_els_rrq if the rrq is 7687 * still active for the xri. If this function returns a failure then 7688 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 7689 * 7690 * Returns 0 Success. 7691 * 1 Failure. 7692 **/ 7693 int 7694 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 7695 { 7696 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 7697 rrq->nlp_DID); 7698 if (!ndlp) 7699 return 1; 7700 7701 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 7702 return lpfc_issue_els_rrq(rrq->vport, ndlp, 7703 rrq->nlp_DID, rrq); 7704 else 7705 return 1; 7706 } 7707 7708 /** 7709 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 7710 * @vport: pointer to a host virtual N_Port data structure. 7711 * @cmdsize: size of the ELS command. 7712 * @oldiocb: pointer to the original lpfc command iocb data structure. 7713 * @ndlp: pointer to a node-list data structure. 7714 * 7715 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 7716 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 7717 * 7718 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7719 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7720 * will be stored into the context1 field of the IOCB for the completion 7721 * callback function to the RPL Accept Response ELS command. 
7722 * 7723 * Return code 7724 * 0 - Successfully issued ACC RPL ELS command 7725 * 1 - Failed to issue ACC RPL ELS command 7726 **/ 7727 static int 7728 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 7729 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 7730 { 7731 int rc = 0; 7732 struct lpfc_hba *phba = vport->phba; 7733 IOCB_t *icmd, *oldcmd; 7734 RPL_RSP rpl_rsp; 7735 struct lpfc_iocbq *elsiocb; 7736 uint8_t *pcmd; 7737 7738 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 7739 ndlp->nlp_DID, ELS_CMD_ACC); 7740 7741 if (!elsiocb) 7742 return 1; 7743 7744 icmd = &elsiocb->iocb; 7745 oldcmd = &oldiocb->iocb; 7746 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 7747 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 7748 7749 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7750 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7751 pcmd += sizeof(uint16_t); 7752 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 7753 pcmd += sizeof(uint16_t); 7754 7755 /* Setup the RPL ACC payload */ 7756 rpl_rsp.listLen = be32_to_cpu(1); 7757 rpl_rsp.index = 0; 7758 rpl_rsp.port_num_blk.portNum = 0; 7759 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 7760 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 7761 sizeof(struct lpfc_name)); 7762 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 7763 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 7764 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7765 "0120 Xmit ELS RPL ACC response tag x%x " 7766 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 7767 "rpi x%x\n", 7768 elsiocb->iotag, elsiocb->iocb.ulpContext, 7769 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7770 ndlp->nlp_rpi); 7771 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7772 phba->fc_stat.elsXmitACC++; 7773 elsiocb->context1 = lpfc_nlp_get(ndlp); 7774 if (!elsiocb->context1) 7775 goto node_err; 7776 7777 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7778 if (rc == IOCB_ERROR) 7779 goto io_err; 7780 return 0; 7781 7782 io_err: 7783 lpfc_nlp_put(ndlp); 7784 node_err: 7785 lpfc_els_free_iocb(phba, elsiocb); 7786 return 1; 7787 } 7788 7789 /** 7790 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 7791 * @vport: pointer to a host virtual N_Port data structure. 7792 * @cmdiocb: pointer to lpfc command iocb data structure. 7793 * @ndlp: pointer to a node-list data structure. 7794 * 7795 * This routine processes Read Port List (RPL) IOCB received as an ELS 7796 * unsolicited event. It first checks the remote port state. If the remote 7797 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 7798 * invokes the lpfc_els_rsp_reject() routine to send reject response. 7799 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 7800 * to accept the RPL. 
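 *
 * The accept size is derived from the RPL maxsize field: a request starting
 * at index 0 with a maxsize of zero (no limit), or one large enough to hold
 * a full RPL_RSP, yields a complete single-entry response; otherwise the
 * accept is limited to maxsize words.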
7801 * 7802 * Return code 7803 * 0 - Successfully processed rpl iocb (currently always return 0) 7804 **/ 7805 static int 7806 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7807 struct lpfc_nodelist *ndlp) 7808 { 7809 struct lpfc_dmabuf *pcmd; 7810 uint32_t *lp; 7811 uint32_t maxsize; 7812 uint16_t cmdsize; 7813 RPL *rpl; 7814 struct ls_rjt stat; 7815 7816 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7817 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 7818 /* issue rejection response */ 7819 stat.un.b.lsRjtRsvd0 = 0; 7820 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7821 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7822 stat.un.b.vendorUnique = 0; 7823 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7824 NULL); 7825 /* rejected the unsolicited RPL request and done with it */ 7826 return 0; 7827 } 7828 7829 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7830 lp = (uint32_t *) pcmd->virt; 7831 rpl = (RPL *) (lp + 1); 7832 maxsize = be32_to_cpu(rpl->maxsize); 7833 7834 /* We support only one port */ 7835 if ((rpl->index == 0) && 7836 ((maxsize == 0) || 7837 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 7838 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 7839 } else { 7840 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 7841 } 7842 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 7843 7844 return 0; 7845 } 7846 7847 /** 7848 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 7849 * @vport: pointer to a virtual N_Port data structure. 7850 * @cmdiocb: pointer to lpfc command iocb data structure. 7851 * @ndlp: pointer to a node-list data structure. 7852 * 7853 * This routine processes Fibre Channel Address Resolution Protocol 7854 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 7855 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 7856 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 7857 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 7858 * remote PortName is compared against the FC PortName stored in the @vport 7859 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 7860 * compared against the FC NodeName stored in the @vport data structure. 7861 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 7862 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 7863 * invoked to send out FARP Response to the remote node. Before sending the 7864 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 7865 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 7866 * routine is invoked to log into the remote port first. 
7867 * 7868 * Return code 7869 * 0 - Either the FARP Match Mode not supported or successfully processed 7870 **/ 7871 static int 7872 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7873 struct lpfc_nodelist *ndlp) 7874 { 7875 struct lpfc_dmabuf *pcmd; 7876 uint32_t *lp; 7877 IOCB_t *icmd; 7878 FARP *fp; 7879 uint32_t cnt, did; 7880 7881 icmd = &cmdiocb->iocb; 7882 did = icmd->un.elsreq64.remoteID; 7883 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7884 lp = (uint32_t *) pcmd->virt; 7885 7886 lp++; 7887 fp = (FARP *) lp; 7888 /* FARP-REQ received from DID <did> */ 7889 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7890 "0601 FARP-REQ received from DID x%x\n", did); 7891 /* We will only support match on WWPN or WWNN */ 7892 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 7893 return 0; 7894 } 7895 7896 cnt = 0; 7897 /* If this FARP command is searching for my portname */ 7898 if (fp->Mflags & FARP_MATCH_PORT) { 7899 if (memcmp(&fp->RportName, &vport->fc_portname, 7900 sizeof(struct lpfc_name)) == 0) 7901 cnt = 1; 7902 } 7903 7904 /* If this FARP command is searching for my nodename */ 7905 if (fp->Mflags & FARP_MATCH_NODE) { 7906 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 7907 sizeof(struct lpfc_name)) == 0) 7908 cnt = 1; 7909 } 7910 7911 if (cnt) { 7912 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 7913 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 7914 /* Log back into the node before sending the FARP. */ 7915 if (fp->Rflags & FARP_REQUEST_PLOGI) { 7916 ndlp->nlp_prev_state = ndlp->nlp_state; 7917 lpfc_nlp_set_state(vport, ndlp, 7918 NLP_STE_PLOGI_ISSUE); 7919 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 7920 } 7921 7922 /* Send a FARP response to that node */ 7923 if (fp->Rflags & FARP_REQUEST_FARPR) 7924 lpfc_issue_els_farpr(vport, did, 0); 7925 } 7926 } 7927 return 0; 7928 } 7929 7930 /** 7931 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 7932 * @vport: pointer to a host virtual N_Port data structure. 7933 * @cmdiocb: pointer to lpfc command iocb data structure. 7934 * @ndlp: pointer to a node-list data structure. 7935 * 7936 * This routine processes Fibre Channel Address Resolution Protocol 7937 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 7938 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 7939 * the FARP response request. 7940 * 7941 * Return code 7942 * 0 - Successfully processed FARPR IOCB (currently always return 0) 7943 **/ 7944 static int 7945 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7946 struct lpfc_nodelist *ndlp) 7947 { 7948 struct lpfc_dmabuf *pcmd; 7949 uint32_t *lp; 7950 IOCB_t *icmd; 7951 uint32_t did; 7952 7953 icmd = &cmdiocb->iocb; 7954 did = icmd->un.elsreq64.remoteID; 7955 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7956 lp = (uint32_t *) pcmd->virt; 7957 7958 lp++; 7959 /* FARP-RSP received from DID <did> */ 7960 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7961 "0600 FARP-RSP received from DID x%x\n", did); 7962 /* ACCEPT the Farp resp request */ 7963 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7964 7965 return 0; 7966 } 7967 7968 /** 7969 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 7970 * @vport: pointer to a host virtual N_Port data structure. 7971 * @cmdiocb: pointer to lpfc command iocb data structure. 7972 * @fan_ndlp: pointer to a node-list data structure. 
7973 * 7974 * This routine processes a Fabric Address Notification (FAN) IOCB 7975 * command received as an ELS unsolicited event. The FAN ELS command will 7976 * only be processed on a physical port (i.e., the @vport represents the 7977 * physical port). The fabric NodeName and PortName from the FAN IOCB are 7978 * compared against those in the phba data structure. If any of those is 7979 * different, the lpfc_initial_flogi() routine is invoked to initialize 7980 * Fabric Login (FLOGI) to the fabric to start the discover over. Otherwise, 7981 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 7982 * is invoked to register login to the fabric. 7983 * 7984 * Return code 7985 * 0 - Successfully processed fan iocb (currently always return 0). 7986 **/ 7987 static int 7988 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7989 struct lpfc_nodelist *fan_ndlp) 7990 { 7991 struct lpfc_hba *phba = vport->phba; 7992 uint32_t *lp; 7993 FAN *fp; 7994 7995 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 7996 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 7997 fp = (FAN *) ++lp; 7998 /* FAN received; Fan does not have a reply sequence */ 7999 if ((vport == phba->pport) && 8000 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 8001 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 8002 sizeof(struct lpfc_name))) || 8003 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 8004 sizeof(struct lpfc_name)))) { 8005 /* This port has switched fabrics. FLOGI is required */ 8006 lpfc_issue_init_vfi(vport); 8007 } else { 8008 /* FAN verified - skip FLOGI */ 8009 vport->fc_myDID = vport->fc_prevDID; 8010 if (phba->sli_rev < LPFC_SLI_REV4) 8011 lpfc_issue_fabric_reglogin(vport); 8012 else { 8013 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8014 "3138 Need register VFI: (x%x/%x)\n", 8015 vport->fc_prevDID, vport->fc_myDID); 8016 lpfc_issue_reg_vfi(vport); 8017 } 8018 } 8019 } 8020 return 0; 8021 } 8022 8023 /** 8024 * lpfc_els_timeout - Handler funciton to the els timer 8025 * @t: timer context used to obtain the vport. 8026 * 8027 * This routine is invoked by the ELS timer after timeout. It posts the ELS 8028 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 8029 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 8030 * up the worker thread. It is for the worker thread to invoke the routine 8031 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 8032 **/ 8033 void 8034 lpfc_els_timeout(struct timer_list *t) 8035 { 8036 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 8037 struct lpfc_hba *phba = vport->phba; 8038 uint32_t tmo_posted; 8039 unsigned long iflag; 8040 8041 spin_lock_irqsave(&vport->work_port_lock, iflag); 8042 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 8043 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8044 vport->work_port_events |= WORKER_ELS_TMO; 8045 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 8046 8047 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8048 lpfc_worker_wake_up(phba); 8049 return; 8050 } 8051 8052 8053 /** 8054 * lpfc_els_timeout_handler - Process an els timeout event 8055 * @vport: pointer to a virtual N_Port data structure. 8056 * 8057 * This routine is the actual handler function that processes an ELS timeout 8058 * event. 
It walks the ELS ring to get and abort all the IOCBs (except the 8059 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 8060 * invoking the lpfc_sli_issue_abort_iotag() routine. 8061 **/ 8062 void 8063 lpfc_els_timeout_handler(struct lpfc_vport *vport) 8064 { 8065 struct lpfc_hba *phba = vport->phba; 8066 struct lpfc_sli_ring *pring; 8067 struct lpfc_iocbq *tmp_iocb, *piocb; 8068 IOCB_t *cmd = NULL; 8069 struct lpfc_dmabuf *pcmd; 8070 uint32_t els_command = 0; 8071 uint32_t timeout; 8072 uint32_t remote_ID = 0xffffffff; 8073 LIST_HEAD(abort_list); 8074 8075 8076 timeout = (uint32_t)(phba->fc_ratov << 1); 8077 8078 pring = lpfc_phba_elsring(phba); 8079 if (unlikely(!pring)) 8080 return; 8081 8082 if (phba->pport->load_flag & FC_UNLOADING) 8083 return; 8084 8085 spin_lock_irq(&phba->hbalock); 8086 if (phba->sli_rev == LPFC_SLI_REV4) 8087 spin_lock(&pring->ring_lock); 8088 8089 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8090 cmd = &piocb->iocb; 8091 8092 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 8093 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8094 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8095 continue; 8096 8097 if (piocb->vport != vport) 8098 continue; 8099 8100 pcmd = (struct lpfc_dmabuf *) piocb->context2; 8101 if (pcmd) 8102 els_command = *(uint32_t *) (pcmd->virt); 8103 8104 if (els_command == ELS_CMD_FARP || 8105 els_command == ELS_CMD_FARPR || 8106 els_command == ELS_CMD_FDISC) 8107 continue; 8108 8109 if (piocb->drvrTimeout > 0) { 8110 if (piocb->drvrTimeout >= timeout) 8111 piocb->drvrTimeout -= timeout; 8112 else 8113 piocb->drvrTimeout = 0; 8114 continue; 8115 } 8116 8117 remote_ID = 0xffffffff; 8118 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 8119 remote_ID = cmd->un.elsreq64.remoteID; 8120 else { 8121 struct lpfc_nodelist *ndlp; 8122 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 8123 if (ndlp) 8124 remote_ID = ndlp->nlp_DID; 8125 } 8126 list_add_tail(&piocb->dlist, &abort_list); 8127 } 8128 if (phba->sli_rev == LPFC_SLI_REV4) 8129 spin_unlock(&pring->ring_lock); 8130 spin_unlock_irq(&phba->hbalock); 8131 8132 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8133 cmd = &piocb->iocb; 8134 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8135 "0127 ELS timeout Data: x%x x%x x%x " 8136 "x%x\n", els_command, 8137 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 8138 spin_lock_irq(&phba->hbalock); 8139 list_del_init(&piocb->dlist); 8140 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 8141 spin_unlock_irq(&phba->hbalock); 8142 } 8143 8144 if (!list_empty(&pring->txcmplq)) 8145 if (!(phba->pport->load_flag & FC_UNLOADING)) 8146 mod_timer(&vport->els_tmofunc, 8147 jiffies + msecs_to_jiffies(1000 * timeout)); 8148 } 8149 8150 /** 8151 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 8152 * @vport: pointer to a host virtual N_Port data structure. 8153 * 8154 * This routine is used to clean up all the outstanding ELS commands on a 8155 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 8156 * routine. After that, it walks the ELS transmit queue to remove all the 8157 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 8158 * the IOCBs with a non-NULL completion callback function, the callback 8159 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 8160 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion 8161 * callback function, the IOCB will simply be released. 
Finally, it walks 8162 * the ELS transmit completion queue to issue an abort IOCB to any transmit 8163 * completion queue IOCB that is associated with the @vport and is not 8164 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 8165 * part of the discovery state machine) out to HBA by invoking the 8166 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 8167 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 8168 * the IOCBs are aborted when this function returns. 8169 **/ 8170 void 8171 lpfc_els_flush_cmd(struct lpfc_vport *vport) 8172 { 8173 LIST_HEAD(abort_list); 8174 struct lpfc_hba *phba = vport->phba; 8175 struct lpfc_sli_ring *pring; 8176 struct lpfc_iocbq *tmp_iocb, *piocb; 8177 IOCB_t *cmd = NULL; 8178 unsigned long iflags = 0; 8179 8180 lpfc_fabric_abort_vport(vport); 8181 8182 /* 8183 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 8184 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 8185 * ultimately grabs the ring_lock, the driver must splice the list into 8186 * a working list and release the locks before calling the abort. 8187 */ 8188 spin_lock_irqsave(&phba->hbalock, iflags); 8189 pring = lpfc_phba_elsring(phba); 8190 8191 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 8192 if (unlikely(!pring)) { 8193 spin_unlock_irqrestore(&phba->hbalock, iflags); 8194 return; 8195 } 8196 8197 if (phba->sli_rev == LPFC_SLI_REV4) 8198 spin_lock(&pring->ring_lock); 8199 8200 /* First we need to issue aborts to outstanding cmds on txcmpl */ 8201 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8202 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 8203 continue; 8204 8205 if (piocb->vport != vport) 8206 continue; 8207 8208 if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) 8209 continue; 8210 8211 /* On the ELS ring we can have ELS_REQUESTs or 8212 * GEN_REQUESTs waiting for a response. 8213 */ 8214 cmd = &piocb->iocb; 8215 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 8216 list_add_tail(&piocb->dlist, &abort_list); 8217 8218 /* If the link is down when flushing ELS commands 8219 * the firmware will not complete them till after 8220 * the link comes back up. This may confuse 8221 * discovery for the new link up, so we need to 8222 * change the compl routine to just clean up the iocb 8223 * and avoid any retry logic. 8224 */ 8225 if (phba->link_state == LPFC_LINK_DOWN) 8226 piocb->iocb_cmpl = lpfc_cmpl_els_link_down; 8227 } 8228 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) 8229 list_add_tail(&piocb->dlist, &abort_list); 8230 } 8231 8232 if (phba->sli_rev == LPFC_SLI_REV4) 8233 spin_unlock(&pring->ring_lock); 8234 spin_unlock_irqrestore(&phba->hbalock, iflags); 8235 8236 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
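 * The hbalock is re-acquired around each entry so lpfc_sli_issue_abort_iotag()
 * runs with the lock held, without holding it across the whole list.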
*/ 8237 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8238 spin_lock_irqsave(&phba->hbalock, iflags); 8239 list_del_init(&piocb->dlist); 8240 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 8241 spin_unlock_irqrestore(&phba->hbalock, iflags); 8242 } 8243 if (!list_empty(&abort_list)) 8244 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8245 "3387 abort list for txq not empty\n"); 8246 INIT_LIST_HEAD(&abort_list); 8247 8248 spin_lock_irqsave(&phba->hbalock, iflags); 8249 if (phba->sli_rev == LPFC_SLI_REV4) 8250 spin_lock(&pring->ring_lock); 8251 8252 /* No need to abort the txq list, 8253 * just queue them up for lpfc_sli_cancel_iocbs 8254 */ 8255 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 8256 cmd = &piocb->iocb; 8257 8258 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 8259 continue; 8260 } 8261 8262 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 8263 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 8264 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 8265 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 8266 cmd->ulpCommand == CMD_ABORT_XRI_CN) 8267 continue; 8268 8269 if (piocb->vport != vport) 8270 continue; 8271 8272 list_del_init(&piocb->list); 8273 list_add_tail(&piocb->list, &abort_list); 8274 } 8275 8276 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 8277 if (vport == phba->pport) { 8278 list_for_each_entry_safe(piocb, tmp_iocb, 8279 &phba->fabric_iocb_list, list) { 8280 cmd = &piocb->iocb; 8281 list_del_init(&piocb->list); 8282 list_add_tail(&piocb->list, &abort_list); 8283 } 8284 } 8285 8286 if (phba->sli_rev == LPFC_SLI_REV4) 8287 spin_unlock(&pring->ring_lock); 8288 spin_unlock_irqrestore(&phba->hbalock, iflags); 8289 8290 /* Cancel all the IOCBs from the completions list */ 8291 lpfc_sli_cancel_iocbs(phba, &abort_list, 8292 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 8293 8294 return; 8295 } 8296 8297 /** 8298 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 8299 * @phba: pointer to lpfc hba data structure. 8300 * 8301 * This routine is used to clean up all the outstanding ELS commands on a 8302 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 8303 * routine. After that, it walks the ELS transmit queue to remove all the 8304 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 8305 * the IOCBs with the completion callback function associated, the callback 8306 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 8307 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 8308 * callback function associated, the IOCB will simply be released. Finally, 8309 * it walks the ELS transmit completion queue to issue an abort IOCB to any 8310 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 8311 * management plane IOCBs that are not part of the discovery state machine) 8312 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 8313 **/ 8314 void 8315 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 8316 { 8317 struct lpfc_vport *vport; 8318 8319 spin_lock_irq(&phba->port_list_lock); 8320 list_for_each_entry(vport, &phba->port_list, listentry) 8321 lpfc_els_flush_cmd(vport); 8322 spin_unlock_irq(&phba->port_list_lock); 8323 8324 return; 8325 } 8326 8327 /** 8328 * lpfc_send_els_failure_event - Posts an ELS command failure event 8329 * @phba: Pointer to hba context object. 8330 * @cmdiocbp: Pointer to command iocb which reported error. 
8331 * @rspiocbp: Pointer to response iocb which reported error. 8332 * 8333 * This function sends an event when there is an ELS command 8334 * failure. 8335 **/ 8336 void 8337 lpfc_send_els_failure_event(struct lpfc_hba *phba, 8338 struct lpfc_iocbq *cmdiocbp, 8339 struct lpfc_iocbq *rspiocbp) 8340 { 8341 struct lpfc_vport *vport = cmdiocbp->vport; 8342 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8343 struct lpfc_lsrjt_event lsrjt_event; 8344 struct lpfc_fabric_event_header fabric_event; 8345 struct ls_rjt stat; 8346 struct lpfc_nodelist *ndlp; 8347 uint32_t *pcmd; 8348 8349 ndlp = cmdiocbp->context1; 8350 if (!ndlp) 8351 return; 8352 8353 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 8354 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 8355 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 8356 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 8357 sizeof(struct lpfc_name)); 8358 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 8359 sizeof(struct lpfc_name)); 8360 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8361 cmdiocbp->context2)->virt); 8362 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 8363 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 8364 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 8365 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 8366 fc_host_post_vendor_event(shost, 8367 fc_get_event_number(), 8368 sizeof(lsrjt_event), 8369 (char *)&lsrjt_event, 8370 LPFC_NL_VENDOR_ID); 8371 return; 8372 } 8373 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 8374 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 8375 fabric_event.event_type = FC_REG_FABRIC_EVENT; 8376 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 8377 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 8378 else 8379 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 8380 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 8381 sizeof(struct lpfc_name)); 8382 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 8383 sizeof(struct lpfc_name)); 8384 fc_host_post_vendor_event(shost, 8385 fc_get_event_number(), 8386 sizeof(fabric_event), 8387 (char *)&fabric_event, 8388 LPFC_NL_VENDOR_ID); 8389 return; 8390 } 8391 8392 } 8393 8394 /** 8395 * lpfc_send_els_event - Posts unsolicited els event 8396 * @vport: Pointer to vport object. 8397 * @ndlp: Pointer FC node object. 8398 * @payload: ELS command code type. 8399 * 8400 * This function posts an event when there is an incoming 8401 * unsolicited ELS command. 
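 *
 * LOGO events carry the remote WWPN from the LOGO payload and therefore
 * use the larger lpfc_logo_event structure; all other supported commands
 * post only the common lpfc_els_event_header.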
8402 **/ 8403 static void 8404 lpfc_send_els_event(struct lpfc_vport *vport, 8405 struct lpfc_nodelist *ndlp, 8406 uint32_t *payload) 8407 { 8408 struct lpfc_els_event_header *els_data = NULL; 8409 struct lpfc_logo_event *logo_data = NULL; 8410 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8411 8412 if (*payload == ELS_CMD_LOGO) { 8413 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 8414 if (!logo_data) { 8415 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8416 "0148 Failed to allocate memory " 8417 "for LOGO event\n"); 8418 return; 8419 } 8420 els_data = &logo_data->header; 8421 } else { 8422 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 8423 GFP_KERNEL); 8424 if (!els_data) { 8425 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8426 "0149 Failed to allocate memory " 8427 "for ELS event\n"); 8428 return; 8429 } 8430 } 8431 els_data->event_type = FC_REG_ELS_EVENT; 8432 switch (*payload) { 8433 case ELS_CMD_PLOGI: 8434 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 8435 break; 8436 case ELS_CMD_PRLO: 8437 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 8438 break; 8439 case ELS_CMD_ADISC: 8440 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 8441 break; 8442 case ELS_CMD_LOGO: 8443 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 8444 /* Copy the WWPN in the LOGO payload */ 8445 memcpy(logo_data->logo_wwpn, &payload[2], 8446 sizeof(struct lpfc_name)); 8447 break; 8448 default: 8449 kfree(els_data); 8450 return; 8451 } 8452 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 8453 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 8454 if (*payload == ELS_CMD_LOGO) { 8455 fc_host_post_vendor_event(shost, 8456 fc_get_event_number(), 8457 sizeof(struct lpfc_logo_event), 8458 (char *)logo_data, 8459 LPFC_NL_VENDOR_ID); 8460 kfree(logo_data); 8461 } else { 8462 fc_host_post_vendor_event(shost, 8463 fc_get_event_number(), 8464 sizeof(struct lpfc_els_event_header), 8465 (char *)els_data, 8466 LPFC_NL_VENDOR_ID); 8467 kfree(els_data); 8468 } 8469 8470 return; 8471 } 8472 8473 8474 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 8475 FC_LS_TLV_DTAG_INIT); 8476 8477 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 8478 FC_FPIN_LI_EVT_TYPES_INIT); 8479 8480 /** 8481 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 8482 * @vport: Pointer to vport object. 8483 * @tlv: Pointer to the Link Integrity Notification Descriptor. 
8484 * 8485 * This function processes a link integrity FPIN event by 8486 * logging a message 8487 **/ 8488 static void 8489 lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv) 8490 { 8491 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 8492 const char *li_evt_str; 8493 u32 li_evt; 8494 8495 li_evt = be16_to_cpu(li->event_type); 8496 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 8497 8498 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8499 "4680 FPIN Link Integrity %s (x%x) " 8500 "Detecting PN x%016llx Attached PN x%016llx " 8501 "Duration %d mSecs Count %d Port Cnt %d\n", 8502 li_evt_str, li_evt, 8503 be64_to_cpu(li->detecting_wwpn), 8504 be64_to_cpu(li->attached_wwpn), 8505 be32_to_cpu(li->event_threshold), 8506 be32_to_cpu(li->event_count), 8507 be32_to_cpu(li->pname_count)); 8508 } 8509 8510 static void 8511 lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin, 8512 u32 fpin_length) 8513 { 8514 struct fc_tlv_desc *tlv; 8515 const char *dtag_nm; 8516 uint32_t desc_cnt = 0, bytes_remain; 8517 u32 dtag; 8518 8519 /* FPINs handled only if we are in the right discovery state */ 8520 if (vport->port_state < LPFC_DISC_AUTH) 8521 return; 8522 8523 /* make sure there is the full fpin header */ 8524 if (fpin_length < sizeof(struct fc_els_fpin)) 8525 return; 8526 8527 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 8528 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 8529 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 8530 8531 /* process each descriptor */ 8532 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 8533 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 8534 8535 dtag = be32_to_cpu(tlv->desc_tag); 8536 switch (dtag) { 8537 case ELS_DTAG_LNK_INTEGRITY: 8538 lpfc_els_rcv_fpin_li(vport, tlv); 8539 break; 8540 default: 8541 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 8542 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8543 "4678 skipped FPIN descriptor[%d]: " 8544 "tag x%x (%s)\n", 8545 desc_cnt, dtag, dtag_nm); 8546 break; 8547 } 8548 8549 desc_cnt++; 8550 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 8551 tlv = fc_tlv_next_desc(tlv); 8552 } 8553 8554 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length, 8555 (char *)fpin); 8556 } 8557 8558 /** 8559 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 8560 * @phba: pointer to lpfc hba data structure. 8561 * @pring: pointer to a SLI ring. 8562 * @vport: pointer to a host virtual N_Port data structure. 8563 * @elsiocb: pointer to lpfc els command iocb data structure. 8564 * 8565 * This routine is used for processing the IOCB associated with a unsolicited 8566 * event. It first determines whether there is an existing ndlp that matches 8567 * the DID from the unsolicited IOCB. If not, it will create a new one with 8568 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 8569 * IOCB is then used to invoke the proper routine and to set up proper state 8570 * of the discovery state machine. 
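 *
 * Frames that cannot be handled (link attention pending, vport unloading,
 * NPort discovery delayed for anything other than a PLOGI, no ndlp
 * available, or a node in DEV_LOSS) are dropped, while commands that arrive
 * too early in discovery or are not supported are answered with an LS_RJT
 * carrying the appropriate reason and explanation codes.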
8571 **/ 8572 static void 8573 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8574 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 8575 { 8576 struct lpfc_nodelist *ndlp; 8577 struct ls_rjt stat; 8578 uint32_t *payload, payload_len; 8579 uint32_t cmd, did, newnode; 8580 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 8581 IOCB_t *icmd = &elsiocb->iocb; 8582 LPFC_MBOXQ_t *mbox; 8583 8584 if (!vport || !(elsiocb->context2)) 8585 goto dropit; 8586 8587 newnode = 0; 8588 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 8589 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 8590 cmd = *payload; 8591 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 8592 lpfc_post_buffer(phba, pring, 1); 8593 8594 did = icmd->un.rcvels.remoteID; 8595 if (icmd->ulpStatus) { 8596 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8597 "RCV Unsol ELS: status:x%x/x%x did:x%x", 8598 icmd->ulpStatus, icmd->un.ulpWord[4], did); 8599 goto dropit; 8600 } 8601 8602 /* Check to see if link went down during discovery */ 8603 if (lpfc_els_chk_latt(vport)) 8604 goto dropit; 8605 8606 /* Ignore traffic received during vport shutdown. */ 8607 if (vport->load_flag & FC_UNLOADING) 8608 goto dropit; 8609 8610 /* If NPort discovery is delayed drop incoming ELS */ 8611 if ((vport->fc_flag & FC_DISC_DELAYED) && 8612 (cmd != ELS_CMD_PLOGI)) 8613 goto dropit; 8614 8615 ndlp = lpfc_findnode_did(vport, did); 8616 if (!ndlp) { 8617 /* Cannot find existing Fabric ndlp, so allocate a new one */ 8618 ndlp = lpfc_nlp_init(vport, did); 8619 if (!ndlp) 8620 goto dropit; 8621 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8622 newnode = 1; 8623 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 8624 ndlp->nlp_type |= NLP_FABRIC; 8625 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 8626 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8627 newnode = 1; 8628 } 8629 8630 phba->fc_stat.elsRcvFrame++; 8631 8632 /* 8633 * Do not process any unsolicited ELS commands 8634 * if the ndlp is in DEV_LOSS 8635 */ 8636 spin_lock_irq(&ndlp->lock); 8637 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 8638 spin_unlock_irq(&ndlp->lock); 8639 if (newnode) 8640 lpfc_nlp_put(ndlp); 8641 goto dropit; 8642 } 8643 spin_unlock_irq(&ndlp->lock); 8644 8645 elsiocb->context1 = lpfc_nlp_get(ndlp); 8646 if (!elsiocb->context1) 8647 goto dropit; 8648 elsiocb->vport = vport; 8649 8650 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 8651 cmd &= ELS_CMD_MASK; 8652 } 8653 /* ELS command <elsCmd> received from NPORT <did> */ 8654 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8655 "0112 ELS command x%x received from NPORT x%x " 8656 "refcnt %d Data: x%x x%x x%x x%x\n", 8657 cmd, did, kref_read(&ndlp->kref), vport->port_state, 8658 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 8659 8660 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 8661 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 8662 (cmd != ELS_CMD_FLOGI) && 8663 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 8664 rjt_err = LSRJT_LOGICAL_BSY; 8665 rjt_exp = LSEXP_NOTHING_MORE; 8666 goto lsrjt; 8667 } 8668 8669 switch (cmd) { 8670 case ELS_CMD_PLOGI: 8671 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8672 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 8673 did, vport->port_state, ndlp->nlp_flag); 8674 8675 phba->fc_stat.elsRcvPLOGI++; 8676 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 8677 if (phba->sli_rev == LPFC_SLI_REV4 && 8678 (phba->pport->fc_flag & FC_PT2PT)) { 8679 vport->fc_prevDID = vport->fc_myDID; 8680 /* Our DID needs 
to be updated before registering 8681 * the vfi. This is done in lpfc_rcv_plogi but 8682 * that is called after the reg_vfi. 8683 */ 8684 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; 8685 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8686 "3312 Remote port assigned DID x%x " 8687 "%x\n", vport->fc_myDID, 8688 vport->fc_prevDID); 8689 } 8690 8691 lpfc_send_els_event(vport, ndlp, payload); 8692 8693 /* If Nport discovery is delayed, reject PLOGIs */ 8694 if (vport->fc_flag & FC_DISC_DELAYED) { 8695 rjt_err = LSRJT_UNABLE_TPC; 8696 rjt_exp = LSEXP_NOTHING_MORE; 8697 break; 8698 } 8699 8700 if (vport->port_state < LPFC_DISC_AUTH) { 8701 if (!(phba->pport->fc_flag & FC_PT2PT) || 8702 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 8703 rjt_err = LSRJT_UNABLE_TPC; 8704 rjt_exp = LSEXP_NOTHING_MORE; 8705 break; 8706 } 8707 } 8708 8709 spin_lock_irq(&ndlp->lock); 8710 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 8711 spin_unlock_irq(&ndlp->lock); 8712 8713 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8714 NLP_EVT_RCV_PLOGI); 8715 8716 break; 8717 case ELS_CMD_FLOGI: 8718 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8719 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 8720 did, vport->port_state, ndlp->nlp_flag); 8721 8722 phba->fc_stat.elsRcvFLOGI++; 8723 8724 /* If the driver believes fabric discovery is done and is ready, 8725 * bounce the link. There is some descrepancy. 8726 */ 8727 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 8728 vport->fc_flag & FC_PT2PT && 8729 vport->rcv_flogi_cnt >= 1) { 8730 rjt_err = LSRJT_LOGICAL_BSY; 8731 rjt_exp = LSEXP_NOTHING_MORE; 8732 init_link++; 8733 goto lsrjt; 8734 } 8735 8736 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 8737 if (newnode) 8738 lpfc_disc_state_machine(vport, ndlp, NULL, 8739 NLP_EVT_DEVICE_RM); 8740 break; 8741 case ELS_CMD_LOGO: 8742 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8743 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 8744 did, vport->port_state, ndlp->nlp_flag); 8745 8746 phba->fc_stat.elsRcvLOGO++; 8747 lpfc_send_els_event(vport, ndlp, payload); 8748 if (vport->port_state < LPFC_DISC_AUTH) { 8749 rjt_err = LSRJT_UNABLE_TPC; 8750 rjt_exp = LSEXP_NOTHING_MORE; 8751 break; 8752 } 8753 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 8754 break; 8755 case ELS_CMD_PRLO: 8756 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8757 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 8758 did, vport->port_state, ndlp->nlp_flag); 8759 8760 phba->fc_stat.elsRcvPRLO++; 8761 lpfc_send_els_event(vport, ndlp, payload); 8762 if (vport->port_state < LPFC_DISC_AUTH) { 8763 rjt_err = LSRJT_UNABLE_TPC; 8764 rjt_exp = LSEXP_NOTHING_MORE; 8765 break; 8766 } 8767 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 8768 break; 8769 case ELS_CMD_LCB: 8770 phba->fc_stat.elsRcvLCB++; 8771 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 8772 break; 8773 case ELS_CMD_RDP: 8774 phba->fc_stat.elsRcvRDP++; 8775 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 8776 break; 8777 case ELS_CMD_RSCN: 8778 phba->fc_stat.elsRcvRSCN++; 8779 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 8780 if (newnode) 8781 lpfc_disc_state_machine(vport, ndlp, NULL, 8782 NLP_EVT_DEVICE_RM); 8783 break; 8784 case ELS_CMD_ADISC: 8785 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8786 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 8787 did, vport->port_state, ndlp->nlp_flag); 8788 8789 lpfc_send_els_event(vport, ndlp, payload); 8790 phba->fc_stat.elsRcvADISC++; 8791 if (vport->port_state < LPFC_DISC_AUTH) { 8792 rjt_err = LSRJT_UNABLE_TPC; 8793 rjt_exp = LSEXP_NOTHING_MORE; 8794 break; 8795 } 8796 
lpfc_disc_state_machine(vport, ndlp, elsiocb, 8797 NLP_EVT_RCV_ADISC); 8798 break; 8799 case ELS_CMD_PDISC: 8800 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8801 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 8802 did, vport->port_state, ndlp->nlp_flag); 8803 8804 phba->fc_stat.elsRcvPDISC++; 8805 if (vport->port_state < LPFC_DISC_AUTH) { 8806 rjt_err = LSRJT_UNABLE_TPC; 8807 rjt_exp = LSEXP_NOTHING_MORE; 8808 break; 8809 } 8810 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8811 NLP_EVT_RCV_PDISC); 8812 break; 8813 case ELS_CMD_FARPR: 8814 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8815 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 8816 did, vport->port_state, ndlp->nlp_flag); 8817 8818 phba->fc_stat.elsRcvFARPR++; 8819 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 8820 break; 8821 case ELS_CMD_FARP: 8822 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8823 "RCV FARP: did:x%x/ste:x%x flg:x%x", 8824 did, vport->port_state, ndlp->nlp_flag); 8825 8826 phba->fc_stat.elsRcvFARP++; 8827 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 8828 break; 8829 case ELS_CMD_FAN: 8830 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8831 "RCV FAN: did:x%x/ste:x%x flg:x%x", 8832 did, vport->port_state, ndlp->nlp_flag); 8833 8834 phba->fc_stat.elsRcvFAN++; 8835 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 8836 break; 8837 case ELS_CMD_PRLI: 8838 case ELS_CMD_NVMEPRLI: 8839 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8840 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 8841 did, vport->port_state, ndlp->nlp_flag); 8842 8843 phba->fc_stat.elsRcvPRLI++; 8844 if ((vport->port_state < LPFC_DISC_AUTH) && 8845 (vport->fc_flag & FC_FABRIC)) { 8846 rjt_err = LSRJT_UNABLE_TPC; 8847 rjt_exp = LSEXP_NOTHING_MORE; 8848 break; 8849 } 8850 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 8851 break; 8852 case ELS_CMD_LIRR: 8853 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8854 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 8855 did, vport->port_state, ndlp->nlp_flag); 8856 8857 phba->fc_stat.elsRcvLIRR++; 8858 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 8859 if (newnode) 8860 lpfc_disc_state_machine(vport, ndlp, NULL, 8861 NLP_EVT_DEVICE_RM); 8862 break; 8863 case ELS_CMD_RLS: 8864 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8865 "RCV RLS: did:x%x/ste:x%x flg:x%x", 8866 did, vport->port_state, ndlp->nlp_flag); 8867 8868 phba->fc_stat.elsRcvRLS++; 8869 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 8870 if (newnode) 8871 lpfc_disc_state_machine(vport, ndlp, NULL, 8872 NLP_EVT_DEVICE_RM); 8873 break; 8874 case ELS_CMD_RPL: 8875 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8876 "RCV RPL: did:x%x/ste:x%x flg:x%x", 8877 did, vport->port_state, ndlp->nlp_flag); 8878 8879 phba->fc_stat.elsRcvRPL++; 8880 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 8881 if (newnode) 8882 lpfc_disc_state_machine(vport, ndlp, NULL, 8883 NLP_EVT_DEVICE_RM); 8884 break; 8885 case ELS_CMD_RNID: 8886 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8887 "RCV RNID: did:x%x/ste:x%x flg:x%x", 8888 did, vport->port_state, ndlp->nlp_flag); 8889 8890 phba->fc_stat.elsRcvRNID++; 8891 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 8892 if (newnode) 8893 lpfc_disc_state_machine(vport, ndlp, NULL, 8894 NLP_EVT_DEVICE_RM); 8895 break; 8896 case ELS_CMD_RTV: 8897 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8898 "RCV RTV: did:x%x/ste:x%x flg:x%x", 8899 did, vport->port_state, ndlp->nlp_flag); 8900 phba->fc_stat.elsRcvRTV++; 8901 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 8902 if (newnode) 8903 lpfc_disc_state_machine(vport, ndlp, NULL, 8904 
NLP_EVT_DEVICE_RM); 8905 break; 8906 case ELS_CMD_RRQ: 8907 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8908 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 8909 did, vport->port_state, ndlp->nlp_flag); 8910 8911 phba->fc_stat.elsRcvRRQ++; 8912 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 8913 if (newnode) 8914 lpfc_disc_state_machine(vport, ndlp, NULL, 8915 NLP_EVT_DEVICE_RM); 8916 break; 8917 case ELS_CMD_ECHO: 8918 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8919 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 8920 did, vport->port_state, ndlp->nlp_flag); 8921 8922 phba->fc_stat.elsRcvECHO++; 8923 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 8924 if (newnode) 8925 lpfc_disc_state_machine(vport, ndlp, NULL, 8926 NLP_EVT_DEVICE_RM); 8927 break; 8928 case ELS_CMD_REC: 8929 /* receive this due to exchange closed */ 8930 rjt_err = LSRJT_UNABLE_TPC; 8931 rjt_exp = LSEXP_INVALID_OX_RX; 8932 break; 8933 case ELS_CMD_FPIN: 8934 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8935 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 8936 did, vport->port_state, ndlp->nlp_flag); 8937 8938 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 8939 payload_len); 8940 8941 /* There are no replies, so no rjt codes */ 8942 break; 8943 default: 8944 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8945 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 8946 cmd, did, vport->port_state); 8947 8948 /* Unsupported ELS command, reject */ 8949 rjt_err = LSRJT_CMD_UNSUPPORTED; 8950 rjt_exp = LSEXP_NOTHING_MORE; 8951 8952 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 8953 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8954 "0115 Unknown ELS command x%x " 8955 "received from NPORT x%x\n", cmd, did); 8956 if (newnode) 8957 lpfc_disc_state_machine(vport, ndlp, NULL, 8958 NLP_EVT_DEVICE_RM); 8959 break; 8960 } 8961 8962 lsrjt: 8963 /* check if need to LS_RJT received ELS cmd */ 8964 if (rjt_err) { 8965 memset(&stat, 0, sizeof(stat)); 8966 stat.un.b.lsRjtRsnCode = rjt_err; 8967 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 8968 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 8969 NULL); 8970 /* Remove the reference from above for new nodes. */ 8971 if (newnode) 8972 lpfc_disc_state_machine(vport, ndlp, NULL, 8973 NLP_EVT_DEVICE_RM); 8974 } 8975 8976 /* Release the reference on this elsiocb, not the ndlp. */ 8977 lpfc_nlp_put(elsiocb->context1); 8978 elsiocb->context1 = NULL; 8979 8980 /* Special case. Driver received an unsolicited command that 8981 * unsupportable given the driver's current state. Reset the 8982 * link and start over. 8983 */ 8984 if (init_link) { 8985 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8986 if (!mbox) 8987 return; 8988 lpfc_linkdown(phba); 8989 lpfc_init_link(phba, mbox, 8990 phba->cfg_topology, 8991 phba->cfg_link_speed); 8992 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8993 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8994 mbox->vport = vport; 8995 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 8996 MBX_NOT_FINISHED) 8997 mempool_free(mbox, phba->mbox_mem_pool); 8998 } 8999 9000 return; 9001 9002 dropit: 9003 if (vport && !(vport->load_flag & FC_UNLOADING)) 9004 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9005 "0111 Dropping received ELS cmd " 9006 "Data: x%x x%x x%x\n", 9007 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 9008 phba->fc_stat.elsRcvDrop++; 9009 } 9010 9011 /** 9012 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 9013 * @phba: pointer to lpfc hba data structure. 9014 * @pring: pointer to a SLI ring. 
9015 * @elsiocb: pointer to lpfc els iocb data structure. 9016 * 9017 * This routine is used to process an unsolicited event received from a SLI 9018 * (Service Level Interface) ring. The actual processing of the data buffer 9019 * associated with the unsolicited event is done by invoking the routine 9020 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 9021 * SLI ring on which the unsolicited event was received. 9022 **/ 9023 void 9024 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9025 struct lpfc_iocbq *elsiocb) 9026 { 9027 struct lpfc_vport *vport = phba->pport; 9028 IOCB_t *icmd = &elsiocb->iocb; 9029 dma_addr_t paddr; 9030 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 9031 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 9032 9033 elsiocb->context1 = NULL; 9034 elsiocb->context2 = NULL; 9035 elsiocb->context3 = NULL; 9036 9037 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 9038 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 9039 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 9040 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 9041 IOERR_RCV_BUFFER_WAITING) { 9042 phba->fc_stat.NoRcvBuf++; 9043 /* Not enough posted buffers; Try posting more buffers */ 9044 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 9045 lpfc_post_buffer(phba, pring, 0); 9046 return; 9047 } 9048 9049 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 9050 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 9051 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 9052 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 9053 vport = phba->pport; 9054 else 9055 vport = lpfc_find_vport_by_vpid(phba, 9056 icmd->unsli3.rcvsli3.vpi); 9057 } 9058 9059 /* If there are no BDEs associated 9060 * with this IOCB, there is nothing to do. 9061 */ 9062 if (icmd->ulpBdeCount == 0) 9063 return; 9064 9065 /* type of ELS cmd is first 32bit word 9066 * in packet 9067 */ 9068 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 9069 elsiocb->context2 = bdeBuf1; 9070 } else { 9071 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 9072 icmd->un.cont64[0].addrLow); 9073 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 9074 paddr); 9075 } 9076 9077 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 9078 /* 9079 * The different unsolicited event handlers would tell us 9080 * if they are done with "mp" by setting context2 to NULL. 9081 */ 9082 if (elsiocb->context2) { 9083 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 9084 elsiocb->context2 = NULL; 9085 } 9086 9087 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 9088 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 9089 icmd->ulpBdeCount == 2) { 9090 elsiocb->context2 = bdeBuf2; 9091 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 9092 /* free mp if we are done with it */ 9093 if (elsiocb->context2) { 9094 lpfc_in_buf_free(phba, elsiocb->context2); 9095 elsiocb->context2 = NULL; 9096 } 9097 } 9098 } 9099 9100 static void 9101 lpfc_start_fdmi(struct lpfc_vport *vport) 9102 { 9103 struct lpfc_nodelist *ndlp; 9104 9105 /* If this is the first time, allocate an ndlp and initialize 9106 * it. Otherwise, make sure the node is enabled and then do the 9107 * login. 
9108 */ 9109 ndlp = lpfc_findnode_did(vport, FDMI_DID); 9110 if (!ndlp) { 9111 ndlp = lpfc_nlp_init(vport, FDMI_DID); 9112 if (ndlp) { 9113 ndlp->nlp_type |= NLP_FABRIC; 9114 } else { 9115 return; 9116 } 9117 } 9118 9119 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 9120 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9121 } 9122 9123 /** 9124 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 9125 * @phba: pointer to lpfc hba data structure. 9126 * @vport: pointer to a virtual N_Port data structure. 9127 * 9128 * This routine issues a Port Login (PLOGI) to the Name Server with 9129 * State Change Request (SCR) for a @vport. This routine will create an 9130 * ndlp for the Name Server associated to the @vport if such node does 9131 * not already exist. The PLOGI to Name Server is issued by invoking the 9132 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 9133 * (FDMI) is configured to the @vport, a FDMI node will be created and 9134 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 9135 **/ 9136 void 9137 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 9138 { 9139 struct lpfc_nodelist *ndlp; 9140 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9141 9142 /* 9143 * If lpfc_delay_discovery parameter is set and the clean address 9144 * bit is cleared and fc fabric parameters chenged, delay FC NPort 9145 * discovery. 9146 */ 9147 spin_lock_irq(shost->host_lock); 9148 if (vport->fc_flag & FC_DISC_DELAYED) { 9149 spin_unlock_irq(shost->host_lock); 9150 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9151 "3334 Delay fc port discovery for %d secs\n", 9152 phba->fc_ratov); 9153 mod_timer(&vport->delayed_disc_tmo, 9154 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 9155 return; 9156 } 9157 spin_unlock_irq(shost->host_lock); 9158 9159 ndlp = lpfc_findnode_did(vport, NameServer_DID); 9160 if (!ndlp) { 9161 ndlp = lpfc_nlp_init(vport, NameServer_DID); 9162 if (!ndlp) { 9163 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9164 lpfc_disc_start(vport); 9165 return; 9166 } 9167 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9168 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9169 "0251 NameServer login: no memory\n"); 9170 return; 9171 } 9172 } 9173 9174 ndlp->nlp_type |= NLP_FABRIC; 9175 9176 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 9177 9178 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 9179 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9180 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9181 "0252 Cannot issue NameServer login\n"); 9182 return; 9183 } 9184 9185 if ((phba->cfg_enable_SmartSAN || 9186 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 9187 (vport->load_flag & FC_ALLOW_FDMI)) 9188 lpfc_start_fdmi(vport); 9189 } 9190 9191 /** 9192 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 9193 * @phba: pointer to lpfc hba data structure. 9194 * @pmb: pointer to the driver internal queue element for mailbox command. 9195 * 9196 * This routine is the completion callback function to register new vport 9197 * mailbox command. If the new vport mailbox command completes successfully, 9198 * the fabric registration login shall be performed on physical port (the 9199 * new vport created is actually a physical port, with VPI 0) or the port 9200 * login to Name Server for State Change Request (SCR) will be performed 9201 * on virtual port (real virtual port, with VPI greater than 0). 
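 *
 * If the REG_VPI mailbox command fails, the handler either gives up on the
 * vport (unsupported feature, max_vpi exceeded, or a link event since
 * CLEAR_LA), retries with an INIT_VPI mailbox command when the VPI was
 * reported invalid, or unregisters the RPIs and the VPI and restarts fabric
 * discovery for the remaining error codes.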
9202 **/ 9203 static void 9204 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 9205 { 9206 struct lpfc_vport *vport = pmb->vport; 9207 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9208 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 9209 MAILBOX_t *mb = &pmb->u.mb; 9210 int rc; 9211 9212 spin_lock_irq(shost->host_lock); 9213 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 9214 spin_unlock_irq(shost->host_lock); 9215 9216 if (mb->mbxStatus) { 9217 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9218 "0915 Register VPI failed : Status: x%x" 9219 " upd bit: x%x \n", mb->mbxStatus, 9220 mb->un.varRegVpi.upd); 9221 if (phba->sli_rev == LPFC_SLI_REV4 && 9222 mb->un.varRegVpi.upd) 9223 goto mbox_err_exit ; 9224 9225 switch (mb->mbxStatus) { 9226 case 0x11: /* unsupported feature */ 9227 case 0x9603: /* max_vpi exceeded */ 9228 case 0x9602: /* Link event since CLEAR_LA */ 9229 /* giving up on vport registration */ 9230 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9231 spin_lock_irq(shost->host_lock); 9232 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 9233 spin_unlock_irq(shost->host_lock); 9234 lpfc_can_disctmo(vport); 9235 break; 9236 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 9237 case 0x20: 9238 spin_lock_irq(shost->host_lock); 9239 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9240 spin_unlock_irq(shost->host_lock); 9241 lpfc_init_vpi(phba, pmb, vport->vpi); 9242 pmb->vport = vport; 9243 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 9244 rc = lpfc_sli_issue_mbox(phba, pmb, 9245 MBX_NOWAIT); 9246 if (rc == MBX_NOT_FINISHED) { 9247 lpfc_printf_vlog(vport, KERN_ERR, 9248 LOG_TRACE_EVENT, 9249 "2732 Failed to issue INIT_VPI" 9250 " mailbox command\n"); 9251 } else { 9252 lpfc_nlp_put(ndlp); 9253 return; 9254 } 9255 fallthrough; 9256 default: 9257 /* Try to recover from this error */ 9258 if (phba->sli_rev == LPFC_SLI_REV4) 9259 lpfc_sli4_unreg_all_rpis(vport); 9260 lpfc_mbx_unreg_vpi(vport); 9261 spin_lock_irq(shost->host_lock); 9262 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9263 spin_unlock_irq(shost->host_lock); 9264 if (mb->mbxStatus == MBX_NOT_FINISHED) 9265 break; 9266 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 9267 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 9268 if (phba->sli_rev == LPFC_SLI_REV4) 9269 lpfc_issue_init_vfi(vport); 9270 else 9271 lpfc_initial_flogi(vport); 9272 } else { 9273 lpfc_initial_fdisc(vport); 9274 } 9275 break; 9276 } 9277 } else { 9278 spin_lock_irq(shost->host_lock); 9279 vport->vpi_state |= LPFC_VPI_REGISTERED; 9280 spin_unlock_irq(shost->host_lock); 9281 if (vport == phba->pport) { 9282 if (phba->sli_rev < LPFC_SLI_REV4) 9283 lpfc_issue_fabric_reglogin(vport); 9284 else { 9285 /* 9286 * If the physical port is instantiated using 9287 * FDISC, do not start vport discovery. 9288 */ 9289 if (vport->port_state != LPFC_FDISC) 9290 lpfc_start_fdiscs(phba); 9291 lpfc_do_scr_ns_plogi(phba, vport); 9292 } 9293 } else { 9294 lpfc_do_scr_ns_plogi(phba, vport); 9295 } 9296 } 9297 mbox_err_exit: 9298 /* Now, we decrement the ndlp reference count held for this 9299 * callback function 9300 */ 9301 lpfc_nlp_put(ndlp); 9302 9303 mempool_free(pmb, phba->mbox_mem_pool); 9304 return; 9305 } 9306 9307 /** 9308 * lpfc_register_new_vport - Register a new vport with a HBA 9309 * @phba: pointer to lpfc hba data structure. 9310 * @vport: pointer to a host virtual N_Port data structure. 9311 * @ndlp: pointer to a node-list data structure. 9312 * 9313 * This routine registers the @vport as a new virtual port with a HBA. 
 * It is done through a registering vpi mailbox command.
 **/
void
lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
			struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_reg_vpi(vport, mbox);
		mbox->vport = vport;
		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
		if (!mbox->ctx_ndlp) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto mbox_err_exit;
		}

		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			/* mailbox command not successful, decrement the ndlp
			 * reference count for this command
			 */
			lpfc_nlp_put(ndlp);
			mempool_free(mbox, phba->mbox_mem_pool);

			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"0253 Register VPI: Can't send mbox\n");
			goto mbox_err_exit;
		}
	} else {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"0254 Register VPI: no memory\n");
		goto mbox_err_exit;
	}
	return;

mbox_err_exit:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
	spin_unlock_irq(shost->host_lock);
	return;
}

/**
 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timers
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine cancels the retry delay timers for all the vports.
 **/
void
lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	uint32_t link_state;
	int i;

	/* Treat this failure as linkdown for all vports */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	vports = lpfc_create_vport_work_array(phba);

	if (vports) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
			if (ndlp)
				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
			lpfc_els_flush_cmd(vports[i]);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	}
}

/**
 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all pending discovery commands and
 * starts a timer to retry FLOGI for the physical port
 * discovery.
 **/
void
lpfc_retry_pport_discovery(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp;

	/* Cancel all the vports' retry delay timers */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	/* If the fabric requires FLOGI, re-instantiate the physical login */
	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
	if (!ndlp)
		return;

	mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
	spin_lock_irq(&ndlp->lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(&ndlp->lock);
	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
	phba->pport->port_state = LPFC_FLOGI;
	return;
}

/**
 * lpfc_fabric_login_reqd - Check if FLOGI required.
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to FDISC command iocb.
 * @rspiocb: pointer to FDISC response iocb.
9428 * 9429 * This routine checks if a FLOGI is reguired for FDISC 9430 * to succeed. 9431 **/ 9432 static int 9433 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 9434 struct lpfc_iocbq *cmdiocb, 9435 struct lpfc_iocbq *rspiocb) 9436 { 9437 9438 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) || 9439 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED)) 9440 return 0; 9441 else 9442 return 1; 9443 } 9444 9445 /** 9446 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 9447 * @phba: pointer to lpfc hba data structure. 9448 * @cmdiocb: pointer to lpfc command iocb data structure. 9449 * @rspiocb: pointer to lpfc response iocb data structure. 9450 * 9451 * This routine is the completion callback function to a Fabric Discover 9452 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 9453 * single threaded, each FDISC completion callback function will reset 9454 * the discovery timer for all vports such that the timers will not get 9455 * unnecessary timeout. The function checks the FDISC IOCB status. If error 9456 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the 9457 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID 9458 * assigned to the vport has been changed with the completion of the FDISC 9459 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 9460 * are unregistered from the HBA, and then the lpfc_register_new_vport() 9461 * routine is invoked to register new vport with the HBA. Otherwise, the 9462 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 9463 * Server for State Change Request (SCR). 9464 **/ 9465 static void 9466 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9467 struct lpfc_iocbq *rspiocb) 9468 { 9469 struct lpfc_vport *vport = cmdiocb->vport; 9470 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9471 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 9472 struct lpfc_nodelist *np; 9473 struct lpfc_nodelist *next_np; 9474 IOCB_t *irsp = &rspiocb->iocb; 9475 struct lpfc_iocbq *piocb; 9476 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 9477 struct serv_parm *sp; 9478 uint8_t fabric_param_changed; 9479 9480 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9481 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 9482 irsp->ulpStatus, irsp->un.ulpWord[4], 9483 vport->fc_prevDID); 9484 /* Since all FDISCs are being single threaded, we 9485 * must reset the discovery timer for ALL vports 9486 * waiting to send FDISC when one completes. 9487 */ 9488 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 9489 lpfc_set_disctmo(piocb->vport); 9490 } 9491 9492 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9493 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 9494 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 9495 9496 if (irsp->ulpStatus) { 9497 9498 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 9499 lpfc_retry_pport_discovery(phba); 9500 goto out; 9501 } 9502 9503 /* Check for retry */ 9504 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 9505 goto out; 9506 /* FDISC failed */ 9507 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9508 "0126 FDISC failed. 
(x%x/x%x)\n", 9509 irsp->ulpStatus, irsp->un.ulpWord[4]); 9510 goto fdisc_failed; 9511 } 9512 spin_lock_irq(shost->host_lock); 9513 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 9514 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 9515 vport->fc_flag |= FC_FABRIC; 9516 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 9517 vport->fc_flag |= FC_PUBLIC_LOOP; 9518 spin_unlock_irq(shost->host_lock); 9519 9520 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 9521 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 9522 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 9523 if (!prsp) 9524 goto out; 9525 sp = prsp->virt + sizeof(uint32_t); 9526 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 9527 memcpy(&vport->fabric_portname, &sp->portName, 9528 sizeof(struct lpfc_name)); 9529 memcpy(&vport->fabric_nodename, &sp->nodeName, 9530 sizeof(struct lpfc_name)); 9531 if (fabric_param_changed && 9532 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9533 /* If our NportID changed, we need to ensure all 9534 * remaining NPORTs get unreg_login'ed so we can 9535 * issue unreg_vpi. 9536 */ 9537 list_for_each_entry_safe(np, next_np, 9538 &vport->fc_nodes, nlp_listp) { 9539 if ((np->nlp_state != NLP_STE_NPR_NODE) || 9540 !(np->nlp_flag & NLP_NPR_ADISC)) 9541 continue; 9542 spin_lock_irq(&ndlp->lock); 9543 np->nlp_flag &= ~NLP_NPR_ADISC; 9544 spin_unlock_irq(&ndlp->lock); 9545 lpfc_unreg_rpi(vport, np); 9546 } 9547 lpfc_cleanup_pending_mbox(vport); 9548 9549 if (phba->sli_rev == LPFC_SLI_REV4) 9550 lpfc_sli4_unreg_all_rpis(vport); 9551 9552 lpfc_mbx_unreg_vpi(vport); 9553 spin_lock_irq(shost->host_lock); 9554 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9555 if (phba->sli_rev == LPFC_SLI_REV4) 9556 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 9557 else 9558 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 9559 spin_unlock_irq(shost->host_lock); 9560 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 9561 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9562 /* 9563 * Driver needs to re-reg VPI in order for f/w 9564 * to update the MAC address. 9565 */ 9566 lpfc_register_new_vport(phba, vport, ndlp); 9567 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 9568 goto out; 9569 } 9570 9571 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 9572 lpfc_issue_init_vpi(vport); 9573 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 9574 lpfc_register_new_vport(phba, vport, ndlp); 9575 else 9576 lpfc_do_scr_ns_plogi(phba, vport); 9577 9578 /* The FDISC completed successfully. Move the fabric ndlp to 9579 * UNMAPPED state and register with the transport. 9580 */ 9581 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 9582 goto out; 9583 9584 fdisc_failed: 9585 if (vport->fc_vport && 9586 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 9587 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9588 /* Cancel discovery timer */ 9589 lpfc_can_disctmo(vport); 9590 out: 9591 lpfc_els_free_iocb(phba, cmdiocb); 9592 lpfc_nlp_put(ndlp); 9593 } 9594 9595 /** 9596 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 9597 * @vport: pointer to a virtual N_Port data structure. 9598 * @ndlp: pointer to a node-list data structure. 9599 * @retry: number of retries to the command IOCB. 9600 * 9601 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 9602 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 9603 * routine to issue the IOCB, which makes sure only one outstanding fabric 9604 * IOCB will be sent off HBA at any given time. 
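 *
 * The routine places the vport in LPFC_FDISC state with a zero N_Port ID,
 * copies the physical port's service parameters into the payload, and then
 * adjusts the common service parameters (clearing E_D_TOV, R_A_TOV and the
 * class 1 validity bit) as appropriate for a fabric login.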
9605 * 9606 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 9607 * will be incremented by 1 for holding the ndlp and the reference to ndlp 9608 * will be stored into the context1 field of the IOCB for the completion 9609 * callback function to the FDISC ELS command. 9610 * 9611 * Return code 9612 * 0 - Successfully issued fdisc iocb command 9613 * 1 - Failed to issue fdisc iocb command 9614 **/ 9615 static int 9616 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 9617 uint8_t retry) 9618 { 9619 struct lpfc_hba *phba = vport->phba; 9620 IOCB_t *icmd; 9621 struct lpfc_iocbq *elsiocb; 9622 struct serv_parm *sp; 9623 uint8_t *pcmd; 9624 uint16_t cmdsize; 9625 int did = ndlp->nlp_DID; 9626 int rc; 9627 9628 vport->port_state = LPFC_FDISC; 9629 vport->fc_myDID = 0; 9630 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 9631 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 9632 ELS_CMD_FDISC); 9633 if (!elsiocb) { 9634 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9635 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9636 "0255 Issue FDISC: no IOCB\n"); 9637 return 1; 9638 } 9639 9640 icmd = &elsiocb->iocb; 9641 icmd->un.elsreq64.myID = 0; 9642 icmd->un.elsreq64.fl = 1; 9643 9644 /* 9645 * SLI3 ports require a different context type value than SLI4. 9646 * Catch SLI3 ports here and override the prep. 9647 */ 9648 if (phba->sli_rev == LPFC_SLI_REV3) { 9649 icmd->ulpCt_h = 1; 9650 icmd->ulpCt_l = 0; 9651 } 9652 9653 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9654 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 9655 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 9656 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 9657 sp = (struct serv_parm *) pcmd; 9658 /* Setup CSPs accordingly for Fabric */ 9659 sp->cmn.e_d_tov = 0; 9660 sp->cmn.w2.r_a_tov = 0; 9661 sp->cmn.virtual_fabric_support = 0; 9662 sp->cls1.classValid = 0; 9663 sp->cls2.seqDelivery = 1; 9664 sp->cls3.seqDelivery = 1; 9665 9666 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 9667 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 9668 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 9669 pcmd += sizeof(uint32_t); /* Port Name */ 9670 memcpy(pcmd, &vport->fc_portname, 8); 9671 pcmd += sizeof(uint32_t); /* Node Name */ 9672 pcmd += sizeof(uint32_t); /* Node Name */ 9673 memcpy(pcmd, &vport->fc_nodename, 8); 9674 sp->cmn.valid_vendor_ver_level = 0; 9675 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 9676 lpfc_set_disctmo(vport); 9677 9678 phba->fc_stat.elsXmitFDISC++; 9679 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 9680 9681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9682 "Issue FDISC: did:x%x", 9683 did, 0, 0); 9684 9685 elsiocb->context1 = lpfc_nlp_get(ndlp); 9686 if (!elsiocb->context1) 9687 goto err_out; 9688 9689 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 9690 if (rc == IOCB_ERROR) { 9691 lpfc_nlp_put(ndlp); 9692 goto err_out; 9693 } 9694 9695 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 9696 return 0; 9697 9698 err_out: 9699 lpfc_els_free_iocb(phba, elsiocb); 9700 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9701 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9702 "0256 Issue FDISC: Cannot send IOCB\n"); 9703 return 1; 9704 } 9705 9706 /** 9707 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 9708 * @phba: pointer to lpfc hba data structure. 9709 * @cmdiocb: pointer to lpfc command iocb data structure. 9710 * @rspiocb: pointer to lpfc response iocb data structure. 
 *
 * This routine is the completion callback function to the issuing of a LOGO
 * ELS command off a vport. It frees the command IOCB and then decrements the
 * reference count held on the ndlp for this completion function, indicating
 * that the reference to the ndlp is no longer needed. Note that the
 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
 * callback function and an additional explicit ndlp reference decrement
 * will trigger the actual release of the ndlp.
 **/
static void
lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
	irsp = &rspiocb->iocb;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"LOGO npiv cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);

	/* NPIV LOGO completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "2928 NPIV LOGO completes to NPort x%x "
			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
			 irsp->ulpTimeout, vport->num_disc_nodes,
			 kref_read(&ndlp->kref), ndlp->nlp_flag,
			 ndlp->fc4_xpt_flags);

	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		vport->fc_flag &= ~FC_FABRIC;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}

	/* Safe to release resources now. */
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
	vport->unreg_vpi_cmpl = VPORT_ERROR;
}

/**
 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
 * @vport: pointer to a virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the LOGO ELS command.
9769 * 9770 * Return codes 9771 * 0 - Successfully issued logo off the @vport 9772 * 1 - Failed to issue logo off the @vport 9773 **/ 9774 int 9775 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 9776 { 9777 int rc = 0; 9778 struct lpfc_hba *phba = vport->phba; 9779 struct lpfc_iocbq *elsiocb; 9780 uint8_t *pcmd; 9781 uint16_t cmdsize; 9782 9783 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 9784 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 9785 ELS_CMD_LOGO); 9786 if (!elsiocb) 9787 return 1; 9788 9789 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9790 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 9791 pcmd += sizeof(uint32_t); 9792 9793 /* Fill in LOGO payload */ 9794 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 9795 pcmd += sizeof(uint32_t); 9796 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 9797 9798 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9799 "Issue LOGO npiv did:x%x flg:x%x", 9800 ndlp->nlp_DID, ndlp->nlp_flag, 0); 9801 9802 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 9803 spin_lock_irq(&ndlp->lock); 9804 ndlp->nlp_flag |= NLP_LOGO_SND; 9805 spin_unlock_irq(&ndlp->lock); 9806 elsiocb->context1 = lpfc_nlp_get(ndlp); 9807 if (!elsiocb->context1) 9808 goto node_err; 9809 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9810 if (rc == IOCB_ERROR) 9811 goto io_err; 9812 return 0; 9813 9814 io_err: 9815 lpfc_nlp_put(ndlp); 9816 node_err: 9817 spin_lock_irq(&ndlp->lock); 9818 ndlp->nlp_flag &= ~NLP_LOGO_SND; 9819 spin_unlock_irq(&ndlp->lock); 9820 lpfc_els_free_iocb(phba, elsiocb); 9821 return 1; 9822 } 9823 9824 /** 9825 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 9826 * @t: timer context used to obtain the lpfc hba. 9827 * 9828 * This routine is invoked by the fabric iocb block timer after 9829 * timeout. It posts the fabric iocb block timeout event by setting the 9830 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 9831 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 9832 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 9833 * posted event WORKER_FABRIC_BLOCK_TMO. 9834 **/ 9835 void 9836 lpfc_fabric_block_timeout(struct timer_list *t) 9837 { 9838 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 9839 unsigned long iflags; 9840 uint32_t tmo_posted; 9841 9842 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 9843 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 9844 if (!tmo_posted) 9845 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 9846 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 9847 9848 if (!tmo_posted) 9849 lpfc_worker_wake_up(phba); 9850 return; 9851 } 9852 9853 /** 9854 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 9855 * @phba: pointer to lpfc hba data structure. 9856 * 9857 * This routine issues one fabric iocb from the driver internal list to 9858 * the HBA. It first checks whether it's ready to issue one fabric iocb to 9859 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 9860 * remove one pending fabric iocb from the driver internal list and invokes 9861 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
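 *
 * If lpfc_sli_issue_iocb() returns IOCB_ERROR, the iocb is completed
 * locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and the routine loops
 * back to try the next pending fabric iocb on the list.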
 **/
static void
lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocb;
	unsigned long iflags;
	int ret;
	IOCB_t *cmd;

repeat:
	iocb = NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Post any pending iocb to the SLI layer */
	if (atomic_read(&phba->fabric_iocb_count) == 0) {
		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
				 list);
		if (iocb)
			/* Increment fabric iocb count to hold the position */
			atomic_inc(&phba->fabric_iocb_count);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (iocb) {
		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
		iocb->iocb_flag |= LPFC_IO_FABRIC;

		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
				      "Fabric sched1: ste:x%x",
				      iocb->vport->port_state, 0, 0);

		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);

		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
			iocb->fabric_iocb_cmpl = NULL;
			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			cmd = &iocb->iocb;
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			iocb->iocb_cmpl(phba, iocb, iocb);

			atomic_dec(&phba->fabric_iocb_count);
			goto repeat;
		}
	}
}

/**
 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine unblocks the issuing of fabric iocb commands. It clears the
 * fabric iocb block bit and then invokes lpfc_resume_fabric_iocbs() to issue
 * one of the pending fabric iocbs from the driver internal fabric iocb list.
 **/
void
lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
{
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	lpfc_resume_fabric_iocbs(phba);
	return;
}

/**
 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine blocks the issuing of fabric iocbs for a specified amount of
 * time (currently 100 ms). This is done by setting the fabric iocb block bit
 * and setting up a 100 ms timeout timer. While the block bit is set, no
 * further fabric iocbs are issued to the HBA.
 **/
static void
lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
{
	int blocked;

	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	/* Start a timer to unblock fabric iocbs after 100ms */
	if (!blocked)
		mod_timer(&phba->fabric_block_timer,
			  jiffies + msecs_to_jiffies(100));

	return;
}

/**
 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the callback function installed as the fabric iocb's
 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
 * function first restores and invokes the original iocb's callback function
 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
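 *
 * Completions that indicate the fabric or remote port is temporarily busy
 * (NPort/Fabric reject with RJT_UNAVAIL_TEMP, NPort/Fabric busy, or an
 * LS_RJT with an "unable to perform" or "logical busy" reason) also invoke
 * lpfc_block_fabric_iocbs() to briefly pause fabric iocb issuing.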
 **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);

	switch (rspiocb->iocb.ulpStatus) {
		case IOSTAT_NPORT_RJT:
		case IOSTAT_FABRIC_RJT:
			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
				lpfc_block_fabric_iocbs(phba);
			}
			break;

		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			lpfc_block_fabric_iocbs(phba);
			break;

		case IOSTAT_LS_RJT:
			stat.un.lsRjtError =
				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
			    (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
				lpfc_block_fabric_iocbs(phba);
			break;
	}

	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);

	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
	cmdiocb->fabric_iocb_cmpl = NULL;
	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);

	atomic_dec(&phba->fabric_iocb_count);
	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
		/* Post any pending iocbs to HBA */
		lpfc_resume_fabric_iocbs(phba);
	}
}

/**
 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 * @iocb: pointer to lpfc command iocb data structure.
 *
 * This routine is used as the top-level API for issuing a fabric iocb command
 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
 * function makes sure that only one fabric bound iocb will be outstanding at
 * any given time. As such, this function will first check to see whether there
 * is already an outstanding fabric iocb on the wire. If so, it will put the
 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
 * issued later. Otherwise, it will issue the iocb on the wire and update the
 * fabric iocb count to indicate that there is one fabric iocb on the wire.
 *
 * Note that this implementation can potentially send fabric IOCBs out of
 * order: the "ready" check does not include the condition that the internal
 * fabric IOCB list is empty, so a fabric IOCB issued by this routine may
 * jump ahead of the fabric IOCBs already waiting on the internal list.
 *
 * Return code
 *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
 *   IOCB_ERROR - failed to issue fabric iocb
 **/
static int
lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
{
	unsigned long iflags;
	int ready;
	int ret;

	BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);

	spin_lock_irqsave(&phba->hbalock, iflags);
	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);

	if (ready)
		/* Increment fabric iocb count to hold the position */
		atomic_inc(&phba->fabric_iocb_count);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (ready) {
		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
		iocb->iocb_flag |= LPFC_IO_FABRIC;

		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
				      "Fabric sched2: ste:x%x",
				      iocb->vport->port_state, 0, 0);

		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);

		if (ret == IOCB_ERROR) {
			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
			iocb->fabric_iocb_cmpl = NULL;
			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
			atomic_dec(&phba->fabric_iocb_count);
		}
	} else {
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		ret = IOCB_SUCCESS;
	}
	return ret;
}

/**
 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine aborts all the IOCBs associated with a @vport from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @vport off the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
 **/
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *tmp_iocb, *piocb;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
				 list) {

		if (piocb->vport != vport)
			continue;

		list_move_tail(&piocb->list, &completions);
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_fabric_abort_nport - Abort an ndlp's iocbs from driver fabric iocb list
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine aborts all the IOCBs associated with an @ndlp from the
 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
 * list, removes each IOCB associated with the @ndlp off the list, sets the
 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
 * associated with the IOCB.
10117 **/ 10118 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 10119 { 10120 LIST_HEAD(completions); 10121 struct lpfc_hba *phba = ndlp->phba; 10122 struct lpfc_iocbq *tmp_iocb, *piocb; 10123 struct lpfc_sli_ring *pring; 10124 10125 pring = lpfc_phba_elsring(phba); 10126 10127 if (unlikely(!pring)) 10128 return; 10129 10130 spin_lock_irq(&phba->hbalock); 10131 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 10132 list) { 10133 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 10134 10135 list_move_tail(&piocb->list, &completions); 10136 } 10137 } 10138 spin_unlock_irq(&phba->hbalock); 10139 10140 /* Cancel all the IOCBs from the completions list */ 10141 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10142 IOERR_SLI_ABORTED); 10143 } 10144 10145 /** 10146 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 10147 * @phba: pointer to lpfc hba data structure. 10148 * 10149 * This routine aborts all the IOCBs currently on the driver internal 10150 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 10151 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 10152 * list, removes IOCBs off the list, set the status feild to 10153 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 10154 * the IOCB. 10155 **/ 10156 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 10157 { 10158 LIST_HEAD(completions); 10159 10160 spin_lock_irq(&phba->hbalock); 10161 list_splice_init(&phba->fabric_iocb_list, &completions); 10162 spin_unlock_irq(&phba->hbalock); 10163 10164 /* Cancel all the IOCBs from the completions list */ 10165 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10166 IOERR_SLI_ABORTED); 10167 } 10168 10169 /** 10170 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 10171 * @vport: pointer to lpfc vport data structure. 10172 * 10173 * This routine is invoked by the vport cleanup for deletions and the cleanup 10174 * for an ndlp on removal. 10175 **/ 10176 void 10177 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 10178 { 10179 struct lpfc_hba *phba = vport->phba; 10180 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 10181 unsigned long iflag = 0; 10182 10183 spin_lock_irqsave(&phba->hbalock, iflag); 10184 spin_lock(&phba->sli4_hba.sgl_list_lock); 10185 list_for_each_entry_safe(sglq_entry, sglq_next, 10186 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 10187 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 10188 lpfc_nlp_put(sglq_entry->ndlp); 10189 sglq_entry->ndlp = NULL; 10190 } 10191 } 10192 spin_unlock(&phba->sli4_hba.sgl_list_lock); 10193 spin_unlock_irqrestore(&phba->hbalock, iflag); 10194 return; 10195 } 10196 10197 /** 10198 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 10199 * @phba: pointer to lpfc hba data structure. 10200 * @axri: pointer to the els xri abort wcqe structure. 10201 * 10202 * This routine is invoked by the worker thread to process a SLI4 slow-path 10203 * ELS aborted xri. 
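 *
 * If the aborted XRI matches an sglq on the aborted-ELS sgl list, the sglq
 * is returned to the free list, an RRQ is set active for the node (if any),
 * and the worker thread is woken when the ELS ring still has transmit work
 * queued. Otherwise the corresponding active sglq, if one exists, is simply
 * marked SGL_XRI_ABORTED.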
/**
 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the els xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 slow-path
 * ELS aborted xri.
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;

	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry_safe(sglq_entry, sglq_next,
			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock(&phba->sli4_hba.sgl_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);

			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					sglq_entry->sli4_lxritag,
					rxid, 1);
				lpfc_nlp_put(ndlp);
			}

			/* Check if TXQ queue needs to be serviced */
			if (pring && !list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

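/*
 * Note: the lpfc_set_rrq_active() call in lpfc_sli4_els_xri_aborted()
 * above quarantines the aborted exchange: the XRI is tracked as having an
 * active recovery qualifier for that node until the RRQ resolves, so the
 * same XRI is not handed back out to that node in the meantime. The SGL
 * allocation path is expected to check this state before reusing an entry;
 * a hedged sketch, where the helper name and exact signature are
 * assumptions (illustration only, not driver code):
 *
 *	if (ndlp && lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag))
 *		sglq = NULL;
 *
 * i.e. treat that XRI as unavailable for the node until the recovery
 * qualifier clears.
 */
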
/* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
 * @vport: pointer to virtual port object.
 * @ndlp: nodelist pointer for the impacted node.
 *
 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
 * the driver is required to send a LOGO to the remote node before it
 * attempts to recover its login to the remote node.
 */
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
			   struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	unsigned long flags = 0;

	shost = lpfc_shost_from_vport(vport);
	phba = vport->phba;
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI, "3093 No rport recovery needed. "
				"rport in state 0x%x\n", ndlp->nlp_state);
		return;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"3094 Start rport recovery on shost id 0x%x "
			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
			"flags 0x%x\n",
			shost->host_no, ndlp->nlp_DID,
			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
			ndlp->nlp_flag);
	/*
	 * The rport is not responding. Remove the FCP-2 flag to prevent
	 * an ADISC in the follow-up recovery code.
	 */
	spin_lock_irqsave(&ndlp->lock, flags);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
	spin_unlock_irqrestore(&ndlp->lock, flags);
	lpfc_unreg_rpi(vport, ndlp);
}

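/*
 * Note: lpfc_sli_abts_recover_port() above only arms the recovery.
 * NLP_ISSUE_LOGO is set under ndlp->lock and the RPI is unregistered; the
 * LOGO itself is expected to go out from the unreg-RPI completion handling,
 * after which the normal re-login (PLOGI) path restores the session.
 * Callers in the abort handling paths simply invoke it once the affected
 * node has been identified, roughly (illustration only, not driver code):
 *
 *	if (ndlp)
 *		lpfc_sli_abts_recover_port(vport, ndlp);
 */
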