/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is LPFC_VPORT_READY, the request for checking host link
 * attention event will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
 * passed into the routine for discovery state machine to issue an Extended
 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines and the ELS command-specific fields will be later set up by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else if (elscmd == ELS_CMD_LOGO)
			icmd->ulpTimeout = phba->fc_ratov;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
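	/* All ELS traffic built by this routine is sent as Class 3 service */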
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_no_ndlp;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_no_ndlp:
	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
460 **/ 461 int 462 lpfc_issue_reg_vfi(struct lpfc_vport *vport) 463 { 464 struct lpfc_hba *phba = vport->phba; 465 LPFC_MBOXQ_t *mboxq = NULL; 466 struct lpfc_nodelist *ndlp; 467 struct lpfc_dmabuf *dmabuf = NULL; 468 int rc = 0; 469 470 /* move forward in case of SLI4 FC port loopback test and pt2pt mode */ 471 if ((phba->sli_rev == LPFC_SLI_REV4) && 472 !(phba->link_flag & LS_LOOPBACK_MODE) && 473 !(vport->fc_flag & FC_PT2PT)) { 474 ndlp = lpfc_findnode_did(vport, Fabric_DID); 475 if (!ndlp) { 476 rc = -ENODEV; 477 goto fail; 478 } 479 } 480 481 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 482 if (!mboxq) { 483 rc = -ENOMEM; 484 goto fail; 485 } 486 487 /* Supply CSP's only if we are fabric connect or pt-to-pt connect */ 488 if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) { 489 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 490 if (!dmabuf) { 491 rc = -ENOMEM; 492 goto fail; 493 } 494 dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); 495 if (!dmabuf->virt) { 496 rc = -ENOMEM; 497 goto fail; 498 } 499 memcpy(dmabuf->virt, &phba->fc_fabparam, 500 sizeof(struct serv_parm)); 501 } 502 503 vport->port_state = LPFC_FABRIC_CFG_LINK; 504 if (dmabuf) 505 lpfc_reg_vfi(mboxq, vport, dmabuf->phys); 506 else 507 lpfc_reg_vfi(mboxq, vport, 0); 508 509 mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; 510 mboxq->vport = vport; 511 mboxq->ctx_buf = dmabuf; 512 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 513 if (rc == MBX_NOT_FINISHED) { 514 rc = -ENXIO; 515 goto fail; 516 } 517 return 0; 518 519 fail: 520 if (mboxq) 521 mempool_free(mboxq, phba->mbox_mem_pool); 522 if (dmabuf) { 523 if (dmabuf->virt) 524 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 525 kfree(dmabuf); 526 } 527 528 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 529 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 530 "0289 Issue Register VFI failed: Err %d\n", rc); 531 return rc; 532 } 533 534 /** 535 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login 536 * @vport: pointer to a host virtual N_Port data structure. 537 * 538 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for 539 * the @vport. This mailbox command is necessary for SLI4 port only. 540 * 541 * Return code 542 * 0 - successfully issued REG_VFI for @vport 543 * A failure code otherwise. 544 **/ 545 int 546 lpfc_issue_unreg_vfi(struct lpfc_vport *vport) 547 { 548 struct lpfc_hba *phba = vport->phba; 549 struct Scsi_Host *shost; 550 LPFC_MBOXQ_t *mboxq; 551 int rc; 552 553 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 554 if (!mboxq) { 555 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 556 "2556 UNREG_VFI mbox allocation failed" 557 "HBA state x%x\n", phba->pport->port_state); 558 return -ENOMEM; 559 } 560 561 lpfc_unreg_vfi(mboxq, vport); 562 mboxq->vport = vport; 563 mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl; 564 565 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 566 if (rc == MBX_NOT_FINISHED) { 567 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 568 "2557 UNREG_VFI issue mbox failed rc x%x " 569 "HBA state x%x\n", 570 rc, phba->pport->port_state); 571 mempool_free(mboxq, phba->mbox_mem_pool); 572 return -EIO; 573 } 574 575 shost = lpfc_shost_from_vport(vport); 576 spin_lock_irq(shost->host_lock); 577 vport->fc_flag &= ~FC_VFI_REGISTERED; 578 spin_unlock_irq(shost->host_lock); 579 return 0; 580 } 581 582 /** 583 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. 584 * @vport: pointer to a host virtual N_Port data structure. 
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname, or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the FCID,
 * Fabric portname, or Fabric nodename has changed in the completion service
 * parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename, and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename, or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					&vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
				   !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
			/*
			 * Driver needs to re-reg VPI in order for f/w
			 * to update the MAC address.
			 */
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
			lpfc_register_new_vport(phba, vport, ndlp);
			return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					"3135 Need register VFI: (x%x/%x)\n",
					vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI.
 * If retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"FLOGI cmpl:      status:x%x/x%x state:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 irsp->ulpStatus, irsp->un.ulpWord[4],
					 irsp->ulpTimeout, phba->hba_flag,
					 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 cmdiocb->sli4_xritag, irsp->ulpTimeout);

		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
					IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
			phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	lpfc_nlp_put(ndlp);
	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp;
	uint32_t *pcmd;
	uint32_t cmd;

	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
	cmd = *pcmd;
	irsp = &rspiocb->iocb;

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
			cmdiocb->iocb_flag);

	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI:     opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto out;

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR)
		lpfc_nlp_put(ndlp);

	phba->hba_flag |= HBA_FLOGI_ISSUED;

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
						phba->defer_flogi_acc_ox_id;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;

		vport->fc_myDID = did;
	}

	if (!rc)
		return 0;
out:
	lpfc_els_free_iocb(phba, elsiocb);
	return 1;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
1420 */ 1421 spin_lock_irq(&phba->hbalock); 1422 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 1423 icmd = &iocb->iocb; 1424 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 1425 ndlp = (struct lpfc_nodelist *)(iocb->context1); 1426 if (ndlp && (ndlp->nlp_DID == Fabric_DID)) 1427 lpfc_sli_issue_abort_iotag(phba, pring, iocb, 1428 NULL); 1429 } 1430 } 1431 /* Make sure HBA is alive */ 1432 lpfc_issue_hb_tmo(phba); 1433 1434 spin_unlock_irq(&phba->hbalock); 1435 1436 return 0; 1437 } 1438 1439 /** 1440 * lpfc_initial_flogi - Issue an initial fabric login for a vport 1441 * @vport: pointer to a host virtual N_Port data structure. 1442 * 1443 * This routine issues an initial Fabric Login (FLOGI) for the @vport 1444 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1445 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1446 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1447 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine 1448 * is then invoked with the @vport and the ndlp to perform the FLOGI for the 1449 * @vport. 1450 * 1451 * Return code 1452 * 0 - failed to issue initial flogi for @vport 1453 * 1 - successfully issued initial flogi for @vport 1454 **/ 1455 int 1456 lpfc_initial_flogi(struct lpfc_vport *vport) 1457 { 1458 struct lpfc_nodelist *ndlp; 1459 1460 vport->port_state = LPFC_FLOGI; 1461 lpfc_set_disctmo(vport); 1462 1463 /* First look for the Fabric ndlp */ 1464 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1465 if (!ndlp) { 1466 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1467 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1468 if (!ndlp) 1469 return 0; 1470 /* Set the node type */ 1471 ndlp->nlp_type |= NLP_FABRIC; 1472 1473 /* Put ndlp onto node list */ 1474 lpfc_enqueue_node(vport, ndlp); 1475 } 1476 1477 if (lpfc_issue_els_flogi(vport, ndlp, 0)) { 1478 /* This decrement of reference count to node shall kick off 1479 * the release of the node. 1480 */ 1481 lpfc_nlp_put(ndlp); 1482 return 0; 1483 } 1484 return 1; 1485 } 1486 1487 /** 1488 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport 1489 * @vport: pointer to a host virtual N_Port data structure. 1490 * 1491 * This routine issues an initial Fabric Discover (FDISC) for the @vport 1492 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from 1493 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and 1494 * put it into the @vport's ndlp list. If an inactive ndlp found on the list, 1495 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine 1496 * is then invoked with the @vport and the ndlp to perform the FDISC for the 1497 * @vport. 1498 * 1499 * Return code 1500 * 0 - failed to issue initial fdisc for @vport 1501 * 1 - successfully issued initial fdisc for @vport 1502 **/ 1503 int 1504 lpfc_initial_fdisc(struct lpfc_vport *vport) 1505 { 1506 struct lpfc_nodelist *ndlp; 1507 1508 /* First look for the Fabric ndlp */ 1509 ndlp = lpfc_findnode_did(vport, Fabric_DID); 1510 if (!ndlp) { 1511 /* Cannot find existing Fabric ndlp, so allocate a new one */ 1512 ndlp = lpfc_nlp_init(vport, Fabric_DID); 1513 if (!ndlp) 1514 return 0; 1515 1516 /* NPIV is only supported in Fabrics. 
		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from not-matching
 * or inactive "new_ndlp" on the vport node list is assigned to the nlp_DID
 * of the @ndlp. This is because the release of @ndlp is actually to put it
 * into an inactive state on the vport node list and the vport node list
 * management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
	uint32_t keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN.  Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (new_ndlp == ndlp)
		return ndlp;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
			 "new_ndlp x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
		if (!new_ndlp) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
	} else {
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    active_rrqs_xri_bitmap)
			memcpy(active_rrqs_xri_bitmap,
			       new_ndlp->active_rrqs_xri_bitmap,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	/* At this point in this routine, we know new_ndlp will be
	 * returned. however, any previous GID_FTs that were done
	 * would have updated nlp_fc4_type in ndlp, so we must ensure
	 * new_ndlp has the right value.
1673 */ 1674 if (vport->fc_flag & FC_FABRIC) { 1675 keep_nlp_fc4_type = new_ndlp->nlp_fc4_type; 1676 new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type; 1677 } 1678 1679 lpfc_unreg_rpi(vport, new_ndlp); 1680 new_ndlp->nlp_DID = ndlp->nlp_DID; 1681 new_ndlp->nlp_prev_state = ndlp->nlp_prev_state; 1682 if (phba->sli_rev == LPFC_SLI_REV4) 1683 memcpy(new_ndlp->active_rrqs_xri_bitmap, 1684 ndlp->active_rrqs_xri_bitmap, 1685 phba->cfg_rrq_xri_bitmap_sz); 1686 1687 /* Lock both ndlps */ 1688 spin_lock_irq(&ndlp->lock); 1689 spin_lock_irq(&new_ndlp->lock); 1690 keep_new_nlp_flag = new_ndlp->nlp_flag; 1691 keep_nlp_flag = ndlp->nlp_flag; 1692 new_ndlp->nlp_flag = ndlp->nlp_flag; 1693 1694 /* if new_ndlp had NLP_UNREG_INP set, keep it */ 1695 if (keep_new_nlp_flag & NLP_UNREG_INP) 1696 new_ndlp->nlp_flag |= NLP_UNREG_INP; 1697 else 1698 new_ndlp->nlp_flag &= ~NLP_UNREG_INP; 1699 1700 /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ 1701 if (keep_new_nlp_flag & NLP_RPI_REGISTERED) 1702 new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1703 else 1704 new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1705 1706 ndlp->nlp_flag = keep_new_nlp_flag; 1707 1708 /* if ndlp had NLP_UNREG_INP set, keep it */ 1709 if (keep_nlp_flag & NLP_UNREG_INP) 1710 ndlp->nlp_flag |= NLP_UNREG_INP; 1711 else 1712 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1713 1714 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1715 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1716 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1717 else 1718 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1719 1720 spin_unlock_irq(&new_ndlp->lock); 1721 spin_unlock_irq(&ndlp->lock); 1722 1723 /* Set nlp_states accordingly */ 1724 keep_nlp_state = new_ndlp->nlp_state; 1725 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1726 1727 /* interchange the nvme remoteport structs */ 1728 keep_nrport = new_ndlp->nrport; 1729 new_ndlp->nrport = ndlp->nrport; 1730 1731 /* Move this back to NPR state */ 1732 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1733 /* The new_ndlp is replacing ndlp totally, so we need 1734 * to put ndlp on UNUSED list and try to free it. 1735 */ 1736 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1737 "3179 PLOGI confirm NEW: %x %x\n", 1738 new_ndlp->nlp_DID, keepDID); 1739 1740 /* Two ndlps cannot have the same did on the nodelist. 1741 * Note: for this case, ndlp has a NULL WWPN so setting 1742 * the nlp_fc4_type isn't required. 1743 */ 1744 ndlp->nlp_DID = keepDID; 1745 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1746 if (phba->sli_rev == LPFC_SLI_REV4 && 1747 active_rrqs_xri_bitmap) 1748 memcpy(ndlp->active_rrqs_xri_bitmap, 1749 active_rrqs_xri_bitmap, 1750 phba->cfg_rrq_xri_bitmap_sz); 1751 1752 } else { 1753 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1754 "3180 PLOGI confirm SWAP: %x %x\n", 1755 new_ndlp->nlp_DID, keepDID); 1756 1757 lpfc_unreg_rpi(vport, ndlp); 1758 1759 /* Two ndlps cannot have the same did and the fc4 1760 * type must be transferred because the ndlp is in 1761 * flight. 
1762 */ 1763 ndlp->nlp_DID = keepDID; 1764 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1765 1766 if (phba->sli_rev == LPFC_SLI_REV4 && 1767 active_rrqs_xri_bitmap) 1768 memcpy(ndlp->active_rrqs_xri_bitmap, 1769 active_rrqs_xri_bitmap, 1770 phba->cfg_rrq_xri_bitmap_sz); 1771 1772 /* Since we are switching over to the new_ndlp, 1773 * reset the old ndlp state 1774 */ 1775 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1776 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1777 keep_nlp_state = NLP_STE_NPR_NODE; 1778 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1779 ndlp->nrport = keep_nrport; 1780 } 1781 1782 /* 1783 * If ndlp is not associated with any rport we can drop it here else 1784 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1785 */ 1786 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1787 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1788 1789 if (phba->sli_rev == LPFC_SLI_REV4 && 1790 active_rrqs_xri_bitmap) 1791 mempool_free(active_rrqs_xri_bitmap, 1792 phba->active_rrq_pool); 1793 1794 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1795 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1796 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1797 new_ndlp->nlp_fc4_type); 1798 1799 return new_ndlp; 1800 } 1801 1802 /** 1803 * lpfc_end_rscn - Check and handle more rscn for a vport 1804 * @vport: pointer to a host virtual N_Port data structure. 1805 * 1806 * This routine checks whether more Registration State Change 1807 * Notifications (RSCNs) came in while the discovery state machine was in 1808 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1809 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1810 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1811 * handling the RSCNs. 1812 **/ 1813 void 1814 lpfc_end_rscn(struct lpfc_vport *vport) 1815 { 1816 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1817 1818 if (vport->fc_flag & FC_RSCN_MODE) { 1819 /* 1820 * Check to see if more RSCNs came in while we were 1821 * processing this one. 1822 */ 1823 if (vport->fc_rscn_id_cnt || 1824 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1825 lpfc_els_handle_rscn(vport); 1826 else { 1827 spin_lock_irq(shost->host_lock); 1828 vport->fc_flag &= ~FC_RSCN_MODE; 1829 spin_unlock_irq(shost->host_lock); 1830 } 1831 } 1832 } 1833 1834 /** 1835 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1836 * @phba: pointer to lpfc hba data structure. 1837 * @cmdiocb: pointer to lpfc command iocb data structure. 1838 * @rspiocb: pointer to lpfc response iocb data structure. 1839 * 1840 * This routine will call the clear rrq function to free the rrq and 1841 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1842 * exist then the clear_rrq is still called because the rrq needs to 1843 * be freed. 
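 *
 * In either case the command IOCB is freed here and the node reference
 * taken when the RRQ was issued is released with lpfc_nlp_put().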
1844 **/
1845
1846 static void
1847 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1848 struct lpfc_iocbq *rspiocb)
1849 {
1850 struct lpfc_vport *vport = cmdiocb->vport;
1851 IOCB_t *irsp;
1852 struct lpfc_nodelist *ndlp;
1853 struct lpfc_node_rrq *rrq;
1854
1855 /* we pass cmdiocb to state machine which needs rspiocb as well */
1856 rrq = cmdiocb->context_un.rrq;
1857 cmdiocb->context_un.rsp_iocb = rspiocb;
1858
1859 irsp = &rspiocb->iocb;
1860 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1861 "RRQ cmpl: status:x%x/x%x did:x%x",
1862 irsp->ulpStatus, irsp->un.ulpWord[4],
1863 irsp->un.elsreq64.remoteID);
1864
1865 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1866 if (!ndlp || ndlp != rrq->ndlp) {
1867 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1868 "2882 RRQ completes to NPort x%x "
1869 "with no ndlp. Data: x%x x%x x%x\n",
1870 irsp->un.elsreq64.remoteID,
1871 irsp->ulpStatus, irsp->un.ulpWord[4],
1872 irsp->ulpIoTag);
1873 goto out;
1874 }
1875
1876 /* rrq completes to NPort <nlp_DID> */
1877 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1878 "2880 RRQ completes to NPort x%x "
1879 "Data: x%x x%x x%x x%x x%x\n",
1880 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1881 irsp->ulpTimeout, rrq->xritag, rrq->rxid);
1882
1883 if (irsp->ulpStatus) {
1884 /* Check for retry */
1885 /* RRQ failed. Don't print the vport to vport rjts */
1886 if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1887 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1888 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1889 (phba)->pport->cfg_log_verbose & LOG_ELS)
1890 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1891 "2881 RRQ failure DID:%06X Status:"
1892 "x%x/x%x\n",
1893 ndlp->nlp_DID, irsp->ulpStatus,
1894 irsp->un.ulpWord[4]);
1895 }
1896 out:
1897 if (rrq)
1898 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1899
1900 lpfc_els_free_iocb(phba, cmdiocb);
1901 lpfc_nlp_put(ndlp);
1902 return;
1903 }
1904 /**
1905 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1906 * @phba: pointer to lpfc hba data structure.
1907 * @cmdiocb: pointer to lpfc command iocb data structure.
1908 * @rspiocb: pointer to lpfc response iocb data structure.
1909 *
1910 * This routine is the completion callback function for issuing the Port
1911 * Login (PLOGI) command. For PLOGI completion, there must be an active
1912 * ndlp on the vport node list that matches the remote node ID from the
1913 * PLOGI response IOCB. If no such ndlp exists, the PLOGI is simply
1914 * ignored and the command IOCB released. The PLOGI response IOCB status is
1915 * checked for error conditions. If an error status is reported, a PLOGI
1916 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1917 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1918 * the ndlp and the NLP_EVT_CMPL_PLOGI event posted to the Discovery State
1919 * Machine (DSM) for this PLOGI completion. Finally, it checks whether
1920 * there are additional N_Port nodes on the vport that need to perform
1921 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1922 * PLOGIs.
1923 **/ 1924 static void 1925 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1926 struct lpfc_iocbq *rspiocb) 1927 { 1928 struct lpfc_vport *vport = cmdiocb->vport; 1929 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1930 IOCB_t *irsp; 1931 struct lpfc_nodelist *ndlp, *free_ndlp; 1932 struct lpfc_dmabuf *prsp; 1933 int disc; 1934 1935 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1936 cmdiocb->context_un.rsp_iocb = rspiocb; 1937 1938 irsp = &rspiocb->iocb; 1939 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1940 "PLOGI cmpl: status:x%x/x%x did:x%x", 1941 irsp->ulpStatus, irsp->un.ulpWord[4], 1942 irsp->un.elsreq64.remoteID); 1943 1944 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1945 if (!ndlp) { 1946 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1947 "0136 PLOGI completes to NPort x%x " 1948 "with no ndlp. Data: x%x x%x x%x\n", 1949 irsp->un.elsreq64.remoteID, 1950 irsp->ulpStatus, irsp->un.ulpWord[4], 1951 irsp->ulpIoTag); 1952 goto out_freeiocb; 1953 } 1954 1955 /* Since ndlp can be freed in the disc state machine, note if this node 1956 * is being used during discovery. 1957 */ 1958 spin_lock_irq(&ndlp->lock); 1959 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1960 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1961 spin_unlock_irq(&ndlp->lock); 1962 1963 /* PLOGI completes to NPort <nlp_DID> */ 1964 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1965 "0102 PLOGI completes to NPort x%06x " 1966 "Data: x%x x%x x%x x%x x%x\n", 1967 ndlp->nlp_DID, ndlp->nlp_fc4_type, 1968 irsp->ulpStatus, irsp->un.ulpWord[4], 1969 disc, vport->num_disc_nodes); 1970 1971 /* Check to see if link went down during discovery */ 1972 if (lpfc_els_chk_latt(vport)) { 1973 spin_lock_irq(&ndlp->lock); 1974 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1975 spin_unlock_irq(&ndlp->lock); 1976 goto out; 1977 } 1978 1979 if (irsp->ulpStatus) { 1980 /* Check for retry */ 1981 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1982 /* ELS command is being retried */ 1983 if (disc) { 1984 spin_lock_irq(&ndlp->lock); 1985 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1986 spin_unlock_irq(&ndlp->lock); 1987 } 1988 goto out; 1989 } 1990 /* PLOGI failed Don't print the vport to vport rjts */ 1991 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1992 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1993 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1994 (phba)->pport->cfg_log_verbose & LOG_ELS) 1995 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1996 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1997 ndlp->nlp_DID, irsp->ulpStatus, 1998 irsp->un.ulpWord[4]); 1999 2000 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2001 if (lpfc_error_lost_link(irsp)) 2002 goto check_plogi; 2003 else 2004 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2005 NLP_EVT_CMPL_PLOGI); 2006 2007 /* As long as this node is not registered with the scsi or nvme 2008 * transport, it is no longer an active node. Otherwise 2009 * devloss handles the final cleanup. 
2010 */
2011 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2012 spin_lock_irq(&ndlp->lock);
2013 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2014 spin_unlock_irq(&ndlp->lock);
2015 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2016 NLP_EVT_DEVICE_RM);
2017 }
2018 } else {
2019 /* Good status, call state machine */
2020 prsp = list_entry(((struct lpfc_dmabuf *)
2021 cmdiocb->context2)->list.next,
2022 struct lpfc_dmabuf, list);
2023 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
2024 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2025 NLP_EVT_CMPL_PLOGI);
2026 }
2027
2028 check_plogi:
2029 if (disc && vport->num_disc_nodes) {
2030 /* Check to see if there are more PLOGIs to be sent */
2031 lpfc_more_plogi(vport);
2032
2033 if (vport->num_disc_nodes == 0) {
2034 spin_lock_irq(shost->host_lock);
2035 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2036 spin_unlock_irq(shost->host_lock);
2037
2038 lpfc_can_disctmo(vport);
2039 lpfc_end_rscn(vport);
2040 }
2041 }
2042
2043 out:
2044 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
2045 "PLOGI Cmpl PUT: did:x%x refcnt %d",
2046 ndlp->nlp_DID, kref_read(&ndlp->kref), 0);
2047
2048 out_freeiocb:
2049 /* Release the reference on the original I/O request. */
2050 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
2051
2052 lpfc_els_free_iocb(phba, cmdiocb);
2053 lpfc_nlp_put(free_ndlp);
2054 return;
2055 }
2056
2057 /**
2058 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
2059 * @vport: pointer to a host virtual N_Port data structure.
2060 * @did: destination port identifier.
2061 * @retry: number of retries to the command IOCB.
2062 *
2063 * This routine issues a Port Login (PLOGI) command to a remote N_Port
2064 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
2065 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
2066 * This routine constructs the proper fields of the PLOGI IOCB and invokes
2067 * the lpfc_sli_issue_iocb() routine to send out the PLOGI ELS command.
2068 *
2069 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2070 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2071 * will be stored into the context1 field of the IOCB for the completion
2072 * callback function to the PLOGI ELS command.
2073 *
2074 * Return code
2075 * 0 - Successfully issued a plogi for @vport
2076 * 1 - failed to issue a plogi for @vport
2077 **/
2078 int
2079 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
2080 {
2081 struct lpfc_hba *phba = vport->phba;
2082 struct serv_parm *sp;
2083 struct lpfc_nodelist *ndlp;
2084 struct lpfc_iocbq *elsiocb;
2085 uint8_t *pcmd;
2086 uint16_t cmdsize;
2087 int ret;
2088
2089 ndlp = lpfc_findnode_did(vport, did);
2090
2091 if (ndlp) {
2092 /* Defer the processing of the issue PLOGI until after the
2093 * outstanding UNREG_RPI mbox command completes, unless we
2094 * are going offline.
This logic does not apply for Fabric DIDs 2095 */ 2096 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2097 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2098 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2099 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2100 "4110 Issue PLOGI x%x deferred " 2101 "on NPort x%x rpi x%x Data: x%px\n", 2102 ndlp->nlp_defer_did, ndlp->nlp_DID, 2103 ndlp->nlp_rpi, ndlp); 2104 2105 /* We can only defer 1st PLOGI */ 2106 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2107 ndlp->nlp_defer_did = did; 2108 return 0; 2109 } 2110 } 2111 2112 /* If ndlp is not NULL, we will bump the reference count on it */ 2113 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2114 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2115 ELS_CMD_PLOGI); 2116 if (!elsiocb) 2117 return 1; 2118 2119 spin_lock_irq(&ndlp->lock); 2120 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2121 spin_unlock_irq(&ndlp->lock); 2122 2123 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2124 2125 /* For PLOGI request, remainder of payload is service parameters */ 2126 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2127 pcmd += sizeof(uint32_t); 2128 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2129 sp = (struct serv_parm *) pcmd; 2130 2131 /* 2132 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2133 * to device on remote loops work. 2134 */ 2135 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2136 sp->cmn.altBbCredit = 1; 2137 2138 if (sp->cmn.fcphLow < FC_PH_4_3) 2139 sp->cmn.fcphLow = FC_PH_4_3; 2140 2141 if (sp->cmn.fcphHigh < FC_PH3) 2142 sp->cmn.fcphHigh = FC_PH3; 2143 2144 sp->cmn.valid_vendor_ver_level = 0; 2145 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2146 sp->cmn.bbRcvSizeMsb &= 0xF; 2147 2148 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2149 "Issue PLOGI: did:x%x", 2150 did, 0, 0); 2151 2152 /* If our firmware supports this feature, convey that 2153 * information to the target using the vendor specific field. 2154 */ 2155 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2156 sp->cmn.valid_vendor_ver_level = 1; 2157 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2158 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2159 } 2160 2161 phba->fc_stat.elsXmitPLOGI++; 2162 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2163 2164 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2165 "Issue PLOGI: did:x%x refcnt %d", 2166 did, kref_read(&ndlp->kref), 0); 2167 elsiocb->context1 = lpfc_nlp_get(ndlp); 2168 if (!elsiocb->context1) 2169 goto io_err; 2170 2171 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2172 if (ret) { 2173 lpfc_nlp_put(ndlp); 2174 goto io_err; 2175 } 2176 return 0; 2177 2178 io_err: 2179 lpfc_els_free_iocb(phba, elsiocb); 2180 return 1; 2181 } 2182 2183 /** 2184 * lpfc_cmpl_els_prli - Completion callback function for prli 2185 * @phba: pointer to lpfc hba data structure. 2186 * @cmdiocb: pointer to lpfc command iocb data structure. 2187 * @rspiocb: pointer to lpfc response iocb data structure. 2188 * 2189 * This routine is the completion callback function for a Process Login 2190 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2191 * status. If there is error status reported, PRLI retry shall be attempted 2192 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2193 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2194 * ndlp to mark the PRLI completion. 
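 *
 * Because the driver may issue separate PRLIs for the FCP and NVMe FC4
 * types, the vport and ndlp PRLI counters are decremented here for each
 * completion before the response status is examined.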
2195 **/ 2196 static void 2197 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2198 struct lpfc_iocbq *rspiocb) 2199 { 2200 struct lpfc_vport *vport = cmdiocb->vport; 2201 IOCB_t *irsp; 2202 struct lpfc_nodelist *ndlp; 2203 char *mode; 2204 u32 loglevel; 2205 2206 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2207 cmdiocb->context_un.rsp_iocb = rspiocb; 2208 2209 irsp = &(rspiocb->iocb); 2210 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2211 spin_lock_irq(&ndlp->lock); 2212 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2213 2214 /* Driver supports multiple FC4 types. Counters matter. */ 2215 vport->fc_prli_sent--; 2216 ndlp->fc4_prli_sent--; 2217 spin_unlock_irq(&ndlp->lock); 2218 2219 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2220 "PRLI cmpl: status:x%x/x%x did:x%x", 2221 irsp->ulpStatus, irsp->un.ulpWord[4], 2222 ndlp->nlp_DID); 2223 2224 /* PRLI completes to NPort <nlp_DID> */ 2225 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2226 "0103 PRLI completes to NPort x%06x " 2227 "Data: x%x x%x x%x x%x\n", 2228 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2229 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2230 2231 /* Check to see if link went down during discovery */ 2232 if (lpfc_els_chk_latt(vport)) 2233 goto out; 2234 2235 if (irsp->ulpStatus) { 2236 /* Check for retry */ 2237 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2238 /* ELS command is being retried */ 2239 goto out; 2240 } 2241 2242 /* If we don't send GFT_ID to Fabric, a PRLI error 2243 * could be expected. 2244 */ 2245 if ((vport->fc_flag & FC_FABRIC) || 2246 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2247 mode = KERN_ERR; 2248 loglevel = LOG_TRACE_EVENT; 2249 } else { 2250 mode = KERN_INFO; 2251 loglevel = LOG_ELS; 2252 } 2253 2254 /* PRLI failed */ 2255 lpfc_printf_vlog(vport, mode, loglevel, 2256 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2257 "data: x%x\n", 2258 ndlp->nlp_DID, irsp->ulpStatus, 2259 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2260 2261 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2262 if (lpfc_error_lost_link(irsp)) 2263 goto out; 2264 else 2265 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2266 NLP_EVT_CMPL_PRLI); 2267 2268 /* As long as this node is not registered with the SCSI 2269 * or NVMe transport and no other PRLIs are outstanding, 2270 * it is no longer an active node. Otherwise devloss 2271 * handles the final cleanup. 2272 */ 2273 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2274 !ndlp->fc4_prli_sent) { 2275 spin_lock_irq(&ndlp->lock); 2276 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2277 spin_unlock_irq(&ndlp->lock); 2278 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2279 NLP_EVT_DEVICE_RM); 2280 } 2281 } else { 2282 /* Good status, call state machine. However, if another 2283 * PRLI is outstanding, don't call the state machine 2284 * because final disposition to Mapped or Unmapped is 2285 * completed there. 2286 */ 2287 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2288 NLP_EVT_CMPL_PRLI); 2289 } 2290 2291 out: 2292 lpfc_els_free_iocb(phba, cmdiocb); 2293 lpfc_nlp_put(ndlp); 2294 return; 2295 } 2296 2297 /** 2298 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2299 * @vport: pointer to a host virtual N_Port data structure. 2300 * @ndlp: pointer to a node-list data structure. 2301 * @retry: number of retries to the command IOCB. 2302 * 2303 * This routine issues a Process Login (PRLI) ELS command for the 2304 * @vport. 
The PRLI service parameters are set up in the payload of the
2305 * PRLI Request command and the pointer to the lpfc_cmpl_els_prli() routine
2306 * is put into the IOCB completion callback function field before invoking the
2307 * routine lpfc_sli_issue_iocb() to send out the PRLI command.
2308 *
2309 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2310 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2311 * will be stored into the context1 field of the IOCB for the completion
2312 * callback function to the PRLI ELS command.
2313 *
2314 * Return code
2315 * 0 - successfully issued prli iocb command for @vport
2316 * 1 - failed to issue prli iocb command for @vport
2317 **/
2318 int
2319 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2320 uint8_t retry)
2321 {
2322 int rc = 0;
2323 struct lpfc_hba *phba = vport->phba;
2324 PRLI *npr;
2325 struct lpfc_nvme_prli *npr_nvme;
2326 struct lpfc_iocbq *elsiocb;
2327 uint8_t *pcmd;
2328 uint16_t cmdsize;
2329 u32 local_nlp_type, elscmd;
2330
2331 /*
2332 * If we are in RSCN mode, the FC4 types supported from a
2333 * previous GFT_ID command may not be accurate. So, if we
2334 * are an NVME Initiator, always look for the possibility of
2335 * the remote NPort being an NVME Target.
2336 */
2337 if (phba->sli_rev == LPFC_SLI_REV4 &&
2338 vport->fc_flag & FC_RSCN_MODE &&
2339 vport->nvmei_support)
2340 ndlp->nlp_fc4_type |= NLP_FC4_NVME;
2341 local_nlp_type = ndlp->nlp_fc4_type;
2342
2343 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp
2344 * fields here before any of them can complete.
2345 */
2346 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
2347 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
2348 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
2349 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC);
2350 ndlp->nvme_fb_size = 0;
2351
2352 send_next_prli:
2353 if (local_nlp_type & NLP_FC4_FCP) {
2354 /* Payload is 4 + 16 = 20 (0x14) bytes. */
2355 cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
2356 elscmd = ELS_CMD_PRLI;
2357 } else if (local_nlp_type & NLP_FC4_NVME) {
2358 /* Payload is 4 + 20 = 24 (0x18) bytes. */
2359 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
2360 elscmd = ELS_CMD_NVMEPRLI;
2361 } else {
2362 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2363 "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
2364 ndlp->nlp_fc4_type, ndlp->nlp_DID);
2365 return 1;
2366 }
2367
2368 /* SLI3 ports don't support NVME. If this rport is a strict NVME
2369 * FC4 type, implicitly LOGO.
2370 */
2371 if (phba->sli_rev == LPFC_SLI_REV3 &&
2372 ndlp->nlp_fc4_type == NLP_FC4_NVME) {
2373 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2374 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
2375 ndlp->nlp_type);
2376 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
2377 return 1;
2378 }
2379
2380 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2381 ndlp->nlp_DID, elscmd);
2382 if (!elsiocb)
2383 return 1;
2384
2385 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2386
2387 /* For PRLI request, remainder of payload is service parameters */
2388 memset(pcmd, 0, cmdsize);
2389
2390 if (local_nlp_type & NLP_FC4_FCP) {
2391 /* Remainder of payload is FCP PRLI parameter page.
2392 * Note: this data structure is defined as
2393 * BE/LE in the structure definition so no
2394 * byte swap call is made.
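 * The NVME PRLI parameter page built in the branch below, by contrast,
 * uses the bf_set() macros and is byte swapped before transmit.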
2395 */ 2396 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2397 pcmd += sizeof(uint32_t); 2398 npr = (PRLI *)pcmd; 2399 2400 /* 2401 * If our firmware version is 3.20 or later, 2402 * set the following bits for FC-TAPE support. 2403 */ 2404 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2405 npr->ConfmComplAllowed = 1; 2406 npr->Retry = 1; 2407 npr->TaskRetryIdReq = 1; 2408 } 2409 npr->estabImagePair = 1; 2410 npr->readXferRdyDis = 1; 2411 if (vport->cfg_first_burst_size) 2412 npr->writeXferRdyDis = 1; 2413 2414 /* For FCP support */ 2415 npr->prliType = PRLI_FCP_TYPE; 2416 npr->initiatorFunc = 1; 2417 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2418 2419 /* Remove FCP type - processed. */ 2420 local_nlp_type &= ~NLP_FC4_FCP; 2421 } else if (local_nlp_type & NLP_FC4_NVME) { 2422 /* Remainder of payload is NVME PRLI parameter page. 2423 * This data structure is the newer definition that 2424 * uses bf macros so a byte swap is required. 2425 */ 2426 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2427 pcmd += sizeof(uint32_t); 2428 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2429 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2430 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2431 if (phba->nsler) { 2432 bf_set(prli_nsler, npr_nvme, 1); 2433 bf_set(prli_conf, npr_nvme, 1); 2434 } 2435 2436 /* Only initiators request first burst. */ 2437 if ((phba->cfg_nvme_enable_fb) && 2438 !phba->nvmet_support) 2439 bf_set(prli_fba, npr_nvme, 1); 2440 2441 if (phba->nvmet_support) { 2442 bf_set(prli_tgt, npr_nvme, 1); 2443 bf_set(prli_disc, npr_nvme, 1); 2444 } else { 2445 bf_set(prli_init, npr_nvme, 1); 2446 bf_set(prli_conf, npr_nvme, 1); 2447 } 2448 2449 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2450 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2451 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2452 2453 /* Remove NVME type - processed. */ 2454 local_nlp_type &= ~NLP_FC4_NVME; 2455 } 2456 2457 phba->fc_stat.elsXmitPRLI++; 2458 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2459 spin_lock_irq(&ndlp->lock); 2460 ndlp->nlp_flag |= NLP_PRLI_SND; 2461 2462 /* The vport counters are used for lpfc_scan_finished, but 2463 * the ndlp is used to track outstanding PRLIs for different 2464 * FC4 types. 2465 */ 2466 vport->fc_prli_sent++; 2467 ndlp->fc4_prli_sent++; 2468 spin_unlock_irq(&ndlp->lock); 2469 2470 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2471 "Issue PRLI: did:x%x refcnt %d", 2472 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2473 elsiocb->context1 = lpfc_nlp_get(ndlp); 2474 if (!elsiocb->context1) 2475 goto io_err; 2476 2477 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2478 if (rc == IOCB_ERROR) 2479 goto node_err; 2480 2481 2482 /* The driver supports 2 FC4 types. Make sure 2483 * a PRLI is issued for all types before exiting. 2484 */ 2485 if (phba->sli_rev == LPFC_SLI_REV4 && 2486 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2487 goto send_next_prli; 2488 else 2489 return 0; 2490 2491 node_err: 2492 lpfc_nlp_put(ndlp); 2493 io_err: 2494 spin_lock_irq(&ndlp->lock); 2495 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2496 spin_unlock_irq(&ndlp->lock); 2497 lpfc_els_free_iocb(phba, elsiocb); 2498 return 1; 2499 } 2500 2501 /** 2502 * lpfc_rscn_disc - Perform rscn discovery for a vport 2503 * @vport: pointer to a host virtual N_Port data structure. 2504 * 2505 * This routine performs Registration State Change Notification (RSCN) 2506 * discovery for a @vport. 
If the @vport's node port recovery count is not
2507 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
2508 * the nodes that need recovery. If no PLOGIs were needed through
2509 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
2510 * invoked to check for and handle any additional RSCNs that came in while
2511 * the current ones were being processed.
2512 **/
2513 static void
2514 lpfc_rscn_disc(struct lpfc_vport *vport)
2515 {
2516 lpfc_can_disctmo(vport);
2517
2518 /* RSCN discovery */
2519 /* go thru NPR nodes and issue ELS PLOGIs */
2520 if (vport->fc_npr_cnt)
2521 if (lpfc_els_disc_plogi(vport))
2522 return;
2523
2524 lpfc_end_rscn(vport);
2525 }
2526
2527 /**
2528 * lpfc_adisc_done - Complete the adisc phase of discovery
2529 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
2530 *
2531 * This function is called when the final ADISC is completed during discovery.
2532 * This function handles clearing link attention or issuing reg_vpi depending
2533 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
2534 * discovery.
2535 * This function is called with no locks held.
2536 **/
2537 static void
2538 lpfc_adisc_done(struct lpfc_vport *vport)
2539 {
2540 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2541 struct lpfc_hba *phba = vport->phba;
2542
2543 /*
2544 * For NPIV, cmpl_reg_vpi will set port_state to READY,
2545 * and continue discovery.
2546 */
2547 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2548 !(vport->fc_flag & FC_RSCN_MODE) &&
2549 (phba->sli_rev < LPFC_SLI_REV4)) {
2550 /* The ADISCs are complete. Doesn't matter if they
2551 * succeeded or failed because the ADISC completion
2552 * routine guarantees to call the state machine and
2553 * the RPI is either unregistered (failed ADISC response)
2554 * or the RPI is still valid and the node is marked
2555 * mapped for a target. The exchanges should be in the
2556 * correct state. This code is specific to SLI3.
2557 */
2558 lpfc_issue_clear_la(phba, vport);
2559 lpfc_issue_reg_vpi(phba, vport);
2560 return;
2561 }
2562 /*
2563 * For SLI2, we need to set port_state to READY
2564 * and continue discovery.
2565 */
2566 if (vport->port_state < LPFC_VPORT_READY) {
2567 /* If we get here, there is nothing to ADISC */
2568 lpfc_issue_clear_la(phba, vport);
2569 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
2570 vport->num_disc_nodes = 0;
2571 /* go thru NPR list, issue ELS PLOGIs */
2572 if (vport->fc_npr_cnt)
2573 lpfc_els_disc_plogi(vport);
2574 if (!vport->num_disc_nodes) {
2575 spin_lock_irq(shost->host_lock);
2576 vport->fc_flag &= ~FC_NDISC_ACTIVE;
2577 spin_unlock_irq(shost->host_lock);
2578 lpfc_can_disctmo(vport);
2579 lpfc_end_rscn(vport);
2580 }
2581 }
2582 vport->port_state = LPFC_VPORT_READY;
2583 } else
2584 lpfc_rscn_disc(vport);
2585 }
2586
2587 /**
2588 * lpfc_more_adisc - Issue more adisc as needed
2589 * @vport: pointer to a host virtual N_Port data structure.
2590 *
2591 * This routine determines whether there are more ndlps on the @vport's
2592 * node list that need to have Address Discovery (ADISC) issued. If so, it
2593 * will invoke the lpfc_els_disc_adisc() routine to issue ADISCs to the
2594 * @vport's remaining nodes that still need them.
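 *
 * When no discovery nodes remain, lpfc_adisc_done() is invoked to finish
 * the ADISC phase and kick off the PLOGI phase of discovery.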
2595 **/ 2596 void 2597 lpfc_more_adisc(struct lpfc_vport *vport) 2598 { 2599 if (vport->num_disc_nodes) 2600 vport->num_disc_nodes--; 2601 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2602 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2603 "0210 Continue discovery with %d ADISCs to go " 2604 "Data: x%x x%x x%x\n", 2605 vport->num_disc_nodes, vport->fc_adisc_cnt, 2606 vport->fc_flag, vport->port_state); 2607 /* Check to see if there are more ADISCs to be sent */ 2608 if (vport->fc_flag & FC_NLP_MORE) { 2609 lpfc_set_disctmo(vport); 2610 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2611 lpfc_els_disc_adisc(vport); 2612 } 2613 if (!vport->num_disc_nodes) 2614 lpfc_adisc_done(vport); 2615 return; 2616 } 2617 2618 /** 2619 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2620 * @phba: pointer to lpfc hba data structure. 2621 * @cmdiocb: pointer to lpfc command iocb data structure. 2622 * @rspiocb: pointer to lpfc response iocb data structure. 2623 * 2624 * This routine is the completion function for issuing the Address Discover 2625 * (ADISC) command. It first checks to see whether link went down during 2626 * the discovery process. If so, the node will be marked as node port 2627 * recovery for issuing discover IOCB by the link attention handler and 2628 * exit. Otherwise, the response status is checked. If error was reported 2629 * in the response status, the ADISC command shall be retried by invoking 2630 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2631 * the response status, the state machine is invoked to set transition 2632 * with respect to NLP_EVT_CMPL_ADISC event. 2633 **/ 2634 static void 2635 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2636 struct lpfc_iocbq *rspiocb) 2637 { 2638 struct lpfc_vport *vport = cmdiocb->vport; 2639 IOCB_t *irsp; 2640 struct lpfc_nodelist *ndlp; 2641 int disc; 2642 2643 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2644 cmdiocb->context_un.rsp_iocb = rspiocb; 2645 2646 irsp = &(rspiocb->iocb); 2647 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2648 2649 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2650 "ADISC cmpl: status:x%x/x%x did:x%x", 2651 irsp->ulpStatus, irsp->un.ulpWord[4], 2652 ndlp->nlp_DID); 2653 2654 /* Since ndlp can be freed in the disc state machine, note if this node 2655 * is being used during discovery. 
2656 */
2657 spin_lock_irq(&ndlp->lock);
2658 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2659 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2660 spin_unlock_irq(&ndlp->lock);
2661 /* ADISC completes to NPort <nlp_DID> */
2662 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2663 "0104 ADISC completes to NPort x%x "
2664 "Data: x%x x%x x%x x%x x%x\n",
2665 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2666 irsp->ulpTimeout, disc, vport->num_disc_nodes);
2667 /* Check to see if link went down during discovery */
2668 if (lpfc_els_chk_latt(vport)) {
2669 spin_lock_irq(&ndlp->lock);
2670 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2671 spin_unlock_irq(&ndlp->lock);
2672 goto out;
2673 }
2674
2675 if (irsp->ulpStatus) {
2676 /* Check for retry */
2677 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2678 /* ELS command is being retried */
2679 if (disc) {
2680 spin_lock_irq(&ndlp->lock);
2681 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2682 spin_unlock_irq(&ndlp->lock);
2683 lpfc_set_disctmo(vport);
2684 }
2685 goto out;
2686 }
2687 /* ADISC failed */
2688 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2689 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2690 ndlp->nlp_DID, irsp->ulpStatus,
2691 irsp->un.ulpWord[4]);
2692 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2693 if (lpfc_error_lost_link(irsp))
2694 goto check_adisc;
2695 else
2696 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2697 NLP_EVT_CMPL_ADISC);
2698
2699 /* As long as this node is not registered with the SCSI or NVMe
2700 * transport, it is no longer an active node. Otherwise
2701 * devloss handles the final cleanup.
2702 */
2703 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2704 spin_lock_irq(&ndlp->lock);
2705 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2706 spin_unlock_irq(&ndlp->lock);
2707 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2708 NLP_EVT_DEVICE_RM);
2709 }
2710 } else
2711 /* Good status, call state machine */
2712 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2713 NLP_EVT_CMPL_ADISC);
2714
2715 check_adisc:
2716 /* Check to see if there are more ADISCs to be sent */
2717 if (disc && vport->num_disc_nodes)
2718 lpfc_more_adisc(vport);
2719 out:
2720 lpfc_els_free_iocb(phba, cmdiocb);
2721 lpfc_nlp_put(ndlp);
2722 return;
2723 }
2724
2725 /**
2726 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2727 * @vport: pointer to a virtual N_Port data structure.
2728 * @ndlp: pointer to a node-list data structure.
2729 * @retry: number of retries to the command IOCB.
2730 *
2731 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2732 * @vport. It prepares the payload of the ADISC ELS command, updates the
2733 * flags and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2734 * to issue the ADISC ELS command.
2735 *
2736 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2737 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2738 * will be stored into the context1 field of the IOCB for the completion
2739 * callback function to the ADISC ELS command.
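 *
 * NLP_ADISC_SND is set in the ndlp's nlp_flag before the IOCB is issued
 * and is cleared again on the error paths.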
2740 * 2741 * Return code 2742 * 0 - successfully issued adisc 2743 * 1 - failed to issue adisc 2744 **/ 2745 int 2746 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2747 uint8_t retry) 2748 { 2749 int rc = 0; 2750 struct lpfc_hba *phba = vport->phba; 2751 ADISC *ap; 2752 struct lpfc_iocbq *elsiocb; 2753 uint8_t *pcmd; 2754 uint16_t cmdsize; 2755 2756 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2757 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2758 ndlp->nlp_DID, ELS_CMD_ADISC); 2759 if (!elsiocb) 2760 return 1; 2761 2762 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2763 2764 /* For ADISC request, remainder of payload is service parameters */ 2765 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2766 pcmd += sizeof(uint32_t); 2767 2768 /* Fill in ADISC payload */ 2769 ap = (ADISC *) pcmd; 2770 ap->hardAL_PA = phba->fc_pref_ALPA; 2771 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2772 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2773 ap->DID = be32_to_cpu(vport->fc_myDID); 2774 2775 phba->fc_stat.elsXmitADISC++; 2776 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2777 spin_lock_irq(&ndlp->lock); 2778 ndlp->nlp_flag |= NLP_ADISC_SND; 2779 spin_unlock_irq(&ndlp->lock); 2780 elsiocb->context1 = lpfc_nlp_get(ndlp); 2781 if (!elsiocb->context1) 2782 goto node_err; 2783 2784 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2785 "Issue ADISC: did:x%x refcnt %d", 2786 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2787 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2788 if (rc == IOCB_ERROR) 2789 goto io_err; 2790 return 0; 2791 2792 io_err: 2793 lpfc_nlp_put(ndlp); 2794 node_err: 2795 spin_lock_irq(&ndlp->lock); 2796 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2797 spin_unlock_irq(&ndlp->lock); 2798 lpfc_els_free_iocb(phba, elsiocb); 2799 return 1; 2800 } 2801 2802 /** 2803 * lpfc_cmpl_els_logo - Completion callback function for logo 2804 * @phba: pointer to lpfc hba data structure. 2805 * @cmdiocb: pointer to lpfc command iocb data structure. 2806 * @rspiocb: pointer to lpfc response iocb data structure. 2807 * 2808 * This routine is the completion function for issuing the ELS Logout (LOGO) 2809 * command. If no error status was reported from the LOGO response, the 2810 * state machine of the associated ndlp shall be invoked for transition with 2811 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported, 2812 * the lpfc_els_retry() routine will be invoked to retry the LOGO command. 
2813 **/ 2814 static void 2815 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2816 struct lpfc_iocbq *rspiocb) 2817 { 2818 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2819 struct lpfc_vport *vport = ndlp->vport; 2820 IOCB_t *irsp; 2821 unsigned long flags; 2822 uint32_t skip_recovery = 0; 2823 int wake_up_waiter = 0; 2824 2825 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2826 cmdiocb->context_un.rsp_iocb = rspiocb; 2827 2828 irsp = &(rspiocb->iocb); 2829 spin_lock_irq(&ndlp->lock); 2830 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2831 if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) { 2832 wake_up_waiter = 1; 2833 ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 2834 } 2835 spin_unlock_irq(&ndlp->lock); 2836 2837 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2838 "LOGO cmpl: status:x%x/x%x did:x%x", 2839 irsp->ulpStatus, irsp->un.ulpWord[4], 2840 ndlp->nlp_DID); 2841 2842 /* LOGO completes to NPort <nlp_DID> */ 2843 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2844 "0105 LOGO completes to NPort x%x " 2845 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2846 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2847 irsp->ulpStatus, irsp->un.ulpWord[4], 2848 irsp->ulpTimeout, vport->num_disc_nodes); 2849 2850 if (lpfc_els_chk_latt(vport)) { 2851 skip_recovery = 1; 2852 goto out; 2853 } 2854 2855 /* The LOGO will not be retried on failure. A LOGO was 2856 * issued to the remote rport and a ACC or RJT or no Answer are 2857 * all acceptable. Note the failure and move forward with 2858 * discovery. The PLOGI will retry. 2859 */ 2860 if (irsp->ulpStatus) { 2861 /* LOGO failed */ 2862 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2863 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2864 ndlp->nlp_DID, irsp->ulpStatus, 2865 irsp->un.ulpWord[4]); 2866 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2867 if (lpfc_error_lost_link(irsp)) { 2868 skip_recovery = 1; 2869 goto out; 2870 } 2871 } 2872 2873 /* Call state machine. This will unregister the rpi if needed. */ 2874 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2875 2876 /* The driver sets this flag for an NPIV instance that doesn't want to 2877 * log into the remote port. 2878 */ 2879 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2880 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2881 NLP_EVT_DEVICE_RM); 2882 lpfc_els_free_iocb(phba, cmdiocb); 2883 lpfc_nlp_put(ndlp); 2884 2885 /* Presume the node was released. */ 2886 return; 2887 } 2888 2889 out: 2890 /* Driver is done with the IO. */ 2891 lpfc_els_free_iocb(phba, cmdiocb); 2892 lpfc_nlp_put(ndlp); 2893 2894 /* At this point, the LOGO processing is complete. NOTE: For a 2895 * pt2pt topology, we are assuming the NPortID will only change 2896 * on link up processing. For a LOGO / PLOGI initiated by the 2897 * Initiator, we are assuming the NPortID is not going to change. 2898 */ 2899 2900 if (wake_up_waiter && ndlp->logo_waitq) 2901 wake_up(ndlp->logo_waitq); 2902 /* 2903 * If the node is a target, the handling attempts to recover the port. 2904 * For any other port type, the rpi is unregistered as an implicit 2905 * LOGO. 
2906 */
2907 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
2908 skip_recovery == 0) {
2909 lpfc_cancel_retry_delay_tmo(vport, ndlp);
2910 spin_lock_irqsave(&ndlp->lock, flags);
2911 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2912 spin_unlock_irqrestore(&ndlp->lock, flags);
2913
2914 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2915 "3187 LOGO completes to NPort x%x: Start "
2916 "Recovery Data: x%x x%x x%x x%x\n",
2917 ndlp->nlp_DID, irsp->ulpStatus,
2918 irsp->un.ulpWord[4], irsp->ulpTimeout,
2919 vport->num_disc_nodes);
2920 lpfc_disc_start(vport);
2921 return;
2922 }
2923
2924 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the
2925 * driver sends a LOGO to the rport to clean up. For fabric and
2926 * initiator ports, clean up the node as long as the node is not
2927 * registered with the transport.
2928 */
2929 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) {
2930 spin_lock_irq(&ndlp->lock);
2931 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2932 spin_unlock_irq(&ndlp->lock);
2933 lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2934 NLP_EVT_DEVICE_RM);
2935 }
2936 }
2937
2938 /**
2939 * lpfc_issue_els_logo - Issue a logo to a node on a vport
2940 * @vport: pointer to a virtual N_Port data structure.
2941 * @ndlp: pointer to a node-list data structure.
2942 * @retry: number of retries to the command IOCB.
2943 *
2944 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2945 * to a remote node, referenced by the @ndlp, on a @vport. It constructs the
2946 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2947 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2948 *
2949 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2950 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2951 * will be stored into the context1 field of the IOCB for the completion
2952 * callback function to the LOGO ELS command.
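 *
 * If a LOGO is already pending on the node (NLP_LOGO_SND is set), the
 * routine returns 0 without issuing another LOGO.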
2953 * 2954 * Callers of this routine are expected to unregister the RPI first 2955 * 2956 * Return code 2957 * 0 - successfully issued logo 2958 * 1 - failed to issue logo 2959 **/ 2960 int 2961 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2962 uint8_t retry) 2963 { 2964 struct lpfc_hba *phba = vport->phba; 2965 struct lpfc_iocbq *elsiocb; 2966 uint8_t *pcmd; 2967 uint16_t cmdsize; 2968 int rc; 2969 2970 spin_lock_irq(&ndlp->lock); 2971 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2972 spin_unlock_irq(&ndlp->lock); 2973 return 0; 2974 } 2975 spin_unlock_irq(&ndlp->lock); 2976 2977 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2978 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2979 ndlp->nlp_DID, ELS_CMD_LOGO); 2980 if (!elsiocb) 2981 return 1; 2982 2983 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2984 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2985 pcmd += sizeof(uint32_t); 2986 2987 /* Fill in LOGO payload */ 2988 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 2989 pcmd += sizeof(uint32_t); 2990 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 2991 2992 phba->fc_stat.elsXmitLOGO++; 2993 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2994 spin_lock_irq(&ndlp->lock); 2995 ndlp->nlp_flag |= NLP_LOGO_SND; 2996 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 2997 spin_unlock_irq(&ndlp->lock); 2998 elsiocb->context1 = lpfc_nlp_get(ndlp); 2999 if (!elsiocb->context1) 3000 goto node_err; 3001 3002 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3003 "Issue LOGO: did:x%x refcnt %d", 3004 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3005 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3006 if (rc == IOCB_ERROR) 3007 goto io_err; 3008 3009 spin_lock_irq(&ndlp->lock); 3010 ndlp->nlp_prev_state = ndlp->nlp_state; 3011 spin_unlock_irq(&ndlp->lock); 3012 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3013 return 0; 3014 3015 io_err: 3016 lpfc_nlp_put(ndlp); 3017 node_err: 3018 spin_lock_irq(&ndlp->lock); 3019 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3020 spin_unlock_irq(&ndlp->lock); 3021 lpfc_els_free_iocb(phba, elsiocb); 3022 return 1; 3023 } 3024 3025 /** 3026 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3027 * @phba: pointer to lpfc hba data structure. 3028 * @cmdiocb: pointer to lpfc command iocb data structure. 3029 * @rspiocb: pointer to lpfc response iocb data structure. 3030 * 3031 * This routine is a generic completion callback function for ELS commands. 3032 * Specifically, it is the callback function which does not need to perform 3033 * any command specific operations. It is currently used by the ELS command 3034 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3035 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3036 * Other than certain debug loggings, this callback function simply invokes the 3037 * lpfc_els_chk_latt() routine to check whether link went down during the 3038 * discovery process. 
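 *
 * The command IOCB and the ndlp reference held in its context1 field are
 * released before returning.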
3039 **/ 3040 static void 3041 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3042 struct lpfc_iocbq *rspiocb) 3043 { 3044 struct lpfc_vport *vport = cmdiocb->vport; 3045 struct lpfc_nodelist *free_ndlp; 3046 IOCB_t *irsp; 3047 3048 irsp = &rspiocb->iocb; 3049 3050 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3051 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3052 irsp->ulpStatus, irsp->un.ulpWord[4], 3053 irsp->un.elsreq64.remoteID); 3054 3055 /* ELS cmd tag <ulpIoTag> completes */ 3056 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3057 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3058 irsp->ulpIoTag, irsp->ulpStatus, 3059 irsp->un.ulpWord[4], irsp->ulpTimeout); 3060 3061 /* Check to see if link went down during discovery */ 3062 lpfc_els_chk_latt(vport); 3063 3064 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 3065 3066 lpfc_els_free_iocb(phba, cmdiocb); 3067 lpfc_nlp_put(free_ndlp); 3068 } 3069 3070 /** 3071 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3072 * @phba: pointer to lpfc hba data structure. 3073 * @cmdiocb: pointer to lpfc command iocb data structure. 3074 * @rspiocb: pointer to lpfc response iocb data structure. 3075 * 3076 * This routine is a generic completion callback function for Discovery ELS cmd. 3077 * Currently used by the ELS command issuing routines for the ELS State Change 3078 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3079 * These commands will be retried once only for ELS timeout errors. 3080 **/ 3081 static void 3082 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3083 struct lpfc_iocbq *rspiocb) 3084 { 3085 struct lpfc_vport *vport = cmdiocb->vport; 3086 IOCB_t *irsp; 3087 struct lpfc_els_rdf_rsp *prdf; 3088 struct lpfc_dmabuf *pcmd, *prsp; 3089 u32 *pdata; 3090 u32 cmd; 3091 struct lpfc_nodelist *ndlp = cmdiocb->context1; 3092 3093 irsp = &rspiocb->iocb; 3094 3095 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3096 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3097 irsp->ulpStatus, irsp->un.ulpWord[4], 3098 irsp->un.elsreq64.remoteID); 3099 /* ELS cmd tag <ulpIoTag> completes */ 3100 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3101 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x " 3102 "x%x\n", 3103 irsp->ulpIoTag, irsp->ulpStatus, 3104 irsp->un.ulpWord[4], irsp->ulpTimeout, 3105 cmdiocb->retry); 3106 3107 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3108 if (!pcmd) 3109 goto out; 3110 3111 pdata = (u32 *)pcmd->virt; 3112 if (!pdata) 3113 goto out; 3114 cmd = *pdata; 3115 3116 /* Only 1 retry for ELS Timeout only */ 3117 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 3118 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3119 IOERR_SEQUENCE_TIMEOUT)) { 3120 cmdiocb->retry++; 3121 if (cmdiocb->retry <= 1) { 3122 switch (cmd) { 3123 case ELS_CMD_SCR: 3124 lpfc_issue_els_scr(vport, cmdiocb->retry); 3125 break; 3126 case ELS_CMD_RDF: 3127 cmdiocb->context1 = NULL; /* save ndlp refcnt */ 3128 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3129 break; 3130 } 3131 goto out; 3132 } 3133 phba->fc_stat.elsRetryExceeded++; 3134 } 3135 if (irsp->ulpStatus) { 3136 /* ELS discovery cmd completes with error */ 3137 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 3138 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3139 irsp->ulpStatus, irsp->un.ulpWord[4]); 3140 goto out; 3141 } 3142 3143 /* The RDF response doesn't have any impact on the running driver 3144 * but the notification descriptors are dumped here for support. 
3145 */ 3146 if (cmd == ELS_CMD_RDF) { 3147 int i; 3148 3149 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3150 if (!prsp) 3151 goto out; 3152 3153 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3154 if (!prdf) 3155 goto out; 3156 3157 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3158 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3159 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3160 "4677 Fabric RDF Notification Grant Data: " 3161 "0x%08x\n", 3162 be32_to_cpu( 3163 prdf->reg_d1.desc_tags[i])); 3164 } 3165 3166 out: 3167 /* Check to see if link went down during discovery */ 3168 lpfc_els_chk_latt(vport); 3169 lpfc_els_free_iocb(phba, cmdiocb); 3170 lpfc_nlp_put(ndlp); 3171 return; 3172 } 3173 3174 /** 3175 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3176 * @vport: pointer to a host virtual N_Port data structure. 3177 * @retry: retry counter for the command IOCB. 3178 * 3179 * This routine issues a State Change Request (SCR) to a fabric node 3180 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3181 * first search the @vport node list to find the matching ndlp. If no such 3182 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3183 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3184 * routine is invoked to send the SCR IOCB. 3185 * 3186 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3187 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3188 * will be stored into the context1 field of the IOCB for the completion 3189 * callback function to the SCR ELS command. 3190 * 3191 * Return code 3192 * 0 - Successfully issued scr command 3193 * 1 - Failed to issue scr command 3194 **/ 3195 int 3196 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3197 { 3198 int rc = 0; 3199 struct lpfc_hba *phba = vport->phba; 3200 struct lpfc_iocbq *elsiocb; 3201 uint8_t *pcmd; 3202 uint16_t cmdsize; 3203 struct lpfc_nodelist *ndlp; 3204 3205 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3206 3207 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3208 if (!ndlp) { 3209 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3210 if (!ndlp) 3211 return 1; 3212 lpfc_enqueue_node(vport, ndlp); 3213 } 3214 3215 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3216 ndlp->nlp_DID, ELS_CMD_SCR); 3217 3218 if (!elsiocb) 3219 return 1; 3220 3221 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3222 3223 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3224 pcmd += sizeof(uint32_t); 3225 3226 /* For SCR, remainder of payload is SCR parameter page */ 3227 memset(pcmd, 0, sizeof(SCR)); 3228 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3229 3230 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3231 "Issue SCR: did:x%x", 3232 ndlp->nlp_DID, 0, 0); 3233 3234 phba->fc_stat.elsXmitSCR++; 3235 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3236 elsiocb->context1 = lpfc_nlp_get(ndlp); 3237 if (!elsiocb->context1) 3238 goto node_err; 3239 3240 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3241 "Issue SCR: did:x%x refcnt %d", 3242 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3243 3244 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3245 if (rc == IOCB_ERROR) 3246 goto io_err; 3247 3248 /* Keep the ndlp just in case RDF is being sent */ 3249 return 0; 3250 3251 io_err: 3252 lpfc_nlp_put(ndlp); 3253 node_err: 3254 lpfc_els_free_iocb(phba, elsiocb); 3255 return 1; 3256 } 3257 3258 /** 3259 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3260 * 
or the other nport (pt2pt). 3261 * @vport: pointer to a host virtual N_Port data structure. 3262 * @retry: number of retries to the command IOCB. 3263 * 3264 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3265 * when connected to a fabric, or to the remote port when connected 3266 * in point-to-point mode. When sent to the Fabric Controller, it will 3267 * replay the RSCN to registered recipients. 3268 * 3269 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3270 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3271 * will be stored into the context1 field of the IOCB for the completion 3272 * callback function to the RSCN ELS command. 3273 * 3274 * Return code 3275 * 0 - Successfully issued RSCN command 3276 * 1 - Failed to issue RSCN command 3277 **/ 3278 int 3279 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3280 { 3281 int rc = 0; 3282 struct lpfc_hba *phba = vport->phba; 3283 struct lpfc_iocbq *elsiocb; 3284 struct lpfc_nodelist *ndlp; 3285 struct { 3286 struct fc_els_rscn rscn; 3287 struct fc_els_rscn_page portid; 3288 } *event; 3289 uint32_t nportid; 3290 uint16_t cmdsize = sizeof(*event); 3291 3292 /* Not supported for private loop */ 3293 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3294 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3295 return 1; 3296 3297 if (vport->fc_flag & FC_PT2PT) { 3298 /* find any mapped nport - that would be the other nport */ 3299 ndlp = lpfc_findnode_mapped(vport); 3300 if (!ndlp) 3301 return 1; 3302 } else { 3303 nportid = FC_FID_FCTRL; 3304 /* find the fabric controller node */ 3305 ndlp = lpfc_findnode_did(vport, nportid); 3306 if (!ndlp) { 3307 /* if one didn't exist, make one */ 3308 ndlp = lpfc_nlp_init(vport, nportid); 3309 if (!ndlp) 3310 return 1; 3311 lpfc_enqueue_node(vport, ndlp); 3312 } 3313 } 3314 3315 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3316 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3317 3318 if (!elsiocb) 3319 return 1; 3320 3321 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 3322 3323 event->rscn.rscn_cmd = ELS_RSCN; 3324 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3325 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3326 3327 nportid = vport->fc_myDID; 3328 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3329 event->portid.rscn_page_flags = 0; 3330 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3331 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3332 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3333 3334 phba->fc_stat.elsXmitRSCN++; 3335 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3336 elsiocb->context1 = lpfc_nlp_get(ndlp); 3337 if (!elsiocb->context1) 3338 goto node_err; 3339 3340 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3341 "Issue RSCN: did:x%x", 3342 ndlp->nlp_DID, 0, 0); 3343 3344 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3345 if (rc == IOCB_ERROR) 3346 goto io_err; 3347 3348 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3349 * trigger the release of node. 3350 */ 3351 if (!(vport->fc_flag & FC_PT2PT)) 3352 lpfc_nlp_put(ndlp); 3353 return 0; 3354 io_err: 3355 lpfc_nlp_put(ndlp); 3356 node_err: 3357 lpfc_els_free_iocb(phba, elsiocb); 3358 return 1; 3359 } 3360 3361 /** 3362 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3363 * @vport: pointer to a host virtual N_Port data structure. 3364 * @nportid: N_Port identifier to the remote node. 3365 * @retry: number of retries to the command IOCB. 
3366 *
3367 * This routine issues a Fibre Channel Address Resolution Response
3368 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
3369 * is passed into the function. It first searches the @vport node list to find
3370 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
3371 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
3372 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
3373 *
3374 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3375 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3376 * will be stored into the context1 field of the IOCB for the completion
3377 * callback function to the FARPR ELS command.
3378 *
3379 * Return code
3380 * 0 - Successfully issued farpr command
3381 * 1 - Failed to issue farpr command
3382 **/
3383 static int
3384 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
3385 {
3386 int rc = 0;
3387 struct lpfc_hba *phba = vport->phba;
3388 struct lpfc_iocbq *elsiocb;
3389 FARP *fp;
3390 uint8_t *pcmd;
3391 uint32_t *lp;
3392 uint16_t cmdsize;
3393 struct lpfc_nodelist *ondlp;
3394 struct lpfc_nodelist *ndlp;
3395
3396 cmdsize = (sizeof(uint32_t) + sizeof(FARP));
3397
3398 ndlp = lpfc_findnode_did(vport, nportid);
3399 if (!ndlp) {
3400 ndlp = lpfc_nlp_init(vport, nportid);
3401 if (!ndlp)
3402 return 1;
3403 lpfc_enqueue_node(vport, ndlp);
3404 }
3405
3406 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
3407 ndlp->nlp_DID, ELS_CMD_RNID);
3408 if (!elsiocb)
3409 return 1;
3410
3411 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3412
3413 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
3414 pcmd += sizeof(uint32_t);
3415
3416 /* Fill in FARPR payload */
3417 fp = (FARP *) (pcmd);
3418 memset(fp, 0, sizeof(FARP));
3419 lp = (uint32_t *) pcmd;
3420 *lp++ = be32_to_cpu(nportid);
3421 *lp++ = be32_to_cpu(vport->fc_myDID);
3422 fp->Rflags = 0;
3423 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
3424
3425 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
3426 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3427 ondlp = lpfc_findnode_did(vport, nportid);
3428 if (ondlp) {
3429 memcpy(&fp->OportName, &ondlp->nlp_portname,
3430 sizeof(struct lpfc_name));
3431 memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
3432 sizeof(struct lpfc_name));
3433 }
3434
3435 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
3436 "Issue FARPR: did:x%x",
3437 ndlp->nlp_DID, 0, 0);
3438
3439 phba->fc_stat.elsXmitFARPR++;
3440 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
3441 elsiocb->context1 = lpfc_nlp_get(ndlp);
3442 if (!elsiocb->context1) {
3443 lpfc_els_free_iocb(phba, elsiocb);
3444 return 1;
3445 }
3446
3447 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3448 if (rc == IOCB_ERROR) {
3449 /* The additional lpfc_nlp_put will cause the following
3450 * lpfc_els_free_iocb routine to trigger the release of
3451 * the node.
3452 */
3453 lpfc_nlp_put(ndlp);
3454 lpfc_els_free_iocb(phba, elsiocb);
3455 return 1;
3456 }
3457 /* This will cause the callback-function lpfc_cmpl_els_cmd to
3458 * trigger the release of the node.
3459 */
3460 /* Don't release reference count as RDF is likely outstanding */
3461 return 0;
3462 }
3463
3464 /**
3465 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
3466 * @vport: pointer to a host virtual N_Port data structure.
3467 * @retry: retry counter for the command IOCB.
3468 * 3469 * This routine issues an ELS RDF to the Fabric Controller to register 3470 * for diagnostic functions. 3471 * 3472 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3473 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3474 * will be stored into the context1 field of the IOCB for the completion 3475 * callback function to the RDF ELS command. 3476 * 3477 * Return code 3478 * 0 - Successfully issued rdf command 3479 * 1 - Failed to issue rdf command 3480 **/ 3481 int 3482 lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) 3483 { 3484 struct lpfc_hba *phba = vport->phba; 3485 struct lpfc_iocbq *elsiocb; 3486 struct lpfc_els_rdf_req *prdf; 3487 struct lpfc_nodelist *ndlp; 3488 uint16_t cmdsize; 3489 int rc; 3490 3491 cmdsize = sizeof(*prdf); 3492 3493 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3494 if (!ndlp) { 3495 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3496 if (!ndlp) 3497 return -ENODEV; 3498 lpfc_enqueue_node(vport, ndlp); 3499 } 3500 3501 /* RDF ELS is not required on an NPIV VN_Port. */ 3502 if (vport->port_type == LPFC_NPIV_PORT) { 3503 lpfc_nlp_put(ndlp); 3504 return -EACCES; 3505 } 3506 3507 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3508 ndlp->nlp_DID, ELS_CMD_RDF); 3509 if (!elsiocb) 3510 return -ENOMEM; 3511 3512 /* Configure the payload for the supported FPIN events. */ 3513 prdf = (struct lpfc_els_rdf_req *) 3514 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 3515 memset(prdf, 0, cmdsize); 3516 prdf->rdf.fpin_cmd = ELS_RDF; 3517 prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) - 3518 sizeof(struct fc_els_rdf)); 3519 prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER); 3520 prdf->reg_d1.reg_desc.desc_len = cpu_to_be32( 3521 FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1)); 3522 prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT); 3523 prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY); 3524 prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY); 3525 prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST); 3526 prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION); 3527 3528 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3529 "6444 Xmit RDF to remote NPORT x%x\n", 3530 ndlp->nlp_DID); 3531 3532 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3533 elsiocb->context1 = lpfc_nlp_get(ndlp); 3534 if (!elsiocb->context1) 3535 goto node_err; 3536 3537 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3538 "Issue RDF: did:x%x refcnt %d", 3539 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3540 3541 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3542 if (rc == IOCB_ERROR) 3543 goto io_err; 3544 return 0; 3545 3546 io_err: 3547 lpfc_nlp_put(ndlp); 3548 node_err: 3549 lpfc_els_free_iocb(phba, elsiocb); 3550 return -EIO; 3551 } 3552 3553 /** 3554 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 3555 * @vport: pointer to a host virtual N_Port data structure. 3556 * @nlp: pointer to a node-list data structure. 3557 * 3558 * This routine cancels the timer with a delayed IOCB-command retry for 3559 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 3560 * removes the ELS retry event if it presents. In addition, if the 3561 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 3562 * commands are sent for the @vport's nodes that require issuing discovery 3563 * ADISC. 
3564 **/ 3565 void 3566 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 3567 { 3568 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3569 struct lpfc_work_evt *evtp; 3570 3571 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 3572 return; 3573 spin_lock_irq(&nlp->lock); 3574 nlp->nlp_flag &= ~NLP_DELAY_TMO; 3575 spin_unlock_irq(&nlp->lock); 3576 del_timer_sync(&nlp->nlp_delayfunc); 3577 nlp->nlp_last_elscmd = 0; 3578 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 3579 list_del_init(&nlp->els_retry_evt.evt_listp); 3580 /* Decrement nlp reference count held for the delayed retry */ 3581 evtp = &nlp->els_retry_evt; 3582 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 3583 } 3584 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 3585 spin_lock_irq(&nlp->lock); 3586 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3587 spin_unlock_irq(&nlp->lock); 3588 if (vport->num_disc_nodes) { 3589 if (vport->port_state < LPFC_VPORT_READY) { 3590 /* Check if there are more ADISCs to be sent */ 3591 lpfc_more_adisc(vport); 3592 } else { 3593 /* Check if there are more PLOGIs to be sent */ 3594 lpfc_more_plogi(vport); 3595 if (vport->num_disc_nodes == 0) { 3596 spin_lock_irq(shost->host_lock); 3597 vport->fc_flag &= ~FC_NDISC_ACTIVE; 3598 spin_unlock_irq(shost->host_lock); 3599 lpfc_can_disctmo(vport); 3600 lpfc_end_rscn(vport); 3601 } 3602 } 3603 } 3604 } 3605 return; 3606 } 3607 3608 /** 3609 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 3610 * @t: pointer to the timer function associated data (ndlp). 3611 * 3612 * This routine is invoked by the ndlp delayed-function timer to check 3613 * whether there is any pending ELS retry event(s) with the node. If not, it 3614 * simply returns. Otherwise, if there is at least one ELS delayed event, it 3615 * adds the delayed events to the HBA work list and invokes the 3616 * lpfc_worker_wake_up() routine to wake up worker thread to process the 3617 * event. Note that lpfc_nlp_get() is called before posting the event to 3618 * the work list to hold reference count of ndlp so that it guarantees the 3619 * reference to ndlp will still be available when the worker thread gets 3620 * to the event associated with the ndlp. 3621 **/ 3622 void 3623 lpfc_els_retry_delay(struct timer_list *t) 3624 { 3625 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 3626 struct lpfc_vport *vport = ndlp->vport; 3627 struct lpfc_hba *phba = vport->phba; 3628 unsigned long flags; 3629 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 3630 3631 spin_lock_irqsave(&phba->hbalock, flags); 3632 if (!list_empty(&evtp->evt_listp)) { 3633 spin_unlock_irqrestore(&phba->hbalock, flags); 3634 return; 3635 } 3636 3637 /* We need to hold the node by incrementing the reference 3638 * count until the queued work is done 3639 */ 3640 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 3641 if (evtp->evt_arg1) { 3642 evtp->evt = LPFC_EVT_ELS_RETRY; 3643 list_add_tail(&evtp->evt_listp, &phba->work_list); 3644 lpfc_worker_wake_up(phba); 3645 } 3646 spin_unlock_irqrestore(&phba->hbalock, flags); 3647 return; 3648 } 3649 3650 /** 3651 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 3652 * @ndlp: pointer to a node-list data structure. 3653 * 3654 * This routine is the worker-thread handler for processing the @ndlp delayed 3655 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 3656 * the last ELS command from the associated ndlp and invokes the proper ELS 3657 * function according to the delayed ELS command to retry the command. 3658 **/ 3659 void 3660 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 3661 { 3662 struct lpfc_vport *vport = ndlp->vport; 3663 uint32_t cmd, retry; 3664 3665 spin_lock_irq(&ndlp->lock); 3666 cmd = ndlp->nlp_last_elscmd; 3667 ndlp->nlp_last_elscmd = 0; 3668 3669 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 3670 spin_unlock_irq(&ndlp->lock); 3671 return; 3672 } 3673 3674 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 3675 spin_unlock_irq(&ndlp->lock); 3676 /* 3677 * If a discovery event readded nlp_delayfunc after timer 3678 * firing and before processing the timer, cancel the 3679 * nlp_delayfunc. 3680 */ 3681 del_timer_sync(&ndlp->nlp_delayfunc); 3682 retry = ndlp->nlp_retry; 3683 ndlp->nlp_retry = 0; 3684 3685 switch (cmd) { 3686 case ELS_CMD_FLOGI: 3687 lpfc_issue_els_flogi(vport, ndlp, retry); 3688 break; 3689 case ELS_CMD_PLOGI: 3690 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 3691 ndlp->nlp_prev_state = ndlp->nlp_state; 3692 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3693 } 3694 break; 3695 case ELS_CMD_ADISC: 3696 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 3697 ndlp->nlp_prev_state = ndlp->nlp_state; 3698 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3699 } 3700 break; 3701 case ELS_CMD_PRLI: 3702 case ELS_CMD_NVMEPRLI: 3703 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 3704 ndlp->nlp_prev_state = ndlp->nlp_state; 3705 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3706 } 3707 break; 3708 case ELS_CMD_LOGO: 3709 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 3710 ndlp->nlp_prev_state = ndlp->nlp_state; 3711 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3712 } 3713 break; 3714 case ELS_CMD_FDISC: 3715 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 3716 lpfc_issue_els_fdisc(vport, ndlp, retry); 3717 break; 3718 } 3719 return; 3720 } 3721 3722 /** 3723 * lpfc_link_reset - Issue link reset 3724 * @vport: pointer to a virtual N_Port data structure. 3725 * 3726 * This routine performs link reset by sending INIT_LINK mailbox command. 3727 * For SLI-3 adapter, link attention interrupt is enabled before issuing 3728 * INIT_LINK mailbox command. 
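 * The mailbox is posted with MBX_NOWAIT using the configured topology
 * and link speed, so this routine only initiates the reset; completion
 * is handled by lpfc_sli_def_mbox_cmpl().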
3729 *
3730 * Return code
3731 * 0 - Link reset initiated successfully
3732 * 1 - Failed to initiate link reset
3733 **/
3734 int
3735 lpfc_link_reset(struct lpfc_vport *vport)
3736 {
3737 struct lpfc_hba *phba = vport->phba;
3738 LPFC_MBOXQ_t *mbox;
3739 uint32_t control;
3740 int rc;
3741
3742 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3743 "2851 Attempt link reset\n");
3744 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3745 if (!mbox) {
3746 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3747 "2852 Failed to allocate mbox memory");
3748 return 1;
3749 }
3750
3751 /* Enable Link attention interrupts */
3752 if (phba->sli_rev <= LPFC_SLI_REV3) {
3753 spin_lock_irq(&phba->hbalock);
3754 phba->sli.sli_flag |= LPFC_PROCESS_LA;
3755 control = readl(phba->HCregaddr);
3756 control |= HC_LAINT_ENA;
3757 writel(control, phba->HCregaddr);
3758 readl(phba->HCregaddr); /* flush */
3759 spin_unlock_irq(&phba->hbalock);
3760 }
3761
3762 lpfc_init_link(phba, mbox, phba->cfg_topology,
3763 phba->cfg_link_speed);
3764 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3765 mbox->vport = vport;
3766 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3767 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3769 "2853 Failed to issue INIT_LINK "
3770 "mbox command, rc:x%x\n", rc);
3771 mempool_free(mbox, phba->mbox_mem_pool);
3772 return 1;
3773 }
3774
3775 return 0;
3776 }
3777
3778 /**
3779 * lpfc_els_retry - Make retry decision on an els command iocb
3780 * @phba: pointer to lpfc hba data structure.
3781 * @cmdiocb: pointer to lpfc command iocb data structure.
3782 * @rspiocb: pointer to lpfc response iocb data structure.
3783 *
3784 * This routine makes a retry decision on an ELS command IOCB, which has
3785 * failed. The following ELS IOCBs use this function for retrying the command
3786 * when a previously issued command responded with an error status: FLOGI, PLOGI,
3787 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
3788 * returned error status, it makes the decision whether a retry shall be
3789 * issued for the command, and whether a retry shall be made immediately or
3790 * delayed. In the former case, the corresponding ELS command issuing-function
3791 * is called to retry the command. In the latter case, the ELS command shall
3792 * be posted to the ndlp delayed event and delayed function timer set to the
3793 * ndlp for the delayed command issuing.
3794 *
3795 * Return code
3796 * 0 - No retry of els command is made
3797 * 1 - Immediate or delayed retry of els command is made
3798 **/
3799 static int
3800 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3801 struct lpfc_iocbq *rspiocb)
3802 {
3803 struct lpfc_vport *vport = cmdiocb->vport;
3804 IOCB_t *irsp = &rspiocb->iocb;
3805 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3806 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3807 uint32_t *elscmd;
3808 struct ls_rjt stat;
3809 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
3810 int logerr = 0;
3811 uint32_t cmd = 0;
3812 uint32_t did;
3813 int link_reset = 0, rc;
3814
3815
3816 /* Note: context2 may be 0 for internal driver abort
3817 * of a delayed ELS command.
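 * In that case the ELS command word stays 0 and, if no ndlp is attached
 * to the command IOCB, the DID is recovered from the response IOCB so
 * that only a PLOGI retry is attempted.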
3818 */ 3819 3820 if (pcmd && pcmd->virt) { 3821 elscmd = (uint32_t *) (pcmd->virt); 3822 cmd = *elscmd++; 3823 } 3824 3825 if (ndlp) 3826 did = ndlp->nlp_DID; 3827 else { 3828 /* We should only hit this case for retrying PLOGI */ 3829 did = irsp->un.elsreq64.remoteID; 3830 ndlp = lpfc_findnode_did(vport, did); 3831 if (!ndlp && (cmd != ELS_CMD_PLOGI)) 3832 return 1; 3833 } 3834 3835 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3836 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 3837 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 3838 3839 switch (irsp->ulpStatus) { 3840 case IOSTAT_FCP_RSP_ERROR: 3841 break; 3842 case IOSTAT_REMOTE_STOP: 3843 if (phba->sli_rev == LPFC_SLI_REV4) { 3844 /* This IO was aborted by the target, we don't 3845 * know the rxid and because we did not send the 3846 * ABTS we cannot generate and RRQ. 3847 */ 3848 lpfc_set_rrq_active(phba, ndlp, 3849 cmdiocb->sli4_lxritag, 0, 0); 3850 } 3851 break; 3852 case IOSTAT_LOCAL_REJECT: 3853 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 3854 case IOERR_LOOP_OPEN_FAILURE: 3855 if (cmd == ELS_CMD_FLOGI) { 3856 if (PCI_DEVICE_ID_HORNET == 3857 phba->pcidev->device) { 3858 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 3859 phba->pport->fc_myDID = 0; 3860 phba->alpa_map[0] = 0; 3861 phba->alpa_map[1] = 0; 3862 } 3863 } 3864 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 3865 delay = 1000; 3866 retry = 1; 3867 break; 3868 3869 case IOERR_ILLEGAL_COMMAND: 3870 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3871 "0124 Retry illegal cmd x%x " 3872 "retry:x%x delay:x%x\n", 3873 cmd, cmdiocb->retry, delay); 3874 retry = 1; 3875 /* All command's retry policy */ 3876 maxretry = 8; 3877 if (cmdiocb->retry > 2) 3878 delay = 1000; 3879 break; 3880 3881 case IOERR_NO_RESOURCES: 3882 logerr = 1; /* HBA out of resources */ 3883 retry = 1; 3884 if (cmdiocb->retry > 100) 3885 delay = 100; 3886 maxretry = 250; 3887 break; 3888 3889 case IOERR_ILLEGAL_FRAME: 3890 delay = 100; 3891 retry = 1; 3892 break; 3893 3894 case IOERR_INVALID_RPI: 3895 if (cmd == ELS_CMD_PLOGI && 3896 did == NameServer_DID) { 3897 /* Continue forever if plogi to */ 3898 /* the nameserver fails */ 3899 maxretry = 0; 3900 delay = 100; 3901 } 3902 retry = 1; 3903 break; 3904 3905 case IOERR_SEQUENCE_TIMEOUT: 3906 if (cmd == ELS_CMD_PLOGI && 3907 did == NameServer_DID && 3908 (cmdiocb->retry + 1) == maxretry) { 3909 /* Reset the Link */ 3910 link_reset = 1; 3911 break; 3912 } 3913 retry = 1; 3914 delay = 100; 3915 break; 3916 } 3917 break; 3918 3919 case IOSTAT_NPORT_RJT: 3920 case IOSTAT_FABRIC_RJT: 3921 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 3922 retry = 1; 3923 break; 3924 } 3925 break; 3926 3927 case IOSTAT_NPORT_BSY: 3928 case IOSTAT_FABRIC_BSY: 3929 logerr = 1; /* Fabric / Remote NPort out of resources */ 3930 retry = 1; 3931 break; 3932 3933 case IOSTAT_LS_RJT: 3934 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 3935 /* Added for Vendor specifc support 3936 * Just keep retrying for these Rsn / Exp codes 3937 */ 3938 switch (stat.un.b.lsRjtRsnCode) { 3939 case LSRJT_UNABLE_TPC: 3940 /* The driver has a VALID PLOGI but the rport has 3941 * rejected the PRLI - can't do it now. Delay 3942 * for 1 second and try again. 3943 * 3944 * However, if explanation is REQ_UNSUPPORTED there's 3945 * no point to retry PRLI. 
3946 */ 3947 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 3948 stat.un.b.lsRjtRsnCodeExp != 3949 LSEXP_REQ_UNSUPPORTED) { 3950 delay = 1000; 3951 maxretry = lpfc_max_els_tries + 1; 3952 retry = 1; 3953 break; 3954 } 3955 3956 /* Legacy bug fix code for targets with PLOGI delays. */ 3957 if (stat.un.b.lsRjtRsnCodeExp == 3958 LSEXP_CMD_IN_PROGRESS) { 3959 if (cmd == ELS_CMD_PLOGI) { 3960 delay = 1000; 3961 maxretry = 48; 3962 } 3963 retry = 1; 3964 break; 3965 } 3966 if (stat.un.b.lsRjtRsnCodeExp == 3967 LSEXP_CANT_GIVE_DATA) { 3968 if (cmd == ELS_CMD_PLOGI) { 3969 delay = 1000; 3970 maxretry = 48; 3971 } 3972 retry = 1; 3973 break; 3974 } 3975 if (cmd == ELS_CMD_PLOGI) { 3976 delay = 1000; 3977 maxretry = lpfc_max_els_tries + 1; 3978 retry = 1; 3979 break; 3980 } 3981 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3982 (cmd == ELS_CMD_FDISC) && 3983 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3984 lpfc_printf_vlog(vport, KERN_ERR, 3985 LOG_TRACE_EVENT, 3986 "0125 FDISC Failed (x%x). " 3987 "Fabric out of resources\n", 3988 stat.un.lsRjtError); 3989 lpfc_vport_set_state(vport, 3990 FC_VPORT_NO_FABRIC_RSCS); 3991 } 3992 break; 3993 3994 case LSRJT_LOGICAL_BSY: 3995 if ((cmd == ELS_CMD_PLOGI) || 3996 (cmd == ELS_CMD_PRLI) || 3997 (cmd == ELS_CMD_NVMEPRLI)) { 3998 delay = 1000; 3999 maxretry = 48; 4000 } else if (cmd == ELS_CMD_FDISC) { 4001 /* FDISC retry policy */ 4002 maxretry = 48; 4003 if (cmdiocb->retry >= 32) 4004 delay = 1000; 4005 } 4006 retry = 1; 4007 break; 4008 4009 case LSRJT_LOGICAL_ERR: 4010 /* There are some cases where switches return this 4011 * error when they are not ready and should be returning 4012 * Logical Busy. We should delay every time. 4013 */ 4014 if (cmd == ELS_CMD_FDISC && 4015 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4016 maxretry = 3; 4017 delay = 1000; 4018 retry = 1; 4019 } else if (cmd == ELS_CMD_FLOGI && 4020 stat.un.b.lsRjtRsnCodeExp == 4021 LSEXP_NOTHING_MORE) { 4022 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4023 retry = 1; 4024 lpfc_printf_vlog(vport, KERN_ERR, 4025 LOG_TRACE_EVENT, 4026 "0820 FLOGI Failed (x%x). " 4027 "BBCredit Not Supported\n", 4028 stat.un.lsRjtError); 4029 } 4030 break; 4031 4032 case LSRJT_PROTOCOL_ERR: 4033 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4034 (cmd == ELS_CMD_FDISC) && 4035 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4036 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4037 ) { 4038 lpfc_printf_vlog(vport, KERN_ERR, 4039 LOG_TRACE_EVENT, 4040 "0122 FDISC Failed (x%x). " 4041 "Fabric Detected Bad WWN\n", 4042 stat.un.lsRjtError); 4043 lpfc_vport_set_state(vport, 4044 FC_VPORT_FABRIC_REJ_WWN); 4045 } 4046 break; 4047 case LSRJT_VENDOR_UNIQUE: 4048 if ((stat.un.b.vendorUnique == 0x45) && 4049 (cmd == ELS_CMD_FLOGI)) { 4050 goto out_retry; 4051 } 4052 break; 4053 case LSRJT_CMD_UNSUPPORTED: 4054 /* lpfc nvmet returns this type of LS_RJT when it 4055 * receives an FCP PRLI because lpfc nvmet only 4056 * support NVME. ELS request is terminated for FCP4 4057 * on this rport. 
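 * The NLP_FCP_PRLI_RJT flag set below records that rejection and the
 * command is not retried.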
4058 */ 4059 if (stat.un.b.lsRjtRsnCodeExp == 4060 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 4061 spin_lock_irq(&ndlp->lock); 4062 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4063 spin_unlock_irq(&ndlp->lock); 4064 retry = 0; 4065 goto out_retry; 4066 } 4067 break; 4068 } 4069 break; 4070 4071 case IOSTAT_INTERMED_RSP: 4072 case IOSTAT_BA_RJT: 4073 break; 4074 4075 default: 4076 break; 4077 } 4078 4079 if (link_reset) { 4080 rc = lpfc_link_reset(vport); 4081 if (rc) { 4082 /* Do not give up. Retry PLOGI one more time and attempt 4083 * link reset if PLOGI fails again. 4084 */ 4085 retry = 1; 4086 delay = 100; 4087 goto out_retry; 4088 } 4089 return 1; 4090 } 4091 4092 if (did == FDMI_DID) 4093 retry = 1; 4094 4095 if ((cmd == ELS_CMD_FLOGI) && 4096 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4097 !lpfc_error_lost_link(irsp)) { 4098 /* FLOGI retry policy */ 4099 retry = 1; 4100 /* retry FLOGI forever */ 4101 if (phba->link_flag != LS_LOOPBACK_MODE) 4102 maxretry = 0; 4103 else 4104 maxretry = 2; 4105 4106 if (cmdiocb->retry >= 100) 4107 delay = 5000; 4108 else if (cmdiocb->retry >= 32) 4109 delay = 1000; 4110 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 4111 /* retry FDISCs every second up to devloss */ 4112 retry = 1; 4113 maxretry = vport->cfg_devloss_tmo; 4114 delay = 1000; 4115 } 4116 4117 cmdiocb->retry++; 4118 if (maxretry && (cmdiocb->retry >= maxretry)) { 4119 phba->fc_stat.elsRetryExceeded++; 4120 retry = 0; 4121 } 4122 4123 if ((vport->load_flag & FC_UNLOADING) != 0) 4124 retry = 0; 4125 4126 out_retry: 4127 if (retry) { 4128 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4129 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4130 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4131 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4132 "2849 Stop retry ELS command " 4133 "x%x to remote NPORT x%x, " 4134 "Data: x%x x%x\n", cmd, did, 4135 cmdiocb->retry, delay); 4136 return 0; 4137 } 4138 } 4139 4140 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4141 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4142 "0107 Retry ELS command x%x to remote " 4143 "NPORT x%x Data: x%x x%x\n", 4144 cmd, did, cmdiocb->retry, delay); 4145 4146 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4147 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 4148 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 4149 IOERR_NO_RESOURCES))) { 4150 /* Don't reset timer for no resources */ 4151 4152 /* If discovery / RSCN timer is running, reset it */ 4153 if (timer_pending(&vport->fc_disctmo) || 4154 (vport->fc_flag & FC_RSCN_MODE)) 4155 lpfc_set_disctmo(vport); 4156 } 4157 4158 phba->fc_stat.elsXmitRetry++; 4159 if (ndlp && delay) { 4160 phba->fc_stat.elsDelayRetry++; 4161 ndlp->nlp_retry = cmdiocb->retry; 4162 4163 /* delay is specified in milliseconds */ 4164 mod_timer(&ndlp->nlp_delayfunc, 4165 jiffies + msecs_to_jiffies(delay)); 4166 spin_lock_irq(&ndlp->lock); 4167 ndlp->nlp_flag |= NLP_DELAY_TMO; 4168 spin_unlock_irq(&ndlp->lock); 4169 4170 ndlp->nlp_prev_state = ndlp->nlp_state; 4171 if ((cmd == ELS_CMD_PRLI) || 4172 (cmd == ELS_CMD_NVMEPRLI)) 4173 lpfc_nlp_set_state(vport, ndlp, 4174 NLP_STE_PRLI_ISSUE); 4175 else 4176 lpfc_nlp_set_state(vport, ndlp, 4177 NLP_STE_NPR_NODE); 4178 ndlp->nlp_last_elscmd = cmd; 4179 4180 return 1; 4181 } 4182 switch (cmd) { 4183 case ELS_CMD_FLOGI: 4184 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4185 return 1; 4186 case ELS_CMD_FDISC: 4187 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4188 return 1; 4189 case ELS_CMD_PLOGI: 4190 if (ndlp) 
{ 4191 ndlp->nlp_prev_state = ndlp->nlp_state; 4192 lpfc_nlp_set_state(vport, ndlp, 4193 NLP_STE_PLOGI_ISSUE); 4194 } 4195 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4196 return 1; 4197 case ELS_CMD_ADISC: 4198 ndlp->nlp_prev_state = ndlp->nlp_state; 4199 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4200 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4201 return 1; 4202 case ELS_CMD_PRLI: 4203 case ELS_CMD_NVMEPRLI: 4204 ndlp->nlp_prev_state = ndlp->nlp_state; 4205 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4206 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4207 return 1; 4208 case ELS_CMD_LOGO: 4209 ndlp->nlp_prev_state = ndlp->nlp_state; 4210 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4211 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4212 return 1; 4213 } 4214 } 4215 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4216 if (logerr) { 4217 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4218 "0137 No retry ELS command x%x to remote " 4219 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4220 cmd, did, irsp->ulpStatus, 4221 irsp->un.ulpWord[4]); 4222 } 4223 else { 4224 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4225 "0108 No retry ELS command x%x to remote " 4226 "NPORT x%x Retried:%d Error:x%x/%x\n", 4227 cmd, did, cmdiocb->retry, irsp->ulpStatus, 4228 irsp->un.ulpWord[4]); 4229 } 4230 return 0; 4231 } 4232 4233 /** 4234 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4235 * @phba: pointer to lpfc hba data structure. 4236 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4237 * 4238 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4239 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4240 * checks to see whether there is a lpfc DMA buffer associated with the 4241 * response of the command IOCB. If so, it will be released before releasing 4242 * the lpfc DMA buffer associated with the IOCB itself. 4243 * 4244 * Return code 4245 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4246 **/ 4247 static int 4248 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 4249 { 4250 struct lpfc_dmabuf *buf_ptr; 4251 4252 /* Free the response before processing the command. */ 4253 if (!list_empty(&buf_ptr1->list)) { 4254 list_remove_head(&buf_ptr1->list, buf_ptr, 4255 struct lpfc_dmabuf, 4256 list); 4257 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4258 kfree(buf_ptr); 4259 } 4260 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 4261 kfree(buf_ptr1); 4262 return 0; 4263 } 4264 4265 /** 4266 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 4267 * @phba: pointer to lpfc hba data structure. 4268 * @buf_ptr: pointer to the lpfc dma buffer data structure. 4269 * 4270 * This routine releases the lpfc Direct Memory Access (DMA) buffer 4271 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 4272 * pool. 4273 * 4274 * Return code 4275 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4276 **/ 4277 static int 4278 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 4279 { 4280 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4281 kfree(buf_ptr); 4282 return 0; 4283 } 4284 4285 /** 4286 * lpfc_els_free_iocb - Free a command iocb and its associated resources 4287 * @phba: pointer to lpfc hba data structure. 4288 * @elsiocb: pointer to lpfc els command iocb data structure. 
4289 * 4290 * This routine frees a command IOCB and its associated resources. The 4291 * command IOCB data structure contains the reference to various associated 4292 * resources, these fields must be set to NULL if the associated reference 4293 * not present: 4294 * context1 - reference to ndlp 4295 * context2 - reference to cmd 4296 * context2->next - reference to rsp 4297 * context3 - reference to bpl 4298 * 4299 * It first properly decrements the reference count held on ndlp for the 4300 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not 4301 * set, it invokes the lpfc_els_free_data() routine to release the Direct 4302 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 4303 * adds the DMA buffer the @phba data structure for the delayed release. 4304 * If reference to the Buffer Pointer List (BPL) is present, the 4305 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 4306 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 4307 * invoked to release the IOCB data structure back to @phba IOCBQ list. 4308 * 4309 * Return code 4310 * 0 - Success (currently, always return 0) 4311 **/ 4312 int 4313 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 4314 { 4315 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 4316 4317 /* The I/O job is complete. Clear the context1 data. */ 4318 elsiocb->context1 = NULL; 4319 4320 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 4321 if (elsiocb->context2) { 4322 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 4323 /* Firmware could still be in progress of DMAing 4324 * payload, so don't free data buffer till after 4325 * a hbeat. 4326 */ 4327 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 4328 buf_ptr = elsiocb->context2; 4329 elsiocb->context2 = NULL; 4330 if (buf_ptr) { 4331 buf_ptr1 = NULL; 4332 spin_lock_irq(&phba->hbalock); 4333 if (!list_empty(&buf_ptr->list)) { 4334 list_remove_head(&buf_ptr->list, 4335 buf_ptr1, struct lpfc_dmabuf, 4336 list); 4337 INIT_LIST_HEAD(&buf_ptr1->list); 4338 list_add_tail(&buf_ptr1->list, 4339 &phba->elsbuf); 4340 phba->elsbuf_cnt++; 4341 } 4342 INIT_LIST_HEAD(&buf_ptr->list); 4343 list_add_tail(&buf_ptr->list, &phba->elsbuf); 4344 phba->elsbuf_cnt++; 4345 spin_unlock_irq(&phba->hbalock); 4346 } 4347 } else { 4348 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 4349 lpfc_els_free_data(phba, buf_ptr1); 4350 elsiocb->context2 = NULL; 4351 } 4352 } 4353 4354 if (elsiocb->context3) { 4355 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 4356 lpfc_els_free_bpl(phba, buf_ptr); 4357 elsiocb->context3 = NULL; 4358 } 4359 lpfc_sli_release_iocbq(phba, elsiocb); 4360 return 0; 4361 } 4362 4363 /** 4364 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 4365 * @phba: pointer to lpfc hba data structure. 4366 * @cmdiocb: pointer to lpfc command iocb data structure. 4367 * @rspiocb: pointer to lpfc response iocb data structure. 4368 * 4369 * This routine is the completion callback function to the Logout (LOGO) 4370 * Accept (ACC) Response ELS command. This routine is invoked to indicate 4371 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 4372 * release the ndlp if it has the last reference remaining (reference count 4373 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 4374 * field to NULL to inform the following lpfc_els_free_iocb() routine no 4375 * ndlp reference count needs to be decremented. 
Otherwise, the ndlp 4376 * reference use-count shall be decremented by the lpfc_els_free_iocb() 4377 * routine. Finally, the lpfc_els_free_iocb() is invoked to release the 4378 * IOCB data structure. 4379 **/ 4380 static void 4381 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4382 struct lpfc_iocbq *rspiocb) 4383 { 4384 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4385 struct lpfc_vport *vport = cmdiocb->vport; 4386 IOCB_t *irsp; 4387 4388 irsp = &rspiocb->iocb; 4389 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4390 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 4391 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 4392 /* ACC to LOGO completes to NPort <nlp_DID> */ 4393 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4394 "0109 ACC to LOGO completes to NPort x%x refcnt %d" 4395 "Data: x%x x%x x%x\n", 4396 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 4397 ndlp->nlp_state, ndlp->nlp_rpi); 4398 4399 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 4400 /* NPort Recovery mode or node is just allocated */ 4401 if (!lpfc_nlp_not_used(ndlp)) { 4402 /* If the ndlp is being used by another discovery 4403 * thread, just unregister the RPI. 4404 */ 4405 lpfc_unreg_rpi(vport, ndlp); 4406 } else { 4407 /* Indicate the node has already released, should 4408 * not reference to it from within lpfc_els_free_iocb. 4409 */ 4410 cmdiocb->context1 = NULL; 4411 } 4412 } 4413 4414 /* 4415 * The driver received a LOGO from the rport and has ACK'd it. 4416 * At this point, the driver is done so release the IOCB 4417 */ 4418 lpfc_els_free_iocb(phba, cmdiocb); 4419 lpfc_nlp_put(ndlp); 4420 } 4421 4422 /** 4423 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 4424 * @phba: pointer to lpfc hba data structure. 4425 * @pmb: pointer to the driver internal queue element for mailbox command. 4426 * 4427 * This routine is the completion callback function for unregister default 4428 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 4429 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 4430 * decrements the ndlp reference count held for this completion callback 4431 * function. After that, it invokes the lpfc_nlp_not_used() to check 4432 * whether there is only one reference left on the ndlp. If so, it will 4433 * perform one more decrement and trigger the release of the ndlp. 4434 **/ 4435 void 4436 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4437 { 4438 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 4439 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 4440 4441 pmb->ctx_buf = NULL; 4442 pmb->ctx_ndlp = NULL; 4443 4444 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4445 kfree(mp); 4446 mempool_free(pmb, phba->mbox_mem_pool); 4447 if (ndlp) { 4448 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4449 "0006 rpi x%x DID:%x flg:%x %d x%px\n", 4450 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4451 kref_read(&ndlp->kref), 4452 ndlp); 4453 /* This is the end of the default RPI cleanup logic for 4454 * this ndlp and it could get released. Clear the nlp_flags to 4455 * prevent any further processing. 4456 */ 4457 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4458 lpfc_nlp_put(ndlp); 4459 lpfc_nlp_not_used(ndlp); 4460 } 4461 4462 return; 4463 } 4464 4465 /** 4466 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 4467 * @phba: pointer to lpfc hba data structure. 
4468 * @cmdiocb: pointer to lpfc command iocb data structure.
4469 * @rspiocb: pointer to lpfc response iocb data structure.
4470 *
4471 * This routine is the completion callback function for an ELS Response IOCB
4472 * command. In the normal case, this callback function just properly sets the
4473 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
4474 * field in the command IOCB is not NULL, the referred mailbox command will
4475 * be sent out, and then the lpfc_els_free_iocb() routine is invoked to release
4476 * the IOCB. Under error conditions, such as when an LS_RJT is returned or a
4477 * link down event occurred during the discovery, the lpfc_nlp_not_used()
4478 * routine shall be invoked to try to release the ndlp if no other threads
4479 * are currently referring to it.
4480 **/
4481 static void
4482 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4483 struct lpfc_iocbq *rspiocb)
4484 {
4485 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4486 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
4487 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
4488 IOCB_t *irsp;
4489 uint8_t *pcmd;
4490 LPFC_MBOXQ_t *mbox = NULL;
4491 struct lpfc_dmabuf *mp = NULL;
4492 uint32_t ls_rjt = 0;
4493
4494 irsp = &rspiocb->iocb;
4495
4496 if (!vport) {
4497 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4498 "3177 ELS response failed\n");
4499 goto out;
4500 }
4501 if (cmdiocb->context_un.mbox)
4502 mbox = cmdiocb->context_un.mbox;
4503
4504 /* First determine if this is a LS_RJT cmpl. Note, this callback
4505 * function can have cmdiocb->context1 (ndlp) field set to NULL.
4506 */
4507 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
4508 if (ndlp && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
4509 /* A LS_RJT associated with Default RPI cleanup has its own
4510 * separate code path.
4511 */
4512 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
4513 ls_rjt = 1;
4514 }
4515
4516 /* Check to see if link went down during discovery */
4517 if (!ndlp || lpfc_els_chk_latt(vport)) {
4518 if (mbox) {
4519 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4520 if (mp) {
4521 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4522 kfree(mp);
4523 }
4524 mempool_free(mbox, phba->mbox_mem_pool);
4525 }
4526 if (ndlp && (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
4527 if (lpfc_nlp_not_used(ndlp)) {
4528 ndlp = NULL;
4529 /* Indicate the node has already been released,
4530 * should not reference to it from within
4531 * the routine lpfc_els_free_iocb.
4532 */ 4533 cmdiocb->context1 = NULL; 4534 } 4535 goto out; 4536 } 4537 4538 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4539 "ELS rsp cmpl: status:x%x/x%x did:x%x", 4540 irsp->ulpStatus, irsp->un.ulpWord[4], 4541 cmdiocb->iocb.un.elsreq64.remoteID); 4542 /* ELS response tag <ulpIoTag> completes */ 4543 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4544 "0110 ELS response tag x%x completes " 4545 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 4546 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 4547 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 4548 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4549 ndlp->nlp_rpi); 4550 if (mbox) { 4551 if ((rspiocb->iocb.ulpStatus == 0) && 4552 (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 4553 if (!lpfc_unreg_rpi(vport, ndlp) && 4554 (!(vport->fc_flag & FC_PT2PT))) { 4555 if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { 4556 lpfc_printf_vlog(vport, KERN_INFO, 4557 LOG_DISCOVERY, 4558 "0314 PLOGI recov " 4559 "DID x%x " 4560 "Data: x%x x%x x%x\n", 4561 ndlp->nlp_DID, 4562 ndlp->nlp_state, 4563 ndlp->nlp_rpi, 4564 ndlp->nlp_flag); 4565 mp = mbox->ctx_buf; 4566 if (mp) { 4567 lpfc_mbuf_free(phba, mp->virt, 4568 mp->phys); 4569 kfree(mp); 4570 } 4571 mempool_free(mbox, phba->mbox_mem_pool); 4572 goto out; 4573 } 4574 } 4575 4576 /* Increment reference count to ndlp to hold the 4577 * reference to ndlp for the callback function. 4578 */ 4579 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 4580 if (!mbox->ctx_ndlp) 4581 goto out; 4582 4583 mbox->vport = vport; 4584 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 4585 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 4586 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 4587 } 4588 else { 4589 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 4590 ndlp->nlp_prev_state = ndlp->nlp_state; 4591 lpfc_nlp_set_state(vport, ndlp, 4592 NLP_STE_REG_LOGIN_ISSUE); 4593 } 4594 4595 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 4596 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 4597 != MBX_NOT_FINISHED) 4598 goto out; 4599 4600 /* Decrement the ndlp reference count we 4601 * set for this failed mailbox command. 4602 */ 4603 lpfc_nlp_put(ndlp); 4604 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4605 4606 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 4607 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4608 "0138 ELS rsp: Cannot issue reg_login for x%x " 4609 "Data: x%x x%x x%x\n", 4610 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4611 ndlp->nlp_rpi); 4612 4613 if (lpfc_nlp_not_used(ndlp)) { 4614 ndlp = NULL; 4615 /* Indicate node has already been released, 4616 * should not reference to it from within 4617 * the routine lpfc_els_free_iocb. 4618 */ 4619 cmdiocb->context1 = NULL; 4620 } 4621 } else { 4622 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 4623 if (!lpfc_error_lost_link(irsp) && 4624 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 4625 if (lpfc_nlp_not_used(ndlp)) { 4626 ndlp = NULL; 4627 /* Indicate node has already been 4628 * released, should not reference 4629 * to it from within the routine 4630 * lpfc_els_free_iocb. 
4631 */ 4632 cmdiocb->context1 = NULL; 4633 } 4634 } 4635 } 4636 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 4637 if (mp) { 4638 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4639 kfree(mp); 4640 } 4641 mempool_free(mbox, phba->mbox_mem_pool); 4642 } 4643 out: 4644 if (ndlp && shost) { 4645 spin_lock_irq(&ndlp->lock); 4646 if (mbox) 4647 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 4648 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 4649 spin_unlock_irq(&ndlp->lock); 4650 4651 /* If the node is not being used by another discovery thread, 4652 * and we are sending a reject, we are done with it. 4653 * Release driver reference count here and free associated 4654 * resources. 4655 */ 4656 if (ls_rjt) 4657 if (lpfc_nlp_not_used(ndlp)) 4658 /* Indicate node has already been released, 4659 * should not reference to it from within 4660 * the routine lpfc_els_free_iocb. 4661 */ 4662 cmdiocb->context1 = NULL; 4663 } 4664 4665 /* Release the originating I/O reference. */ 4666 lpfc_els_free_iocb(phba, cmdiocb); 4667 lpfc_nlp_put(ndlp); 4668 return; 4669 } 4670 4671 /** 4672 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 4673 * @vport: pointer to a host virtual N_Port data structure. 4674 * @flag: the els command code to be accepted. 4675 * @oldiocb: pointer to the original lpfc command iocb data structure. 4676 * @ndlp: pointer to a node-list data structure. 4677 * @mbox: pointer to the driver internal queue element for mailbox command. 4678 * 4679 * This routine prepares and issues an Accept (ACC) response IOCB 4680 * command. It uses the @flag to properly set up the IOCB field for the 4681 * specific ACC response command to be issued and invokes the 4682 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 4683 * @mbox pointer is passed in, it will be put into the context_un.mbox 4684 * field of the IOCB for the completion callback function to issue the 4685 * mailbox command to the HBA later when callback is invoked. 4686 * 4687 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4688 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4689 * will be stored into the context1 field of the IOCB for the completion 4690 * callback function to the corresponding response ELS IOCB command. 
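 *
 * The @flag also selects the ACC payload built below: a bare ACC command
 * word for ELS_CMD_ACC, a service parameter page for ELS_CMD_FLOGI and
 * ELS_CMD_PLOGI, and an echo of the received PRLO page (with the accept
 * response code set to PRLO_REQ_EXECUTED) for ELS_CMD_PRLO.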
4691 * 4692 * Return code 4693 * 0 - Successfully issued acc response 4694 * 1 - Failed to issue acc response 4695 **/ 4696 int 4697 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 4698 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4699 LPFC_MBOXQ_t *mbox) 4700 { 4701 struct lpfc_hba *phba = vport->phba; 4702 IOCB_t *icmd; 4703 IOCB_t *oldcmd; 4704 struct lpfc_iocbq *elsiocb; 4705 uint8_t *pcmd; 4706 struct serv_parm *sp; 4707 uint16_t cmdsize; 4708 int rc; 4709 ELS_PKT *els_pkt_ptr; 4710 4711 oldcmd = &oldiocb->iocb; 4712 4713 switch (flag) { 4714 case ELS_CMD_ACC: 4715 cmdsize = sizeof(uint32_t); 4716 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4717 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4718 if (!elsiocb) { 4719 spin_lock_irq(&ndlp->lock); 4720 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4721 spin_unlock_irq(&ndlp->lock); 4722 return 1; 4723 } 4724 4725 icmd = &elsiocb->iocb; 4726 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4727 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4728 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4729 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4730 pcmd += sizeof(uint32_t); 4731 4732 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4733 "Issue ACC: did:x%x flg:x%x", 4734 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4735 break; 4736 case ELS_CMD_FLOGI: 4737 case ELS_CMD_PLOGI: 4738 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 4739 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4740 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4741 if (!elsiocb) 4742 return 1; 4743 4744 icmd = &elsiocb->iocb; 4745 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4746 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4747 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4748 4749 if (mbox) 4750 elsiocb->context_un.mbox = mbox; 4751 4752 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4753 pcmd += sizeof(uint32_t); 4754 sp = (struct serv_parm *)pcmd; 4755 4756 if (flag == ELS_CMD_FLOGI) { 4757 /* Copy the received service parameters back */ 4758 memcpy(sp, &phba->fc_fabparam, 4759 sizeof(struct serv_parm)); 4760 4761 /* Clear the F_Port bit */ 4762 sp->cmn.fPort = 0; 4763 4764 /* Mark all class service parameters as invalid */ 4765 sp->cls1.classValid = 0; 4766 sp->cls2.classValid = 0; 4767 sp->cls3.classValid = 0; 4768 sp->cls4.classValid = 0; 4769 4770 /* Copy our worldwide names */ 4771 memcpy(&sp->portName, &vport->fc_sparam.portName, 4772 sizeof(struct lpfc_name)); 4773 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 4774 sizeof(struct lpfc_name)); 4775 } else { 4776 memcpy(pcmd, &vport->fc_sparam, 4777 sizeof(struct serv_parm)); 4778 4779 sp->cmn.valid_vendor_ver_level = 0; 4780 memset(sp->un.vendorVersion, 0, 4781 sizeof(sp->un.vendorVersion)); 4782 sp->cmn.bbRcvSizeMsb &= 0xF; 4783 4784 /* If our firmware supports this feature, convey that 4785 * info to the target using the vendor specific field. 
4786 */
4787 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
4788 sp->cmn.valid_vendor_ver_level = 1;
4789 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
4790 sp->un.vv.flags =
4791 cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
4792 }
4793 }
4794
4795 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4796 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
4797 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4798 break;
4799 case ELS_CMD_PRLO:
4800 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
4801 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4802 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
4803 if (!elsiocb)
4804 return 1;
4805
4806 icmd = &elsiocb->iocb;
4807 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4808 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4809 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4810
4811 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
4812 sizeof(uint32_t) + sizeof(PRLO));
4813 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4814 els_pkt_ptr = (ELS_PKT *) pcmd;
4815 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
4816
4817 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4818 "Issue ACC PRLO: did:x%x flg:x%x",
4819 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4820 break;
4821 default:
4822 return 1;
4823 }
4824 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
4825 spin_lock_irq(&ndlp->lock);
4826 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4827 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
4828 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4829 spin_unlock_irq(&ndlp->lock);
4830 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4831 } else {
4832 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4833 }
4834
4835 phba->fc_stat.elsXmitACC++;
4836 elsiocb->context1 = lpfc_nlp_get(ndlp);
4837 if (!elsiocb->context1)
4838 goto node_err;
4839
4840 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4841 if (rc == IOCB_ERROR)
4842 goto io_err;
4843
4844 /* Xmit ELS ACC response tag <ulpIoTag> */
4845 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4846 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
4847 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
4848 "RPI: x%x, fc_flag x%x\n",
4849 rc, elsiocb->iotag, elsiocb->sli4_xritag,
4850 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4851 ndlp->nlp_rpi, vport->fc_flag);
4852 return 0;
4853
4854 io_err:
4855 lpfc_nlp_put(ndlp);
4856 node_err:
4857 lpfc_els_free_iocb(phba, elsiocb);
4858 return 1;
4859 }
4860
4861 /**
4862 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
4863 * @vport: pointer to a virtual N_Port data structure.
4864 * @rejectError: reject response to issue
4865 * @oldiocb: pointer to the original lpfc command iocb data structure.
4866 * @ndlp: pointer to a node-list data structure.
4867 * @mbox: pointer to the driver internal queue element for mailbox command.
4868 *
4869 * This routine prepares and issues a Reject (RJT) response IOCB
4870 * command. If a @mbox pointer is passed in, it will be put into the
4871 * context_un.mbox field of the IOCB for the completion callback function
4872 * to issue to the HBA later.
4873 *
4874 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4875 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4876 * will be stored into the context1 field of the IOCB for the completion
4877 * callback function to the reject response ELS IOCB command.
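 *
 * The LS_RJT payload built below is two words: the ELS_CMD_LS_RJT command
 * word followed by @rejectError, which carries the reject reason and
 * explanation codes.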
4878 * 4879 * Return code 4880 * 0 - Successfully issued reject response 4881 * 1 - Failed to issue reject response 4882 **/ 4883 int 4884 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 4885 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4886 LPFC_MBOXQ_t *mbox) 4887 { 4888 int rc; 4889 struct lpfc_hba *phba = vport->phba; 4890 IOCB_t *icmd; 4891 IOCB_t *oldcmd; 4892 struct lpfc_iocbq *elsiocb; 4893 uint8_t *pcmd; 4894 uint16_t cmdsize; 4895 4896 cmdsize = 2 * sizeof(uint32_t); 4897 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4898 ndlp->nlp_DID, ELS_CMD_LS_RJT); 4899 if (!elsiocb) 4900 return 1; 4901 4902 icmd = &elsiocb->iocb; 4903 oldcmd = &oldiocb->iocb; 4904 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4905 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4906 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4907 4908 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 4909 pcmd += sizeof(uint32_t); 4910 *((uint32_t *) (pcmd)) = rejectError; 4911 4912 if (mbox) 4913 elsiocb->context_un.mbox = mbox; 4914 4915 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 4916 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4917 "0129 Xmit ELS RJT x%x response tag x%x " 4918 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 4919 "rpi x%x\n", 4920 rejectError, elsiocb->iotag, 4921 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 4922 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 4923 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4924 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 4925 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 4926 4927 phba->fc_stat.elsXmitLSRJT++; 4928 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4929 elsiocb->context1 = lpfc_nlp_get(ndlp); 4930 if (!elsiocb->context1) 4931 goto node_err; 4932 4933 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4934 if (rc == IOCB_ERROR) 4935 goto io_err; 4936 4937 return 0; 4938 4939 io_err: 4940 lpfc_nlp_put(ndlp); 4941 node_err: 4942 lpfc_els_free_iocb(phba, elsiocb); 4943 return 1; 4944 } 4945 4946 /** 4947 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 4948 * @vport: pointer to a virtual N_Port data structure. 4949 * @oldiocb: pointer to the original lpfc command iocb data structure. 4950 * @ndlp: pointer to a node-list data structure. 4951 * 4952 * This routine prepares and issues an Accept (ACC) response to Address 4953 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 4954 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4955 * 4956 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4957 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4958 * will be stored into the context1 field of the IOCB for the completion 4959 * callback function to the ADISC Accept response ELS IOCB command. 
4960 * 4961 * Return code 4962 * 0 - Successfully issued acc adisc response 4963 * 1 - Failed to issue adisc acc response 4964 **/ 4965 int 4966 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4967 struct lpfc_nodelist *ndlp) 4968 { 4969 struct lpfc_hba *phba = vport->phba; 4970 ADISC *ap; 4971 IOCB_t *icmd, *oldcmd; 4972 struct lpfc_iocbq *elsiocb; 4973 uint8_t *pcmd; 4974 uint16_t cmdsize; 4975 int rc; 4976 4977 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 4978 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4979 ndlp->nlp_DID, ELS_CMD_ACC); 4980 if (!elsiocb) 4981 return 1; 4982 4983 icmd = &elsiocb->iocb; 4984 oldcmd = &oldiocb->iocb; 4985 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4986 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4987 4988 /* Xmit ADISC ACC response tag <ulpIoTag> */ 4989 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4990 "0130 Xmit ADISC ACC response iotag x%x xri: " 4991 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 4992 elsiocb->iotag, elsiocb->iocb.ulpContext, 4993 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4994 ndlp->nlp_rpi); 4995 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4996 4997 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4998 pcmd += sizeof(uint32_t); 4999 5000 ap = (ADISC *) (pcmd); 5001 ap->hardAL_PA = phba->fc_pref_ALPA; 5002 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5003 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5004 ap->DID = be32_to_cpu(vport->fc_myDID); 5005 5006 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5007 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 5008 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5009 5010 phba->fc_stat.elsXmitACC++; 5011 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5012 elsiocb->context1 = lpfc_nlp_get(ndlp); 5013 if (!elsiocb->context1) 5014 goto node_err; 5015 5016 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5017 if (rc == IOCB_ERROR) 5018 goto io_err; 5019 5020 /* Xmit ELS ACC response tag <ulpIoTag> */ 5021 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5022 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 5023 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 5024 "RPI: x%x, fc_flag x%x\n", 5025 rc, elsiocb->iotag, elsiocb->sli4_xritag, 5026 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5027 ndlp->nlp_rpi, vport->fc_flag); 5028 return 0; 5029 5030 io_err: 5031 lpfc_nlp_put(ndlp); 5032 node_err: 5033 lpfc_els_free_iocb(phba, elsiocb); 5034 return 1; 5035 } 5036 5037 /** 5038 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 5039 * @vport: pointer to a virtual N_Port data structure. 5040 * @oldiocb: pointer to the original lpfc command iocb data structure. 5041 * @ndlp: pointer to a node-list data structure. 5042 * 5043 * This routine prepares and issues an Accept (ACC) response to Process 5044 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 5045 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 5046 * 5047 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5048 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5049 * will be stored into the context1 field of the IOCB for the completion 5050 * callback function to the PRLI Accept response ELS IOCB command. 
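 *
 * The FC4 type in word 1 of the received PRLI determines whether an FCP
 * (PRLI) or an NVME (struct lpfc_nvme_prli) accept page is built below.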
5051 * 5052 * Return code 5053 * 0 - Successfully issued acc prli response 5054 * 1 - Failed to issue acc prli response 5055 **/ 5056 int 5057 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 5058 struct lpfc_nodelist *ndlp) 5059 { 5060 struct lpfc_hba *phba = vport->phba; 5061 PRLI *npr; 5062 struct lpfc_nvme_prli *npr_nvme; 5063 lpfc_vpd_t *vpd; 5064 IOCB_t *icmd; 5065 IOCB_t *oldcmd; 5066 struct lpfc_iocbq *elsiocb; 5067 uint8_t *pcmd; 5068 uint16_t cmdsize; 5069 uint32_t prli_fc4_req, *req_payload; 5070 struct lpfc_dmabuf *req_buf; 5071 int rc; 5072 u32 elsrspcmd; 5073 5074 /* Need the incoming PRLI payload to determine if the ACC is for an 5075 * FC4 or NVME PRLI type. The PRLI type is at word 1. 5076 */ 5077 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 5078 req_payload = (((uint32_t *)req_buf->virt) + 1); 5079 5080 /* PRLI type payload is at byte 3 for FCP or NVME. */ 5081 prli_fc4_req = be32_to_cpu(*req_payload); 5082 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 5083 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5084 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 5085 prli_fc4_req, *((uint32_t *)req_payload)); 5086 5087 if (prli_fc4_req == PRLI_FCP_TYPE) { 5088 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 5089 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 5090 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5091 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 5092 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 5093 } else { 5094 return 1; 5095 } 5096 5097 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5098 ndlp->nlp_DID, elsrspcmd); 5099 if (!elsiocb) 5100 return 1; 5101 5102 icmd = &elsiocb->iocb; 5103 oldcmd = &oldiocb->iocb; 5104 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5105 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5106 5107 /* Xmit PRLI ACC response tag <ulpIoTag> */ 5108 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5109 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 5110 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5111 elsiocb->iotag, elsiocb->iocb.ulpContext, 5112 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5113 ndlp->nlp_rpi); 5114 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5115 memset(pcmd, 0, cmdsize); 5116 5117 *((uint32_t *)(pcmd)) = elsrspcmd; 5118 pcmd += sizeof(uint32_t); 5119 5120 /* For PRLI, remainder of payload is PRLI parameter page */ 5121 vpd = &phba->vpd; 5122 5123 if (prli_fc4_req == PRLI_FCP_TYPE) { 5124 /* 5125 * If the remote port is a target and our firmware version 5126 * is 3.20 or later, set the following bits for FC-TAPE 5127 * support. 
5128 */ 5129 npr = (PRLI *) pcmd; 5130 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5131 (vpd->rev.feaLevelHigh >= 0x02)) { 5132 npr->ConfmComplAllowed = 1; 5133 npr->Retry = 1; 5134 npr->TaskRetryIdReq = 1; 5135 } 5136 npr->acceptRspCode = PRLI_REQ_EXECUTED; 5137 npr->estabImagePair = 1; 5138 npr->readXferRdyDis = 1; 5139 npr->ConfmComplAllowed = 1; 5140 npr->prliType = PRLI_FCP_TYPE; 5141 npr->initiatorFunc = 1; 5142 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5143 /* Respond with an NVME PRLI Type */ 5144 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 5145 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 5146 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 5147 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 5148 if (phba->nvmet_support) { 5149 bf_set(prli_tgt, npr_nvme, 1); 5150 bf_set(prli_disc, npr_nvme, 1); 5151 if (phba->cfg_nvme_enable_fb) { 5152 bf_set(prli_fba, npr_nvme, 1); 5153 5154 /* TBD. Target mode needs to post buffers 5155 * that support the configured first burst 5156 * byte size. 5157 */ 5158 bf_set(prli_fb_sz, npr_nvme, 5159 phba->cfg_nvmet_fb_size); 5160 } 5161 } else { 5162 bf_set(prli_init, npr_nvme, 1); 5163 } 5164 5165 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 5166 "6015 NVME issue PRLI ACC word1 x%08x " 5167 "word4 x%08x word5 x%08x flag x%x, " 5168 "fcp_info x%x nlp_type x%x\n", 5169 npr_nvme->word1, npr_nvme->word4, 5170 npr_nvme->word5, ndlp->nlp_flag, 5171 ndlp->nlp_fcp_info, ndlp->nlp_type); 5172 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 5173 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 5174 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 5175 } else 5176 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5177 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 5178 prli_fc4_req, ndlp->nlp_fc4_type, 5179 ndlp->nlp_DID); 5180 5181 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5182 "Issue ACC PRLI: did:x%x flg:x%x", 5183 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5184 5185 phba->fc_stat.elsXmitACC++; 5186 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5187 elsiocb->context1 = lpfc_nlp_get(ndlp); 5188 if (!elsiocb->context1) 5189 goto node_err; 5190 5191 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5192 if (rc == IOCB_ERROR) 5193 goto io_err; 5194 return 0; 5195 5196 io_err: 5197 lpfc_nlp_put(ndlp); 5198 node_err: 5199 lpfc_els_free_iocb(phba, elsiocb); 5200 return 1; 5201 } 5202 5203 /** 5204 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 5205 * @vport: pointer to a virtual N_Port data structure. 5206 * @format: rnid command format. 5207 * @oldiocb: pointer to the original lpfc command iocb data structure. 5208 * @ndlp: pointer to a node-list data structure. 5209 * 5210 * This routine issues a Request Node Identification Data (RNID) Accept 5211 * (ACC) response. It constructs the RNID ACC response command according to 5212 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 5213 * issue the response. Note that this command does not need to hold the ndlp 5214 * reference count for the callback. So, the ndlp reference count taken by 5215 * the lpfc_prep_els_iocb() routine is put back and the context1 field of 5216 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that 5217 * there is no ndlp reference available. 
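 *
 * The accept carries the common identification block (the local port and
 * node names) for the supported formats; when @format is
 * RNID_TOPOLOGY_DISC the topology-discovery block is appended as well,
 * while an unrecognized format is answered with zeroed common and
 * specific lengths.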
5218 * 5219 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 5220 * will be incremented by 1 for holding the ndlp and the reference to ndlp 5221 * will be stored into the context1 field of the IOCB for the completion 5222 * callback function. However, for the RNID Accept Response ELS command, 5223 * this is undone later by this routine after the IOCB is allocated. 5224 * 5225 * Return code 5226 * 0 - Successfully issued acc rnid response 5227 * 1 - Failed to issue acc rnid response 5228 **/ 5229 static int 5230 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 5231 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5232 { 5233 struct lpfc_hba *phba = vport->phba; 5234 RNID *rn; 5235 IOCB_t *icmd, *oldcmd; 5236 struct lpfc_iocbq *elsiocb; 5237 uint8_t *pcmd; 5238 uint16_t cmdsize; 5239 int rc; 5240 5241 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 5242 + (2 * sizeof(struct lpfc_name)); 5243 if (format) 5244 cmdsize += sizeof(RNID_TOP_DISC); 5245 5246 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5247 ndlp->nlp_DID, ELS_CMD_ACC); 5248 if (!elsiocb) 5249 return 1; 5250 5251 icmd = &elsiocb->iocb; 5252 oldcmd = &oldiocb->iocb; 5253 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5254 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5255 5256 /* Xmit RNID ACC response tag <ulpIoTag> */ 5257 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5258 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 5259 elsiocb->iotag, elsiocb->iocb.ulpContext); 5260 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5261 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5262 pcmd += sizeof(uint32_t); 5263 5264 memset(pcmd, 0, sizeof(RNID)); 5265 rn = (RNID *) (pcmd); 5266 rn->Format = format; 5267 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 5268 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5269 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5270 switch (format) { 5271 case 0: 5272 rn->SpecificLen = 0; 5273 break; 5274 case RNID_TOPOLOGY_DISC: 5275 rn->SpecificLen = sizeof(RNID_TOP_DISC); 5276 memcpy(&rn->un.topologyDisc.portName, 5277 &vport->fc_portname, sizeof(struct lpfc_name)); 5278 rn->un.topologyDisc.unitType = RNID_HBA; 5279 rn->un.topologyDisc.physPort = 0; 5280 rn->un.topologyDisc.attachedNodes = 0; 5281 break; 5282 default: 5283 rn->CommonLen = 0; 5284 rn->SpecificLen = 0; 5285 break; 5286 } 5287 5288 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5289 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 5290 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5291 5292 phba->fc_stat.elsXmitACC++; 5293 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5294 elsiocb->context1 = lpfc_nlp_get(ndlp); 5295 if (!elsiocb->context1) 5296 goto node_err; 5297 5298 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5299 if (rc == IOCB_ERROR) 5300 goto io_err; 5301 5302 return 0; 5303 5304 io_err: 5305 lpfc_nlp_put(ndlp); 5306 node_err: 5307 lpfc_els_free_iocb(phba, elsiocb); 5308 return 1; 5309 } 5310 5311 /** 5312 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 5313 * @vport: pointer to a virtual N_Port data structure. 5314 * @iocb: pointer to the lpfc command iocb data structure. 5315 * @ndlp: pointer to a node-list data structure. 
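 *
 * This routine handles a received Reinstate Recovery Qualifier (RRQ) by
 * locating the local active RRQ that matches the exchange named in the
 * payload (the OX_ID when the RRQ names this port as the exchange
 * originator, otherwise the RX_ID) and clearing it via
 * lpfc_clr_rrq_active().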
5316 * 5317 * Return 5318 **/ 5319 static void 5320 lpfc_els_clear_rrq(struct lpfc_vport *vport, 5321 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 5322 { 5323 struct lpfc_hba *phba = vport->phba; 5324 uint8_t *pcmd; 5325 struct RRQ *rrq; 5326 uint16_t rxid; 5327 uint16_t xri; 5328 struct lpfc_node_rrq *prrq; 5329 5330 5331 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 5332 pcmd += sizeof(uint32_t); 5333 rrq = (struct RRQ *)pcmd; 5334 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 5335 rxid = bf_get(rrq_rxid, rrq); 5336 5337 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5338 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 5339 " x%x x%x\n", 5340 be32_to_cpu(bf_get(rrq_did, rrq)), 5341 bf_get(rrq_oxid, rrq), 5342 rxid, 5343 iocb->iotag, iocb->iocb.ulpContext); 5344 5345 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5346 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 5347 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 5348 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 5349 xri = bf_get(rrq_oxid, rrq); 5350 else 5351 xri = rxid; 5352 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 5353 if (prrq) 5354 lpfc_clr_rrq_active(phba, xri, prrq); 5355 return; 5356 } 5357 5358 /** 5359 * lpfc_els_rsp_echo_acc - Issue echo acc response 5360 * @vport: pointer to a virtual N_Port data structure. 5361 * @data: pointer to echo data to return in the accept. 5362 * @oldiocb: pointer to the original lpfc command iocb data structure. 5363 * @ndlp: pointer to a node-list data structure. 5364 * 5365 * Return code 5366 * 0 - Successfully issued acc echo response 5367 * 1 - Failed to issue acc echo response 5368 **/ 5369 static int 5370 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 5371 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5372 { 5373 struct lpfc_hba *phba = vport->phba; 5374 struct lpfc_iocbq *elsiocb; 5375 uint8_t *pcmd; 5376 uint16_t cmdsize; 5377 int rc; 5378 5379 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 5380 5381 /* The accumulated length can exceed the BPL_SIZE. 
For
5382 * now, use this as the limit
5383 */
5384 if (cmdsize > LPFC_BPL_SIZE)
5385 cmdsize = LPFC_BPL_SIZE;
5386 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5387 ndlp->nlp_DID, ELS_CMD_ACC);
5388 if (!elsiocb)
5389 return 1;
5390
5391 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */
5392 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
5393
5394 /* Xmit ECHO ACC response tag <ulpIoTag> */
5395 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5396 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
5397 elsiocb->iotag, elsiocb->iocb.ulpContext);
5398 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5399 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5400 pcmd += sizeof(uint32_t);
5401 memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
5402
5403 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5404 "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
5405 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
5406
5407 phba->fc_stat.elsXmitACC++;
5408 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5409 elsiocb->context1 = lpfc_nlp_get(ndlp);
5410 if (!elsiocb->context1)
5411 goto node_err;
5412
5413 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5414 if (rc == IOCB_ERROR)
5415 goto io_err;
5416 return 0;
5417
5418 io_err:
5419 lpfc_nlp_put(ndlp);
5420 node_err:
5421 lpfc_els_free_iocb(phba, elsiocb);
5422 return 1;
5423 }
5424
5425 /**
5426 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
5427 * @vport: pointer to a host virtual N_Port data structure.
5428 *
5429 * This routine issues Address Discover (ADISC) ELS commands to those
5430 * N_Ports of the @vport that are in node port recovery state and have not
5431 * yet been sent an ADISC. Each time an ELS ADISC IOCB is issued by invoking
5432 * the lpfc_issue_els_adisc() routine, the per-@vport discovery count
5433 * (num_disc_nodes) is incremented. If num_disc_nodes reaches the
5434 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
5435 * set in the @vport fc_flag and issuing of the remaining ADISC IOCBs is
5436 * deferred for a later pass. Otherwise, after walking through all the
5437 * ndlps of the @vport without issuing an ADISC, the FC_NLP_MORE bit is
5438 * cleared from the @vport fc_flag to indicate that no more ADISC commands
5439 * need to be sent.
5440 *
5441 * Return code
5442 * The number of N_Ports with adisc issued.
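 *
 * Note: only NPR nodes with both NLP_NPR_2B_DISC and NLP_NPR_ADISC set are
 * considered here; NLP_NPR_ADISC is cleared under the ndlp lock before the
 * ADISC is issued so a node is not picked up a second time.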
5443 **/
5444 int
5445 lpfc_els_disc_adisc(struct lpfc_vport *vport)
5446 {
5447 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5448 struct lpfc_nodelist *ndlp, *next_ndlp;
5449 int sentadisc = 0;
5450
5451 /* go thru NPR nodes and issue any remaining ELS ADISCs */
5452 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5453 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5454 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5455 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
5456 spin_lock_irq(&ndlp->lock);
5457 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5458 spin_unlock_irq(&ndlp->lock);
5459 ndlp->nlp_prev_state = ndlp->nlp_state;
5460 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5461 lpfc_issue_els_adisc(vport, ndlp, 0);
5462 sentadisc++;
5463 vport->num_disc_nodes++;
5464 if (vport->num_disc_nodes >=
5465 vport->cfg_discovery_threads) {
5466 spin_lock_irq(shost->host_lock);
5467 vport->fc_flag |= FC_NLP_MORE;
5468 spin_unlock_irq(shost->host_lock);
5469 break;
5470 }
5471 }
5472 }
5473 if (sentadisc == 0) {
5474 spin_lock_irq(shost->host_lock);
5475 vport->fc_flag &= ~FC_NLP_MORE;
5476 spin_unlock_irq(shost->host_lock);
5477 }
5478 return sentadisc;
5479 }
5480
5481 /**
5482 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
5483 * @vport: pointer to a host virtual N_Port data structure.
5484 *
5485 * This routine issues Port Login (PLOGI) ELS commands to the N_Ports of
5486 * a @vport that are in node port recovery state. Each time an ELS PLOGI
5487 * IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
5488 * per-@vport discovery count (num_disc_nodes) is incremented. If
5489 * num_disc_nodes reaches the pre-configured threshold
5490 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
5491 * fc_flag and issuing of the remaining PLOGI IOCBs is deferred for a
5492 * later pass. On the other hand, after walking through all the ndlps of
5493 * the @vport without issuing a PLOGI, the FC_NLP_MORE bit is cleared
5494 * from the @vport fc_flag to indicate that no more PLOGI commands need
5495 * to be sent.
5496 *
5497 * Return code
5498 * The number of N_Ports with plogi issued.
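 *
 * Note: only NPR nodes marked NLP_NPR_2B_DISC that are not waiting on a
 * delayed-retry timer (NLP_DELAY_TMO) and are not ADISC candidates
 * (NLP_NPR_ADISC) receive a PLOGI here; ADISC-capable nodes are handled by
 * lpfc_els_disc_adisc() instead.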
5499 **/ 5500 int 5501 lpfc_els_disc_plogi(struct lpfc_vport *vport) 5502 { 5503 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5504 struct lpfc_nodelist *ndlp, *next_ndlp; 5505 int sentplogi = 0; 5506 5507 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 5508 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5509 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 5510 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 5511 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 5512 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 5513 ndlp->nlp_prev_state = ndlp->nlp_state; 5514 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 5515 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5516 sentplogi++; 5517 vport->num_disc_nodes++; 5518 if (vport->num_disc_nodes >= 5519 vport->cfg_discovery_threads) { 5520 spin_lock_irq(shost->host_lock); 5521 vport->fc_flag |= FC_NLP_MORE; 5522 spin_unlock_irq(shost->host_lock); 5523 break; 5524 } 5525 } 5526 } 5527 5528 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5529 "6452 Discover PLOGI %d flag x%x\n", 5530 sentplogi, vport->fc_flag); 5531 5532 if (sentplogi) { 5533 lpfc_set_disctmo(vport); 5534 } 5535 else { 5536 spin_lock_irq(shost->host_lock); 5537 vport->fc_flag &= ~FC_NLP_MORE; 5538 spin_unlock_irq(shost->host_lock); 5539 } 5540 return sentplogi; 5541 } 5542 5543 static uint32_t 5544 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 5545 uint32_t word0) 5546 { 5547 5548 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 5549 desc->payload.els_req = word0; 5550 desc->length = cpu_to_be32(sizeof(desc->payload)); 5551 5552 return sizeof(struct fc_rdp_link_service_desc); 5553 } 5554 5555 static uint32_t 5556 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 5557 uint8_t *page_a0, uint8_t *page_a2) 5558 { 5559 uint16_t wavelength; 5560 uint16_t temperature; 5561 uint16_t rx_power; 5562 uint16_t tx_bias; 5563 uint16_t tx_power; 5564 uint16_t vcc; 5565 uint16_t flag = 0; 5566 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 5567 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 5568 5569 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 5570 5571 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 5572 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 5573 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 5574 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 5575 5576 if ((trasn_code_byte4->fc_sw_laser) || 5577 (trasn_code_byte5->fc_sw_laser_sl) || 5578 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 5579 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 5580 } else if (trasn_code_byte4->fc_lw_laser) { 5581 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 5582 page_a0[SSF_WAVELENGTH_B0]; 5583 if (wavelength == SFP_WAVELENGTH_LC1310) 5584 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 5585 if (wavelength == SFP_WAVELENGTH_LL1550) 5586 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 5587 } 5588 /* check if its SFP+ */ 5589 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 5590 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 5591 << SFP_FLAG_CT_SHIFT; 5592 5593 /* check if its OPTICAL */ 5594 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
5595 SFP_FLAG_IS_OPTICAL_PORT : 0) 5596 << SFP_FLAG_IS_OPTICAL_SHIFT; 5597 5598 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 5599 page_a2[SFF_TEMPERATURE_B0]); 5600 vcc = (page_a2[SFF_VCC_B1] << 8 | 5601 page_a2[SFF_VCC_B0]); 5602 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 5603 page_a2[SFF_TXPOWER_B0]); 5604 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 5605 page_a2[SFF_TX_BIAS_CURRENT_B0]); 5606 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 5607 page_a2[SFF_RXPOWER_B0]); 5608 desc->sfp_info.temperature = cpu_to_be16(temperature); 5609 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 5610 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 5611 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 5612 desc->sfp_info.vcc = cpu_to_be16(vcc); 5613 5614 desc->sfp_info.flags = cpu_to_be16(flag); 5615 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 5616 5617 return sizeof(struct fc_rdp_sfp_desc); 5618 } 5619 5620 static uint32_t 5621 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 5622 READ_LNK_VAR *stat) 5623 { 5624 uint32_t type; 5625 5626 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 5627 5628 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 5629 5630 desc->info.port_type = cpu_to_be32(type); 5631 5632 desc->info.link_status.link_failure_cnt = 5633 cpu_to_be32(stat->linkFailureCnt); 5634 desc->info.link_status.loss_of_synch_cnt = 5635 cpu_to_be32(stat->lossSyncCnt); 5636 desc->info.link_status.loss_of_signal_cnt = 5637 cpu_to_be32(stat->lossSignalCnt); 5638 desc->info.link_status.primitive_seq_proto_err = 5639 cpu_to_be32(stat->primSeqErrCnt); 5640 desc->info.link_status.invalid_trans_word = 5641 cpu_to_be32(stat->invalidXmitWord); 5642 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 5643 5644 desc->length = cpu_to_be32(sizeof(desc->info)); 5645 5646 return sizeof(struct fc_rdp_link_error_status_desc); 5647 } 5648 5649 static uint32_t 5650 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 5651 struct lpfc_vport *vport) 5652 { 5653 uint32_t bbCredit; 5654 5655 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 5656 5657 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 5658 (vport->fc_sparam.cmn.bbCreditMsb << 8); 5659 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 5660 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 5661 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 5662 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 5663 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 5664 } else { 5665 desc->bbc_info.attached_port_bbc = 0; 5666 } 5667 5668 desc->bbc_info.rtt = 0; 5669 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 5670 5671 return sizeof(struct fc_rdp_bbc_desc); 5672 } 5673 5674 static uint32_t 5675 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 5676 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 5677 { 5678 uint32_t flags = 0; 5679 5680 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5681 5682 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 5683 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 5684 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 5685 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 5686 5687 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5688 flags |= RDP_OET_HIGH_ALARM; 5689 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5690 flags |= RDP_OET_LOW_ALARM; 5691 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5692 flags |= RDP_OET_HIGH_WARNING; 5693 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5694 
flags |= RDP_OET_LOW_WARNING; 5695 5696 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 5697 desc->oed_info.function_flags = cpu_to_be32(flags); 5698 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5699 return sizeof(struct fc_rdp_oed_sfp_desc); 5700 } 5701 5702 static uint32_t 5703 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 5704 struct fc_rdp_oed_sfp_desc *desc, 5705 uint8_t *page_a2) 5706 { 5707 uint32_t flags = 0; 5708 5709 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5710 5711 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 5712 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 5713 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 5714 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 5715 5716 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5717 flags |= RDP_OET_HIGH_ALARM; 5718 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5719 flags |= RDP_OET_LOW_ALARM; 5720 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5721 flags |= RDP_OET_HIGH_WARNING; 5722 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5723 flags |= RDP_OET_LOW_WARNING; 5724 5725 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 5726 desc->oed_info.function_flags = cpu_to_be32(flags); 5727 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5728 return sizeof(struct fc_rdp_oed_sfp_desc); 5729 } 5730 5731 static uint32_t 5732 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 5733 struct fc_rdp_oed_sfp_desc *desc, 5734 uint8_t *page_a2) 5735 { 5736 uint32_t flags = 0; 5737 5738 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5739 5740 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 5741 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 5742 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 5743 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 5744 5745 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5746 flags |= RDP_OET_HIGH_ALARM; 5747 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 5748 flags |= RDP_OET_LOW_ALARM; 5749 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5750 flags |= RDP_OET_HIGH_WARNING; 5751 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 5752 flags |= RDP_OET_LOW_WARNING; 5753 5754 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 5755 desc->oed_info.function_flags = cpu_to_be32(flags); 5756 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5757 return sizeof(struct fc_rdp_oed_sfp_desc); 5758 } 5759 5760 static uint32_t 5761 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 5762 struct fc_rdp_oed_sfp_desc *desc, 5763 uint8_t *page_a2) 5764 { 5765 uint32_t flags = 0; 5766 5767 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5768 5769 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 5770 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 5771 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 5772 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 5773 5774 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5775 flags |= RDP_OET_HIGH_ALARM; 5776 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 5777 flags |= RDP_OET_LOW_ALARM; 5778 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5779 flags |= RDP_OET_HIGH_WARNING; 5780 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 5781 flags |= RDP_OET_LOW_WARNING; 5782 5783 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 5784 desc->oed_info.function_flags = cpu_to_be32(flags); 5785 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 5786 return sizeof(struct fc_rdp_oed_sfp_desc); 5787 } 5788 5789 5790 static uint32_t 5791 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 5792 struct fc_rdp_oed_sfp_desc *desc, 5793 uint8_t *page_a2) 5794 { 5795 uint32_t flags = 0; 5796 5797 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5798 5799 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 5800 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 5801 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 5802 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 5803 5804 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5805 flags |= RDP_OET_HIGH_ALARM; 5806 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 5807 flags |= RDP_OET_LOW_ALARM; 5808 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5809 flags |= RDP_OET_HIGH_WARNING; 5810 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 5811 flags |= RDP_OET_LOW_WARNING; 5812 5813 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 5814 desc->oed_info.function_flags = cpu_to_be32(flags); 5815 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5816 return sizeof(struct fc_rdp_oed_sfp_desc); 5817 } 5818 5819 static uint32_t 5820 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 5821 uint8_t *page_a0, struct lpfc_vport *vport) 5822 { 5823 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 5824 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 5825 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 5826 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 5827 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 5828 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 5829 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 5830 return sizeof(struct fc_rdp_opd_sfp_desc); 5831 } 5832 5833 static uint32_t 5834 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 5835 { 5836 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 5837 return 0; 5838 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 5839 5840 desc->info.CorrectedBlocks = 5841 cpu_to_be32(stat->fecCorrBlkCount); 5842 desc->info.UncorrectableBlocks = 5843 cpu_to_be32(stat->fecUncorrBlkCount); 5844 5845 desc->length = cpu_to_be32(sizeof(desc->info)); 5846 5847 return sizeof(struct fc_fec_rdp_desc); 5848 } 5849 5850 static uint32_t 5851 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 5852 { 5853 uint16_t rdp_cap = 0; 5854 uint16_t rdp_speed; 5855 5856 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 5857 5858 switch (phba->fc_linkspeed) { 5859 case LPFC_LINK_SPEED_1GHZ: 5860 rdp_speed = RDP_PS_1GB; 5861 break; 5862 case LPFC_LINK_SPEED_2GHZ: 5863 rdp_speed = RDP_PS_2GB; 5864 break; 5865 case LPFC_LINK_SPEED_4GHZ: 5866 rdp_speed = RDP_PS_4GB; 5867 break; 5868 case LPFC_LINK_SPEED_8GHZ: 5869 rdp_speed = RDP_PS_8GB; 5870 break; 5871 case LPFC_LINK_SPEED_10GHZ: 5872 rdp_speed = RDP_PS_10GB; 5873 break; 5874 case LPFC_LINK_SPEED_16GHZ: 5875 rdp_speed = RDP_PS_16GB; 5876 break; 5877 case LPFC_LINK_SPEED_32GHZ: 5878 rdp_speed = RDP_PS_32GB; 5879 break; 5880 case LPFC_LINK_SPEED_64GHZ: 5881 rdp_speed = RDP_PS_64GB; 5882 break; 5883 default: 5884 rdp_speed = RDP_PS_UNKNOWN; 5885 break; 5886 } 5887 5888 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 5889 5890 if (phba->lmt & LMT_128Gb) 5891 rdp_cap |= RDP_PS_128GB; 5892 if (phba->lmt & LMT_64Gb) 5893 rdp_cap |= RDP_PS_64GB; 5894 if (phba->lmt & LMT_32Gb) 5895 rdp_cap |= RDP_PS_32GB; 5896 if 
(phba->lmt & LMT_16Gb) 5897 rdp_cap |= RDP_PS_16GB; 5898 if (phba->lmt & LMT_10Gb) 5899 rdp_cap |= RDP_PS_10GB; 5900 if (phba->lmt & LMT_8Gb) 5901 rdp_cap |= RDP_PS_8GB; 5902 if (phba->lmt & LMT_4Gb) 5903 rdp_cap |= RDP_PS_4GB; 5904 if (phba->lmt & LMT_2Gb) 5905 rdp_cap |= RDP_PS_2GB; 5906 if (phba->lmt & LMT_1Gb) 5907 rdp_cap |= RDP_PS_1GB; 5908 5909 if (rdp_cap == 0) 5910 rdp_cap = RDP_CAP_UNKNOWN; 5911 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 5912 rdp_cap |= RDP_CAP_USER_CONFIGURED; 5913 5914 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 5915 desc->length = cpu_to_be32(sizeof(desc->info)); 5916 return sizeof(struct fc_rdp_port_speed_desc); 5917 } 5918 5919 static uint32_t 5920 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 5921 struct lpfc_vport *vport) 5922 { 5923 5924 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5925 5926 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 5927 sizeof(desc->port_names.wwnn)); 5928 5929 memcpy(desc->port_names.wwpn, &vport->fc_portname, 5930 sizeof(desc->port_names.wwpn)); 5931 5932 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5933 return sizeof(struct fc_rdp_port_name_desc); 5934 } 5935 5936 static uint32_t 5937 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 5938 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5939 { 5940 5941 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5942 if (vport->fc_flag & FC_FABRIC) { 5943 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 5944 sizeof(desc->port_names.wwnn)); 5945 5946 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 5947 sizeof(desc->port_names.wwpn)); 5948 } else { /* Point to Point */ 5949 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 5950 sizeof(desc->port_names.wwnn)); 5951 5952 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 5953 sizeof(desc->port_names.wwpn)); 5954 } 5955 5956 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5957 return sizeof(struct fc_rdp_port_name_desc); 5958 } 5959 5960 static void 5961 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 5962 int status) 5963 { 5964 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 5965 struct lpfc_vport *vport = ndlp->vport; 5966 struct lpfc_iocbq *elsiocb; 5967 struct ulp_bde64 *bpl; 5968 IOCB_t *icmd; 5969 uint8_t *pcmd; 5970 struct ls_rjt *stat; 5971 struct fc_rdp_res_frame *rdp_res; 5972 uint32_t cmdsize, len; 5973 uint16_t *flag_ptr; 5974 int rc; 5975 5976 if (status != SUCCESS) 5977 goto error; 5978 5979 /* This will change once we know the true size of the RDP payload */ 5980 cmdsize = sizeof(struct fc_rdp_res_frame); 5981 5982 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 5983 lpfc_max_els_tries, rdp_context->ndlp, 5984 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 5985 if (!elsiocb) 5986 goto free_rdp_context; 5987 5988 icmd = &elsiocb->iocb; 5989 icmd->ulpContext = rdp_context->rx_id; 5990 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 5991 5992 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5993 "2171 Xmit RDP response tag x%x xri x%x, " 5994 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 5995 elsiocb->iotag, elsiocb->iocb.ulpContext, 5996 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5997 ndlp->nlp_rpi); 5998 rdp_res = (struct fc_rdp_res_frame *) 5999 (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6000 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6001 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 6002 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6003 6004 /* Update Alarm 
and Warning */ 6005 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 6006 phba->sfp_alarm |= *flag_ptr; 6007 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 6008 phba->sfp_warning |= *flag_ptr; 6009 6010 /* For RDP payload */ 6011 len = 8; 6012 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 6013 (len + pcmd), ELS_CMD_RDP); 6014 6015 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 6016 rdp_context->page_a0, rdp_context->page_a2); 6017 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 6018 phba); 6019 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 6020 (len + pcmd), &rdp_context->link_stat); 6021 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 6022 (len + pcmd), vport); 6023 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 6024 (len + pcmd), vport, ndlp); 6025 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 6026 &rdp_context->link_stat); 6027 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 6028 &rdp_context->link_stat, vport); 6029 len += lpfc_rdp_res_oed_temp_desc(phba, 6030 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6031 rdp_context->page_a2); 6032 len += lpfc_rdp_res_oed_voltage_desc(phba, 6033 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6034 rdp_context->page_a2); 6035 len += lpfc_rdp_res_oed_txbias_desc(phba, 6036 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6037 rdp_context->page_a2); 6038 len += lpfc_rdp_res_oed_txpower_desc(phba, 6039 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6040 rdp_context->page_a2); 6041 len += lpfc_rdp_res_oed_rxpower_desc(phba, 6042 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 6043 rdp_context->page_a2); 6044 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 6045 rdp_context->page_a0, vport); 6046 6047 rdp_res->length = cpu_to_be32(len - 8); 6048 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6049 6050 /* Now that we know the true size of the payload, update the BPL */ 6051 bpl = (struct ulp_bde64 *) 6052 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 6053 bpl->tus.f.bdeSize = len; 6054 bpl->tus.f.bdeFlags = 0; 6055 bpl->tus.w = le32_to_cpu(bpl->tus.w); 6056 6057 phba->fc_stat.elsXmitACC++; 6058 elsiocb->context1 = lpfc_nlp_get(ndlp); 6059 if (!elsiocb->context1) { 6060 lpfc_els_free_iocb(phba, elsiocb); 6061 goto free_rdp_context; 6062 } 6063 6064 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6065 if (rc == IOCB_ERROR) { 6066 lpfc_nlp_put(ndlp); 6067 lpfc_els_free_iocb(phba, elsiocb); 6068 } 6069 6070 goto free_rdp_context; 6071 6072 error: 6073 cmdsize = 2 * sizeof(uint32_t); 6074 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 6075 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 6076 if (!elsiocb) 6077 goto free_rdp_context; 6078 6079 icmd = &elsiocb->iocb; 6080 icmd->ulpContext = rdp_context->rx_id; 6081 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 6082 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6083 6084 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 6085 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6086 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6087 6088 phba->fc_stat.elsXmitLSRJT++; 6089 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6090 elsiocb->context1 = lpfc_nlp_get(ndlp); 6091 if (!elsiocb->context1) { 6092 lpfc_els_free_iocb(phba, elsiocb); 6093 goto free_rdp_context; 6094 } 6095 6096 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6097 if (rc == IOCB_ERROR) 
{ 6098 lpfc_nlp_put(ndlp); 6099 lpfc_els_free_iocb(phba, elsiocb); 6100 } 6101 6102 free_rdp_context: 6103 /* This reference put is for the original unsolicited RDP. If the 6104 * iocb prep failed, there is no reference to remove. 6105 */ 6106 lpfc_nlp_put(ndlp); 6107 kfree(rdp_context); 6108 } 6109 6110 static int 6111 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 6112 { 6113 LPFC_MBOXQ_t *mbox = NULL; 6114 int rc; 6115 6116 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6117 if (!mbox) { 6118 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 6119 "7105 failed to allocate mailbox memory"); 6120 return 1; 6121 } 6122 6123 if (lpfc_sli4_dump_page_a0(phba, mbox)) 6124 goto prep_mbox_fail; 6125 mbox->vport = rdp_context->ndlp->vport; 6126 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 6127 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 6128 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6129 if (rc == MBX_NOT_FINISHED) 6130 goto issue_mbox_fail; 6131 6132 return 0; 6133 6134 prep_mbox_fail: 6135 issue_mbox_fail: 6136 mempool_free(mbox, phba->mbox_mem_pool); 6137 return 1; 6138 } 6139 6140 /* 6141 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 6142 * @vport: pointer to a host virtual N_Port data structure. 6143 * @cmdiocb: pointer to lpfc command iocb data structure. 6144 * @ndlp: pointer to a node-list data structure. 6145 * 6146 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 6147 * IOCB. First, the payload of the unsolicited RDP is checked. 6148 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 6149 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 6150 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 6151 * gather all data and send RDP response. 6152 * 6153 * Return code 6154 * 0 - Sent the acc response 6155 * 1 - Sent the reject response. 
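 *
 * The rdp_context allocated here records the ox_id/rx_id of the
 * unsolicited RDP and lpfc_els_rdp_cmpl as the final completion, so the
 * accept (or reject) built later is addressed to the originating
 * exchange; the first mailbox (the page A0 dump) is issued by
 * lpfc_get_rdp_info() with lpfc_mbx_cmpl_rdp_page_a0() as its completion
 * handler.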
6156 */ 6157 static int 6158 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6159 struct lpfc_nodelist *ndlp) 6160 { 6161 struct lpfc_hba *phba = vport->phba; 6162 struct lpfc_dmabuf *pcmd; 6163 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 6164 struct fc_rdp_req_frame *rdp_req; 6165 struct lpfc_rdp_context *rdp_context; 6166 IOCB_t *cmd = NULL; 6167 struct ls_rjt stat; 6168 6169 if (phba->sli_rev < LPFC_SLI_REV4 || 6170 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6171 LPFC_SLI_INTF_IF_TYPE_2) { 6172 rjt_err = LSRJT_UNABLE_TPC; 6173 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6174 goto error; 6175 } 6176 6177 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 6178 rjt_err = LSRJT_UNABLE_TPC; 6179 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6180 goto error; 6181 } 6182 6183 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6184 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 6185 6186 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6187 "2422 ELS RDP Request " 6188 "dec len %d tag x%x port_id %d len %d\n", 6189 be32_to_cpu(rdp_req->rdp_des_length), 6190 be32_to_cpu(rdp_req->nport_id_desc.tag), 6191 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 6192 be32_to_cpu(rdp_req->nport_id_desc.length)); 6193 6194 if (sizeof(struct fc_rdp_nport_desc) != 6195 be32_to_cpu(rdp_req->rdp_des_length)) 6196 goto rjt_logerr; 6197 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 6198 goto rjt_logerr; 6199 if (RDP_NPORT_ID_SIZE != 6200 be32_to_cpu(rdp_req->nport_id_desc.length)) 6201 goto rjt_logerr; 6202 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 6203 if (!rdp_context) { 6204 rjt_err = LSRJT_UNABLE_TPC; 6205 goto error; 6206 } 6207 6208 cmd = &cmdiocb->iocb; 6209 rdp_context->ndlp = lpfc_nlp_get(ndlp); 6210 if (!rdp_context->ndlp) { 6211 kfree(rdp_context); 6212 rjt_err = LSRJT_UNABLE_TPC; 6213 goto error; 6214 } 6215 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 6216 rdp_context->rx_id = cmd->ulpContext; 6217 rdp_context->cmpl = lpfc_els_rdp_cmpl; 6218 if (lpfc_get_rdp_info(phba, rdp_context)) { 6219 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 6220 "2423 Unable to send mailbox"); 6221 kfree(rdp_context); 6222 rjt_err = LSRJT_UNABLE_TPC; 6223 lpfc_nlp_put(ndlp); 6224 goto error; 6225 } 6226 6227 return 0; 6228 6229 rjt_logerr: 6230 rjt_err = LSRJT_LOGICAL_ERR; 6231 6232 error: 6233 memset(&stat, 0, sizeof(stat)); 6234 stat.un.b.lsRjtRsnCode = rjt_err; 6235 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 6236 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6237 return 1; 6238 } 6239 6240 6241 static void 6242 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6243 { 6244 MAILBOX_t *mb; 6245 IOCB_t *icmd; 6246 uint8_t *pcmd; 6247 struct lpfc_iocbq *elsiocb; 6248 struct lpfc_nodelist *ndlp; 6249 struct ls_rjt *stat; 6250 union lpfc_sli4_cfg_shdr *shdr; 6251 struct lpfc_lcb_context *lcb_context; 6252 struct fc_lcb_res_frame *lcb_res; 6253 uint32_t cmdsize, shdr_status, shdr_add_status; 6254 int rc; 6255 6256 mb = &pmb->u.mb; 6257 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 6258 ndlp = lcb_context->ndlp; 6259 pmb->ctx_ndlp = NULL; 6260 pmb->ctx_buf = NULL; 6261 6262 shdr = (union lpfc_sli4_cfg_shdr *) 6263 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 6264 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6265 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6266 6267 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 6268 "0194 SET_BEACON_CONFIG mailbox " 6269 "completed 
with status x%x add_status x%x," 6270 " mbx status x%x\n", 6271 shdr_status, shdr_add_status, mb->mbxStatus); 6272 6273 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 6274 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 6275 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 6276 mempool_free(pmb, phba->mbox_mem_pool); 6277 goto error; 6278 } 6279 6280 mempool_free(pmb, phba->mbox_mem_pool); 6281 cmdsize = sizeof(struct fc_lcb_res_frame); 6282 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6283 lpfc_max_els_tries, ndlp, 6284 ndlp->nlp_DID, ELS_CMD_ACC); 6285 6286 /* Decrement the ndlp reference count from previous mbox command */ 6287 lpfc_nlp_put(ndlp); 6288 6289 if (!elsiocb) 6290 goto free_lcb_context; 6291 6292 lcb_res = (struct fc_lcb_res_frame *) 6293 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6294 6295 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 6296 icmd = &elsiocb->iocb; 6297 icmd->ulpContext = lcb_context->rx_id; 6298 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 6299 6300 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6301 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 6302 lcb_res->lcb_sub_command = lcb_context->sub_command; 6303 lcb_res->lcb_type = lcb_context->type; 6304 lcb_res->capability = lcb_context->capability; 6305 lcb_res->lcb_frequency = lcb_context->frequency; 6306 lcb_res->lcb_duration = lcb_context->duration; 6307 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6308 phba->fc_stat.elsXmitACC++; 6309 6310 elsiocb->context1 = lpfc_nlp_get(ndlp); 6311 if (!elsiocb->context1) 6312 goto node_err; 6313 6314 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6315 if (!rc) 6316 goto out; 6317 6318 lpfc_nlp_put(ndlp); 6319 node_err: 6320 lpfc_els_free_iocb(phba, elsiocb); 6321 out: 6322 kfree(lcb_context); 6323 return; 6324 6325 error: 6326 cmdsize = sizeof(struct fc_lcb_res_frame); 6327 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6328 lpfc_max_els_tries, ndlp, 6329 ndlp->nlp_DID, ELS_CMD_LS_RJT); 6330 lpfc_nlp_put(ndlp); 6331 if (!elsiocb) 6332 goto free_lcb_context; 6333 6334 icmd = &elsiocb->iocb; 6335 icmd->ulpContext = lcb_context->rx_id; 6336 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 6337 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6338 6339 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 6340 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6341 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6342 6343 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 6344 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 6345 6346 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6347 phba->fc_stat.elsXmitLSRJT++; 6348 elsiocb->context1 = lpfc_nlp_get(ndlp); 6349 if (!elsiocb->context1) { 6350 lpfc_els_free_iocb(phba, elsiocb); 6351 goto free_lcb_context; 6352 } 6353 6354 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6355 if (rc == IOCB_ERROR) { 6356 lpfc_nlp_put(ndlp); 6357 lpfc_els_free_iocb(phba, elsiocb); 6358 } 6359 free_lcb_context: 6360 kfree(lcb_context); 6361 } 6362 6363 static int 6364 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 6365 struct lpfc_lcb_context *lcb_context, 6366 uint32_t beacon_state) 6367 { 6368 struct lpfc_hba *phba = vport->phba; 6369 union lpfc_sli4_cfg_shdr *cfg_shdr; 6370 LPFC_MBOXQ_t *mbox = NULL; 6371 uint32_t len; 6372 int rc; 6373 6374 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6375 if (!mbox) 6376 return 1; 6377 6378 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 6379 len = sizeof(struct lpfc_mbx_set_beacon_config) - 6380 
sizeof(struct lpfc_sli4_cfg_mhdr);
6381 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6382 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len,
6383 LPFC_SLI4_MBX_EMBED);
6384 mbox->ctx_ndlp = (void *)lcb_context;
6385 mbox->vport = phba->pport;
6386 mbox->mbox_cmpl = lpfc_els_lcb_rsp;
6387 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config,
6388 phba->sli4_hba.physical_port);
6389 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
6390 beacon_state);
6391 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */
6392
6393 /*
6394 * Check the bv1s bit before issuing the mailbox:
6395 * if bv1s == 1, LCB V1 is supported;
6396 * otherwise, only LCB V0 is supported.
6397 */
6398
6399 if (phba->sli4_hba.pc_sli4_params.bv1s) {
6400 /* COMMON_SET_BEACON_CONFIG_V1 */
6401 cfg_shdr->request.word9 = BEACON_VERSION_V1;
6402 lcb_context->capability |= LCB_CAPABILITY_DURATION;
6403 bf_set(lpfc_mbx_set_beacon_port_type,
6404 &mbox->u.mqe.un.beacon_config, 0);
6405 bf_set(lpfc_mbx_set_beacon_duration_v1,
6406 &mbox->u.mqe.un.beacon_config,
6407 be16_to_cpu(lcb_context->duration));
6408 } else {
6409 /* COMMON_SET_BEACON_CONFIG_V0 */
6410 if (be16_to_cpu(lcb_context->duration) != 0) {
6411 mempool_free(mbox, phba->mbox_mem_pool);
6412 return 1;
6413 }
6414 cfg_shdr->request.word9 = BEACON_VERSION_V0;
6415 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
6416 bf_set(lpfc_mbx_set_beacon_state,
6417 &mbox->u.mqe.un.beacon_config, beacon_state);
6418 bf_set(lpfc_mbx_set_beacon_port_type,
6419 &mbox->u.mqe.un.beacon_config, 1);
6420 bf_set(lpfc_mbx_set_beacon_duration,
6421 &mbox->u.mqe.un.beacon_config,
6422 be16_to_cpu(lcb_context->duration));
6423 }
6424
6425 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6426 if (rc == MBX_NOT_FINISHED) {
6427 mempool_free(mbox, phba->mbox_mem_pool);
6428 return 1;
6429 }
6430
6431 return 0;
6432 }
6433
6434
6435 /**
6436 * lpfc_els_rcv_lcb - Process an unsolicited LCB
6437 * @vport: pointer to a host virtual N_Port data structure.
6438 * @cmdiocb: pointer to lpfc command iocb data structure.
6439 * @ndlp: pointer to a node-list data structure.
6440 *
6441 * This routine processes an unsolicited LCB (Link Cable Beacon) IOCB.
6442 * First, the payload of the unsolicited LCB is validated.
6443 * Then, based on the LCB subcommand, the beacon is turned on or off.
6444 *
6445 * Return code
6446 * 0 - Sent the acc response
6447 * 1 - Sent the reject response.
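 *
 * Note: a non-zero beacon duration is only honored when the adapter
 * reports LCB V1 support (the bv1s parameter checked in
 * lpfc_sli4_set_beacon()); on a V0-only adapter such a request causes the
 * mailbox setup to fail and the LCB is rejected.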
6448 **/ 6449 static int 6450 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6451 struct lpfc_nodelist *ndlp) 6452 { 6453 struct lpfc_hba *phba = vport->phba; 6454 struct lpfc_dmabuf *pcmd; 6455 uint8_t *lp; 6456 struct fc_lcb_request_frame *beacon; 6457 struct lpfc_lcb_context *lcb_context; 6458 u8 state, rjt_err = 0; 6459 struct ls_rjt stat; 6460 6461 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 6462 lp = (uint8_t *)pcmd->virt; 6463 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 6464 6465 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6466 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 6467 "type x%x frequency %x duration x%x\n", 6468 lp[0], lp[1], lp[2], 6469 beacon->lcb_command, 6470 beacon->lcb_sub_command, 6471 beacon->lcb_type, 6472 beacon->lcb_frequency, 6473 be16_to_cpu(beacon->lcb_duration)); 6474 6475 if (beacon->lcb_sub_command != LPFC_LCB_ON && 6476 beacon->lcb_sub_command != LPFC_LCB_OFF) { 6477 rjt_err = LSRJT_CMD_UNSUPPORTED; 6478 goto rjt; 6479 } 6480 6481 if (phba->sli_rev < LPFC_SLI_REV4 || 6482 phba->hba_flag & HBA_FCOE_MODE || 6483 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6484 LPFC_SLI_INTF_IF_TYPE_2)) { 6485 rjt_err = LSRJT_CMD_UNSUPPORTED; 6486 goto rjt; 6487 } 6488 6489 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 6490 if (!lcb_context) { 6491 rjt_err = LSRJT_UNABLE_TPC; 6492 goto rjt; 6493 } 6494 6495 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 6496 lcb_context->sub_command = beacon->lcb_sub_command; 6497 lcb_context->capability = 0; 6498 lcb_context->type = beacon->lcb_type; 6499 lcb_context->frequency = beacon->lcb_frequency; 6500 lcb_context->duration = beacon->lcb_duration; 6501 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 6502 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 6503 lcb_context->ndlp = lpfc_nlp_get(ndlp); 6504 if (!lcb_context->ndlp) { 6505 rjt_err = LSRJT_UNABLE_TPC; 6506 goto rjt_free; 6507 } 6508 6509 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 6510 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 6511 "0193 failed to send mail box"); 6512 lpfc_nlp_put(ndlp); 6513 rjt_err = LSRJT_UNABLE_TPC; 6514 goto rjt_free; 6515 } 6516 return 0; 6517 6518 rjt_free: 6519 kfree(lcb_context); 6520 rjt: 6521 memset(&stat, 0, sizeof(stat)); 6522 stat.un.b.lsRjtRsnCode = rjt_err; 6523 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6524 return 1; 6525 } 6526 6527 6528 /** 6529 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 6530 * @vport: pointer to a host virtual N_Port data structure. 6531 * 6532 * This routine cleans up any Registration State Change Notification 6533 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 6534 * @vport together with the host_lock is used to prevent multiple thread 6535 * trying to access the RSCN array on a same @vport at the same time. 
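 *
 * Each saved RSCN payload buffer on fc_rscn_id_list is returned with
 * lpfc_in_buf_free(), the list count is reset, the FC_RSCN_MODE and
 * FC_RSCN_DISCOVERY flags are cleared, and lpfc_can_disctmo() is called
 * to allow the discovery timer to be cancelled.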
6536 **/ 6537 void 6538 lpfc_els_flush_rscn(struct lpfc_vport *vport) 6539 { 6540 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6541 struct lpfc_hba *phba = vport->phba; 6542 int i; 6543 6544 spin_lock_irq(shost->host_lock); 6545 if (vport->fc_rscn_flush) { 6546 /* Another thread is walking fc_rscn_id_list on this vport */ 6547 spin_unlock_irq(shost->host_lock); 6548 return; 6549 } 6550 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 6551 vport->fc_rscn_flush = 1; 6552 spin_unlock_irq(shost->host_lock); 6553 6554 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 6555 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 6556 vport->fc_rscn_id_list[i] = NULL; 6557 } 6558 spin_lock_irq(shost->host_lock); 6559 vport->fc_rscn_id_cnt = 0; 6560 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 6561 spin_unlock_irq(shost->host_lock); 6562 lpfc_can_disctmo(vport); 6563 /* Indicate we are done walking this fc_rscn_id_list */ 6564 vport->fc_rscn_flush = 0; 6565 } 6566 6567 /** 6568 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 6569 * @vport: pointer to a host virtual N_Port data structure. 6570 * @did: remote destination port identifier. 6571 * 6572 * This routine checks whether there is any pending Registration State 6573 * Configuration Notification (RSCN) to a @did on @vport. 6574 * 6575 * Return code 6576 * None zero - The @did matched with a pending rscn 6577 * 0 - not able to match @did with a pending rscn 6578 **/ 6579 int 6580 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 6581 { 6582 D_ID ns_did; 6583 D_ID rscn_did; 6584 uint32_t *lp; 6585 uint32_t payload_len, i; 6586 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6587 6588 ns_did.un.word = did; 6589 6590 /* Never match fabric nodes for RSCNs */ 6591 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6592 return 0; 6593 6594 /* If we are doing a FULL RSCN rediscovery, match everything */ 6595 if (vport->fc_flag & FC_RSCN_DISCOVERY) 6596 return did; 6597 6598 spin_lock_irq(shost->host_lock); 6599 if (vport->fc_rscn_flush) { 6600 /* Another thread is walking fc_rscn_id_list on this vport */ 6601 spin_unlock_irq(shost->host_lock); 6602 return 0; 6603 } 6604 /* Indicate we are walking fc_rscn_id_list on this vport */ 6605 vport->fc_rscn_flush = 1; 6606 spin_unlock_irq(shost->host_lock); 6607 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 6608 lp = vport->fc_rscn_id_list[i]->virt; 6609 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6610 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6611 while (payload_len) { 6612 rscn_did.un.word = be32_to_cpu(*lp++); 6613 payload_len -= sizeof(uint32_t); 6614 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 6615 case RSCN_ADDRESS_FORMAT_PORT: 6616 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 6617 && (ns_did.un.b.area == rscn_did.un.b.area) 6618 && (ns_did.un.b.id == rscn_did.un.b.id)) 6619 goto return_did_out; 6620 break; 6621 case RSCN_ADDRESS_FORMAT_AREA: 6622 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 6623 && (ns_did.un.b.area == rscn_did.un.b.area)) 6624 goto return_did_out; 6625 break; 6626 case RSCN_ADDRESS_FORMAT_DOMAIN: 6627 if (ns_did.un.b.domain == rscn_did.un.b.domain) 6628 goto return_did_out; 6629 break; 6630 case RSCN_ADDRESS_FORMAT_FABRIC: 6631 goto return_did_out; 6632 } 6633 } 6634 } 6635 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 6636 vport->fc_rscn_flush = 0; 6637 return 0; 6638 return_did_out: 6639 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */
6640 vport->fc_rscn_flush = 0;
6641 return did;
6642 }
6643
6644 /**
6645 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
6646 * @vport: pointer to a host virtual N_Port data structure.
6647 *
6648 * This routine sends a recovery (NLP_EVT_DEVICE_RECOVERY) event to the
6649 * discovery state machine for each of the @vport's nodes that matches a
6650 * pending RSCN (Registration State Change Notification).
6651 *
6652 * Return code
6653 * 0 - Successful (currently always returns 0)
6654 **/
6655 static int
6656 lpfc_rscn_recovery_check(struct lpfc_vport *vport)
6657 {
6658 struct lpfc_nodelist *ndlp = NULL;
6659
6660 /* Move all nodes affected by pending RSCNs to NPR state. */
6661 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
6662 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
6663 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
6664 continue;
6665
6666 /* NVME Target mode does not do RSCN Recovery. */
6667 if (vport->phba->nvmet_support)
6668 continue;
6669
6670 /* If we are in the process of doing discovery on this
6671 * NPort, let it continue on its own.
6672 */
6673 switch (ndlp->nlp_state) {
6674 case NLP_STE_PLOGI_ISSUE:
6675 case NLP_STE_ADISC_ISSUE:
6676 case NLP_STE_REG_LOGIN_ISSUE:
6677 case NLP_STE_PRLI_ISSUE:
6678 case NLP_STE_LOGO_ISSUE:
6679 continue;
6680 }
6681
6682 /* Check to see if we need to NVME rescan this target
6683 * remoteport.
6684 */
6685 if (ndlp->nlp_fc4_type & NLP_FC4_NVME &&
6686 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY))
6687 lpfc_nvme_rescan_port(vport, ndlp);
6688
6689 lpfc_disc_state_machine(vport, ndlp, NULL,
6690 NLP_EVT_DEVICE_RECOVERY);
6691 lpfc_cancel_retry_delay_tmo(vport, ndlp);
6692 }
6693 return 0;
6694 }
6695
6696 /**
6697 * lpfc_send_rscn_event - Send an RSCN event to management application
6698 * @vport: pointer to a host virtual N_Port data structure.
6699 * @cmdiocb: pointer to lpfc command iocb data structure.
6700 *
6701 * lpfc_send_rscn_event sends an RSCN netlink event to management
6702 * applications.
6703 */
6704 static void
6705 lpfc_send_rscn_event(struct lpfc_vport *vport,
6706 struct lpfc_iocbq *cmdiocb)
6707 {
6708 struct lpfc_dmabuf *pcmd;
6709 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6710 uint32_t *payload_ptr;
6711 uint32_t payload_len;
6712 struct lpfc_rscn_event_header *rscn_event_data;
6713
6714 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
6715 payload_ptr = (uint32_t *) pcmd->virt;
6716 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
6717
6718 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
6719 payload_len, GFP_KERNEL);
6720 if (!rscn_event_data) {
6721 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6722 "0147 Failed to allocate memory for RSCN event\n");
6723 return;
6724 }
6725 rscn_event_data->event_type = FC_REG_RSCN_EVENT;
6726 rscn_event_data->payload_length = payload_len;
6727 memcpy(rscn_event_data->rscn_payload, payload_ptr,
6728 payload_len);
6729
6730 fc_host_post_vendor_event(shost,
6731 fc_get_event_number(),
6732 sizeof(struct lpfc_rscn_event_header) + payload_len,
6733 (char *)rscn_event_data,
6734 LPFC_NL_VENDOR_ID);
6735
6736 kfree(rscn_event_data);
6737 }
6738
6739 /**
6740 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
6741 * @vport: pointer to a host virtual N_Port data structure.
6742 * @cmdiocb: pointer to lpfc command iocb data structure.
6743 * @ndlp: pointer to a node-list data structure.
6744 *
6745 * This routine processes an unsolicited RSCN (Registration State Change
6746 * Notification) IOCB.
First, the payload of the unsolicited RSCN is walked 6747 * to invoke fc_host_post_event() routine to the FC transport layer. If the 6748 * discover state machine is about to begin discovery, it just accepts the 6749 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 6750 * contains N_Port IDs for other vports on this HBA, it just accepts the 6751 * RSCN and ignore processing it. If the state machine is in the recovery 6752 * state, the fc_rscn_id_list of this @vport is walked and the 6753 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 6754 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 6755 * routine is invoked to handle the RSCN event. 6756 * 6757 * Return code 6758 * 0 - Just sent the acc response 6759 * 1 - Sent the acc response and waited for name server completion 6760 **/ 6761 static int 6762 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6763 struct lpfc_nodelist *ndlp) 6764 { 6765 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6766 struct lpfc_hba *phba = vport->phba; 6767 struct lpfc_dmabuf *pcmd; 6768 uint32_t *lp, *datap; 6769 uint32_t payload_len, length, nportid, *cmd; 6770 int rscn_cnt; 6771 int rscn_id = 0, hba_id = 0; 6772 int i, tmo; 6773 6774 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6775 lp = (uint32_t *) pcmd->virt; 6776 6777 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6778 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6779 /* RSCN received */ 6780 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6781 "0214 RSCN received Data: x%x x%x x%x x%x\n", 6782 vport->fc_flag, payload_len, *lp, 6783 vport->fc_rscn_id_cnt); 6784 6785 /* Send an RSCN event to the management application */ 6786 lpfc_send_rscn_event(vport, cmdiocb); 6787 6788 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 6789 fc_host_post_event(shost, fc_get_event_number(), 6790 FCH_EVT_RSCN, lp[i]); 6791 6792 /* Check if RSCN is coming from a direct-connected remote NPort */ 6793 if (vport->fc_flag & FC_PT2PT) { 6794 /* If so, just ACC it, no other action needed for now */ 6795 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6796 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 6797 *lp, vport->fc_flag, payload_len); 6798 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6799 6800 /* Check to see if we need to NVME rescan this target 6801 * remoteport. 6802 */ 6803 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 6804 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 6805 lpfc_nvme_rescan_port(vport, ndlp); 6806 return 0; 6807 } 6808 6809 /* If we are about to begin discovery, just ACC the RSCN. 6810 * Discovery processing will satisfy it. 6811 */ 6812 if (vport->port_state <= LPFC_NS_QRY) { 6813 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6814 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 6815 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6816 6817 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6818 return 0; 6819 } 6820 6821 /* If this RSCN just contains NPortIDs for other vports on this HBA, 6822 * just ACC and ignore it. 
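 * This is decided by walking every NPort ID in the payload and counting
 * how many resolve to a local vport via lpfc_find_vport_by_did(); the
 * RSCN is only dropped this way when every entry is local.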
6823 */ 6824 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6825 !(vport->cfg_peer_port_login)) { 6826 i = payload_len; 6827 datap = lp; 6828 while (i > 0) { 6829 nportid = *datap++; 6830 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 6831 i -= sizeof(uint32_t); 6832 rscn_id++; 6833 if (lpfc_find_vport_by_did(phba, nportid)) 6834 hba_id++; 6835 } 6836 if (rscn_id == hba_id) { 6837 /* ALL NPortIDs in RSCN are on HBA */ 6838 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6839 "0219 Ignore RSCN " 6840 "Data: x%x x%x x%x x%x\n", 6841 vport->fc_flag, payload_len, 6842 *lp, vport->fc_rscn_id_cnt); 6843 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6844 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 6845 ndlp->nlp_DID, vport->port_state, 6846 ndlp->nlp_flag); 6847 6848 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 6849 ndlp, NULL); 6850 return 0; 6851 } 6852 } 6853 6854 spin_lock_irq(shost->host_lock); 6855 if (vport->fc_rscn_flush) { 6856 /* Another thread is walking fc_rscn_id_list on this vport */ 6857 vport->fc_flag |= FC_RSCN_DISCOVERY; 6858 spin_unlock_irq(shost->host_lock); 6859 /* Send back ACC */ 6860 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6861 return 0; 6862 } 6863 /* Indicate we are walking fc_rscn_id_list on this vport */ 6864 vport->fc_rscn_flush = 1; 6865 spin_unlock_irq(shost->host_lock); 6866 /* Get the array count after successfully have the token */ 6867 rscn_cnt = vport->fc_rscn_id_cnt; 6868 /* If we are already processing an RSCN, save the received 6869 * RSCN payload buffer, cmdiocb->context2 to process later. 6870 */ 6871 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 6872 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6873 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 6874 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6875 6876 spin_lock_irq(shost->host_lock); 6877 vport->fc_flag |= FC_RSCN_DEFERRED; 6878 6879 /* Restart disctmo if its already running */ 6880 if (vport->fc_flag & FC_DISC_TMO) { 6881 tmo = ((phba->fc_ratov * 3) + 3); 6882 mod_timer(&vport->fc_disctmo, 6883 jiffies + msecs_to_jiffies(1000 * tmo)); 6884 } 6885 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 6886 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 6887 vport->fc_flag |= FC_RSCN_MODE; 6888 spin_unlock_irq(shost->host_lock); 6889 if (rscn_cnt) { 6890 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 6891 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 6892 } 6893 if ((rscn_cnt) && 6894 (payload_len + length <= LPFC_BPL_SIZE)) { 6895 *cmd &= ELS_CMD_MASK; 6896 *cmd |= cpu_to_be32(payload_len + length); 6897 memcpy(((uint8_t *)cmd) + length, lp, 6898 payload_len); 6899 } else { 6900 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 6901 vport->fc_rscn_id_cnt++; 6902 /* If we zero, cmdiocb->context2, the calling 6903 * routine will not try to free it. 
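 *
 * Ownership of the RSCN payload dmabuf was handed to
 * vport->fc_rscn_id_list above; the buffer is expected to be released
 * later, when the deferred RSCN list is processed or flushed, rather
 * than by the caller.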
6904 */ 6905 cmdiocb->context2 = NULL; 6906 } 6907 /* Deferred RSCN */ 6908 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6909 "0235 Deferred RSCN " 6910 "Data: x%x x%x x%x\n", 6911 vport->fc_rscn_id_cnt, vport->fc_flag, 6912 vport->port_state); 6913 } else { 6914 vport->fc_flag |= FC_RSCN_DISCOVERY; 6915 spin_unlock_irq(shost->host_lock); 6916 /* ReDiscovery RSCN */ 6917 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6918 "0234 ReDiscovery RSCN " 6919 "Data: x%x x%x x%x\n", 6920 vport->fc_rscn_id_cnt, vport->fc_flag, 6921 vport->port_state); 6922 } 6923 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6924 vport->fc_rscn_flush = 0; 6925 /* Send back ACC */ 6926 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6927 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6928 lpfc_rscn_recovery_check(vport); 6929 return 0; 6930 } 6931 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6932 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 6933 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6934 6935 spin_lock_irq(shost->host_lock); 6936 vport->fc_flag |= FC_RSCN_MODE; 6937 spin_unlock_irq(shost->host_lock); 6938 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 6939 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6940 vport->fc_rscn_flush = 0; 6941 /* 6942 * If we zero cmdiocb->context2, the calling routine will 6943 * not try to free it. 6944 */ 6945 cmdiocb->context2 = NULL; 6946 lpfc_set_disctmo(vport); 6947 /* Send back ACC */ 6948 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6949 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6950 lpfc_rscn_recovery_check(vport); 6951 return lpfc_els_handle_rscn(vport); 6952 } 6953 6954 /** 6955 * lpfc_els_handle_rscn - Handle rscn for a vport 6956 * @vport: pointer to a host virtual N_Port data structure. 6957 * 6958 * This routine handles the Registration State Change Notification 6959 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 6960 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 6961 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 6962 * NameServer shall be issued. If CT command to the NameServer fails to be 6963 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 6964 * RSCN activities with the @vport. 6965 * 6966 * Return code 6967 * 0 - Cleaned up rscn on the @vport 6968 * 1 - Wait for plogi to name server before proceeding 6969 **/ 6970 int 6971 lpfc_els_handle_rscn(struct lpfc_vport *vport) 6972 { 6973 struct lpfc_nodelist *ndlp; 6974 struct lpfc_hba *phba = vport->phba; 6975 6976 /* Ignore RSCN if the port is being torn down. */ 6977 if (vport->load_flag & FC_UNLOADING) { 6978 lpfc_els_flush_rscn(vport); 6979 return 0; 6980 } 6981 6982 /* Start timer for RSCN processing */ 6983 lpfc_set_disctmo(vport); 6984 6985 /* RSCN processed */ 6986 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6987 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 6988 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 6989 vport->port_state, vport->num_disc_nodes, 6990 vport->gidft_inp); 6991 6992 /* To process RSCN, first compare RSCN data with NameServer */ 6993 vport->fc_ns_retry = 0; 6994 vport->num_disc_nodes = 0; 6995 6996 ndlp = lpfc_findnode_did(vport, NameServer_DID); 6997 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 6998 /* Good ndlp, issue CT Request to NameServer. Need to 6999 * know how many gidfts were issued.
If none, then just 7000 * flush the RSCN. Otherwise, the outstanding requests 7001 * need to complete. 7002 */ 7003 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 7004 if (lpfc_issue_gidft(vport) > 0) 7005 return 1; 7006 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 7007 if (lpfc_issue_gidpt(vport) > 0) 7008 return 1; 7009 } else { 7010 return 1; 7011 } 7012 } else { 7013 /* Nameserver login in question. Revalidate. */ 7014 if (ndlp) { 7015 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 7016 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 7017 } else { 7018 ndlp = lpfc_nlp_init(vport, NameServer_DID); 7019 if (!ndlp) { 7020 lpfc_els_flush_rscn(vport); 7021 return 0; 7022 } 7023 ndlp->nlp_prev_state = ndlp->nlp_state; 7024 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 7025 } 7026 ndlp->nlp_type |= NLP_FABRIC; 7027 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 7028 /* Wait for NameServer login cmpl before we can 7029 * continue 7030 */ 7031 return 1; 7032 } 7033 7034 lpfc_els_flush_rscn(vport); 7035 return 0; 7036 } 7037 7038 /** 7039 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 7040 * @vport: pointer to a host virtual N_Port data structure. 7041 * @cmdiocb: pointer to lpfc command iocb data structure. 7042 * @ndlp: pointer to a node-list data structure. 7043 * 7044 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 7045 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 7046 * point topology. As an unsolicited FLOGI should not be received in a loop 7047 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 7048 * lpfc_check_sparm() routine is invoked to check the parameters in the 7049 * unsolicited FLOGI. If parameter validation fails, the routine 7050 * lpfc_els_rsp_reject() shall be called with reject reason code set to 7051 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 7052 * FLOGI shall be compared with the Port WWN of the @vport to determine who 7053 * will initiate PLOGI. The party with the lexicographically higher value shall 7054 * have higher priority (as the winning port) and will initiate PLOGI and 7055 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 7056 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 7057 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
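 *
 * The comparison that picks the winner is a plain big-endian memcmp of
 * the two Port WWNs; in outline (illustrative only):
 *
 *	rc = memcmp(&vport->fc_portname, &sp->portName,
 *		    sizeof(struct lpfc_name));
 *	rc > 0:  local port wins and takes PT2PT_LocalID
 *	rc < 0:  remote port wins, local port takes PT2PT_RemoteID
 *	rc == 0: the FLOGI is our own frame looped back externally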
7058 * 7059 * Return code 7060 * 0 - Successfully processed the unsolicited flogi 7061 * 1 - Failed to process the unsolicited flogi 7062 **/ 7063 static int 7064 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7065 struct lpfc_nodelist *ndlp) 7066 { 7067 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7068 struct lpfc_hba *phba = vport->phba; 7069 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7070 uint32_t *lp = (uint32_t *) pcmd->virt; 7071 IOCB_t *icmd = &cmdiocb->iocb; 7072 struct serv_parm *sp; 7073 LPFC_MBOXQ_t *mbox; 7074 uint32_t cmd, did; 7075 int rc; 7076 uint32_t fc_flag = 0; 7077 uint32_t port_state = 0; 7078 7079 cmd = *lp++; 7080 sp = (struct serv_parm *) lp; 7081 7082 /* FLOGI received */ 7083 7084 lpfc_set_disctmo(vport); 7085 7086 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7087 /* We should never receive a FLOGI in loop mode, ignore it */ 7088 did = icmd->un.elsreq64.remoteID; 7089 7090 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 7091 Loop Mode */ 7092 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7093 "0113 An FLOGI ELS command x%x was " 7094 "received from DID x%x in Loop Mode\n", 7095 cmd, did); 7096 return 1; 7097 } 7098 7099 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 7100 7101 /* 7102 * If our portname is greater than the remote portname, 7103 * then we initiate Nport login. 7104 */ 7105 7106 rc = memcmp(&vport->fc_portname, &sp->portName, 7107 sizeof(struct lpfc_name)); 7108 7109 if (!rc) { 7110 if (phba->sli_rev < LPFC_SLI_REV4) { 7111 mbox = mempool_alloc(phba->mbox_mem_pool, 7112 GFP_KERNEL); 7113 if (!mbox) 7114 return 1; 7115 lpfc_linkdown(phba); 7116 lpfc_init_link(phba, mbox, 7117 phba->cfg_topology, 7118 phba->cfg_link_speed); 7119 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 7120 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7121 mbox->vport = vport; 7122 rc = lpfc_sli_issue_mbox(phba, mbox, 7123 MBX_NOWAIT); 7124 lpfc_set_loopback_flag(phba); 7125 if (rc == MBX_NOT_FINISHED) 7126 mempool_free(mbox, phba->mbox_mem_pool); 7127 return 1; 7128 } 7129 7130 /* abort the flogi coming back to ourselves 7131 * due to external loopback on the port. 7132 */ 7133 lpfc_els_abort_flogi(phba); 7134 return 0; 7135 7136 } else if (rc > 0) { /* greater than */ 7137 spin_lock_irq(shost->host_lock); 7138 vport->fc_flag |= FC_PT2PT_PLOGI; 7139 spin_unlock_irq(shost->host_lock); 7140 7141 /* If we have the high WWPN we can assign our own 7142 * myDID; otherwise, we have to WAIT for a PLOGI 7143 * from the remote NPort to find out what it 7144 * will be. 7145 */ 7146 vport->fc_myDID = PT2PT_LocalID; 7147 } else { 7148 vport->fc_myDID = PT2PT_RemoteID; 7149 } 7150 7151 /* 7152 * The vport state should go to LPFC_FLOGI only 7153 * AFTER we issue a FLOGI, not receive one. 7154 */ 7155 spin_lock_irq(shost->host_lock); 7156 fc_flag = vport->fc_flag; 7157 port_state = vport->port_state; 7158 vport->fc_flag |= FC_PT2PT; 7159 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7160 7161 /* Acking an unsol FLOGI. Count 1 for link bounce 7162 * work-around. 7163 */ 7164 vport->rcv_flogi_cnt++; 7165 spin_unlock_irq(shost->host_lock); 7166 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7167 "3311 Rcv Flogi PS x%x new PS x%x " 7168 "fc_flag x%x new fc_flag x%x\n", 7169 port_state, vport->port_state, 7170 fc_flag, vport->fc_flag); 7171 7172 /* 7173 * We temporarily set fc_myDID to make it look like we are 7174 * a Fabric. This is done just so we end up with the right 7175 * did / sid on the FLOGI ACC rsp. 
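 * Illustration: fc_myDID briefly holds Fabric_DID (the well-known
 * fabric F_Port address, 0xFFFFFE) while the ACC is built, so the
 * response carries the S_ID a point-to-point peer expects from a
 * FLOGI accept; the saved DID is restored right after the ACC is
 * sent, or before the ACC is deferred.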
7176 */ 7177 did = vport->fc_myDID; 7178 vport->fc_myDID = Fabric_DID; 7179 7180 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 7181 7182 /* Defer ACC response until AFTER we issue a FLOGI */ 7183 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 7184 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext; 7185 phba->defer_flogi_acc_ox_id = 7186 cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7187 7188 vport->fc_myDID = did; 7189 7190 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7191 "3344 Deferring FLOGI ACC: rx_id: x%x," 7192 " ox_id: x%x, hba_flag x%x\n", 7193 phba->defer_flogi_acc_rx_id, 7194 phba->defer_flogi_acc_ox_id, phba->hba_flag); 7195 7196 phba->defer_flogi_acc_flag = true; 7197 7198 return 0; 7199 } 7200 7201 /* Send back ACC */ 7202 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 7203 7204 /* Now lets put fc_myDID back to what its supposed to be */ 7205 vport->fc_myDID = did; 7206 7207 return 0; 7208 } 7209 7210 /** 7211 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 7212 * @vport: pointer to a host virtual N_Port data structure. 7213 * @cmdiocb: pointer to lpfc command iocb data structure. 7214 * @ndlp: pointer to a node-list data structure. 7215 * 7216 * This routine processes Request Node Identification Data (RNID) IOCB 7217 * received as an ELS unsolicited event. Only when the RNID specified format 7218 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 7219 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 7220 * Accept (ACC) the RNID ELS command. All the other RNID formats are 7221 * rejected by invoking the lpfc_els_rsp_reject() routine. 7222 * 7223 * Return code 7224 * 0 - Successfully processed rnid iocb (currently always return 0) 7225 **/ 7226 static int 7227 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7228 struct lpfc_nodelist *ndlp) 7229 { 7230 struct lpfc_dmabuf *pcmd; 7231 uint32_t *lp; 7232 RNID *rn; 7233 struct ls_rjt stat; 7234 7235 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7236 lp = (uint32_t *) pcmd->virt; 7237 7238 lp++; 7239 rn = (RNID *) lp; 7240 7241 /* RNID received */ 7242 7243 switch (rn->Format) { 7244 case 0: 7245 case RNID_TOPOLOGY_DISC: 7246 /* Send back ACC */ 7247 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 7248 break; 7249 default: 7250 /* Reject this request because format not supported */ 7251 stat.un.b.lsRjtRsvd0 = 0; 7252 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7253 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7254 stat.un.b.vendorUnique = 0; 7255 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7256 NULL); 7257 } 7258 return 0; 7259 } 7260 7261 /** 7262 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 7263 * @vport: pointer to a host virtual N_Port data structure. 7264 * @cmdiocb: pointer to lpfc command iocb data structure. 7265 * @ndlp: pointer to a node-list data structure. 
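 *
 * This routine processes an unsolicited ECHO ELS command: it skips the
 * command word of the received payload and invokes
 * lpfc_els_rsp_echo_acc() to return the echo data to the sender in an
 * Accept (ACC) response.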
7266 * 7267 * Return code 7268 * 0 - Successfully processed echo iocb (currently always returns 0) 7269 **/ 7270 static int 7271 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7272 struct lpfc_nodelist *ndlp) 7273 { 7274 uint8_t *pcmd; 7275 7276 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 7277 7278 /* skip over first word of echo command to find echo data */ 7279 pcmd += sizeof(uint32_t); 7280 7281 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 7282 return 0; 7283 } 7284 7285 /** 7286 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 7287 * @vport: pointer to a host virtual N_Port data structure. 7288 * @cmdiocb: pointer to lpfc command iocb data structure. 7289 * @ndlp: pointer to a node-list data structure. 7290 * 7291 * This routine processes a Link Incident Report Registration (LIRR) IOCB 7292 * received as an ELS unsolicited event. Currently, this function just invokes 7293 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 7294 * 7295 * Return code 7296 * 0 - Successfully processed lirr iocb (currently always returns 0) 7297 **/ 7298 static int 7299 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7300 struct lpfc_nodelist *ndlp) 7301 { 7302 struct ls_rjt stat; 7303 7304 /* For now, unconditionally reject this command */ 7305 stat.un.b.lsRjtRsvd0 = 0; 7306 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7307 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7308 stat.un.b.vendorUnique = 0; 7309 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7310 return 0; 7311 } 7312 7313 /** 7314 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 7315 * @vport: pointer to a host virtual N_Port data structure. 7316 * @cmdiocb: pointer to lpfc command iocb data structure. 7317 * @ndlp: pointer to a node-list data structure. 7318 * 7319 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 7320 * received as an ELS unsolicited event. A request to RRQ shall only 7321 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 7322 * Nx_Port N_Port_ID of the target Exchange is the same as the 7323 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 7324 * not accepted, an LS_RJT with reason code "Unable to perform 7325 * command request" and reason code explanation "Invalid Originator 7326 * S_ID" shall be returned. For now, we just unconditionally accept 7327 * RRQ from the target. 7328 **/ 7329 static void 7330 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7331 struct lpfc_nodelist *ndlp) 7332 { 7333 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7334 if (vport->phba->sli_rev == LPFC_SLI_REV4) 7335 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 7336 } 7337 7338 /** 7339 * lpfc_els_rsp_rls_acc - Completion callback func for MBX_READ_LNK_STAT mbox cmd 7340 * @phba: pointer to lpfc hba data structure. 7341 * @pmb: pointer to the driver internal queue element for mailbox command. 7342 * 7343 * This routine is the completion callback function for the MBX_READ_LNK_STAT 7344 * mailbox command. This callback function is to actually send the Accept 7345 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 7346 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 7347 * mailbox command, constructs the RLS response with the link statistics 7348 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the ACC 7349 * response to the RLS.
7350 * 7351 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7352 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7353 * will be stored into the context1 field of the IOCB for the completion 7354 * callback function to the RLS Accept Response ELS IOCB command. 7355 * 7356 **/ 7357 static void 7358 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7359 { 7360 int rc = 0; 7361 MAILBOX_t *mb; 7362 IOCB_t *icmd; 7363 struct RLS_RSP *rls_rsp; 7364 uint8_t *pcmd; 7365 struct lpfc_iocbq *elsiocb; 7366 struct lpfc_nodelist *ndlp; 7367 uint16_t oxid; 7368 uint16_t rxid; 7369 uint32_t cmdsize; 7370 7371 mb = &pmb->u.mb; 7372 7373 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 7374 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 7375 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 7376 pmb->ctx_buf = NULL; 7377 pmb->ctx_ndlp = NULL; 7378 7379 if (mb->mbxStatus) { 7380 mempool_free(pmb, phba->mbox_mem_pool); 7381 return; 7382 } 7383 7384 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 7385 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7386 lpfc_max_els_tries, ndlp, 7387 ndlp->nlp_DID, ELS_CMD_ACC); 7388 7389 /* Decrement the ndlp reference count from previous mbox command */ 7390 lpfc_nlp_put(ndlp); 7391 7392 if (!elsiocb) { 7393 mempool_free(pmb, phba->mbox_mem_pool); 7394 return; 7395 } 7396 7397 icmd = &elsiocb->iocb; 7398 icmd->ulpContext = rxid; 7399 icmd->unsli3.rcvsli3.ox_id = oxid; 7400 7401 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7402 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7403 pcmd += sizeof(uint32_t); /* Skip past command */ 7404 rls_rsp = (struct RLS_RSP *)pcmd; 7405 7406 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 7407 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 7408 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 7409 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 7410 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 7411 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 7412 mempool_free(pmb, phba->mbox_mem_pool); 7413 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 7414 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7415 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 7416 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 7417 elsiocb->iotag, elsiocb->iocb.ulpContext, 7418 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7419 ndlp->nlp_rpi); 7420 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7421 phba->fc_stat.elsXmitACC++; 7422 elsiocb->context1 = lpfc_nlp_get(ndlp); 7423 if (!elsiocb->context1) 7424 goto node_err; 7425 7426 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7427 if (rc == IOCB_ERROR) 7428 goto io_err; 7429 return; 7430 7431 io_err: 7432 lpfc_nlp_put(ndlp); 7433 node_err: 7434 lpfc_els_free_iocb(phba, elsiocb); 7435 } 7436 7437 /** 7438 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 7439 * @vport: pointer to a host virtual N_Port data structure. 7440 * @cmdiocb: pointer to lpfc command iocb data structure. 7441 * @ndlp: pointer to a node-list data structure. 7442 * 7443 * This routine processes Read Link Status (RLS) IOCB received as an 7444 * ELS unsolicited event. It first checks the remote port state. If the 7445 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7446 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 7447 * response.
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 7448 * to read the HBA link statistics. The callback function, 7449 * lpfc_els_rsp_rls_acc(), registered with the MBX_READ_LNK_STAT mailbox 7450 * command, then actually sends out the RLS Accept (ACC) response. 7451 * 7452 * Return codes 7453 * 0 - Successfully processed rls iocb (currently always returns 0) 7454 **/ 7455 static int 7456 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7457 struct lpfc_nodelist *ndlp) 7458 { 7459 struct lpfc_hba *phba = vport->phba; 7460 LPFC_MBOXQ_t *mbox; 7461 struct ls_rjt stat; 7462 7463 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7464 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 7465 /* reject the unsolicited RLS request and done with it */ 7466 goto reject_out; 7467 7468 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 7469 if (mbox) { 7470 lpfc_read_lnk_stat(phba, mbox); 7471 mbox->ctx_buf = (void *)((unsigned long) 7472 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 7473 cmdiocb->iocb.ulpContext)); /* rx_id */ 7474 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 7475 if (!mbox->ctx_ndlp) 7476 goto node_err; 7477 mbox->vport = vport; 7478 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 7479 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 7480 != MBX_NOT_FINISHED) 7481 /* Mbox completion will send ELS Response */ 7482 return 0; 7483 /* Decrement reference count used for the failed mbox 7484 * command. 7485 */ 7486 lpfc_nlp_put(ndlp); 7487 node_err: 7488 mempool_free(mbox, phba->mbox_mem_pool); 7489 } 7490 reject_out: 7491 /* issue rejection response */ 7492 stat.un.b.lsRjtRsvd0 = 0; 7493 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7494 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7495 stat.un.b.vendorUnique = 0; 7496 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7497 return 0; 7498 } 7499 7500 /** 7501 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 7502 * @vport: pointer to a host virtual N_Port data structure. 7503 * @cmdiocb: pointer to lpfc command iocb data structure. 7504 * @ndlp: pointer to a node-list data structure. 7505 * 7506 * This routine processes Read Timeout Value (RTV) IOCB received as an 7507 * ELS unsolicited event. It first checks the remote port state. If the 7508 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7509 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 7510 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout 7511 * Value (RTV) unsolicited IOCB event. 7512 * 7513 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7514 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7515 * will be stored into the context1 field of the IOCB for the completion 7516 * callback function to the RTV Accept Response ELS IOCB command.
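 *
 * For reference, the ACC payload built below is word 0 = ELS_CMD_ACC
 * followed by a struct RTV_RSP that carries R_A_TOV in milliseconds
 * (phba->fc_ratov * 1000), E_D_TOV (phba->fc_edtov), and a qualifier
 * word whose qtov_edtovres bit mirrors phba->fc_edtovResol.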
7517 * 7518 * Return codes 7519 * 0 - Successfully processed rtv iocb (currently always returns 0) 7520 **/ 7521 static int 7522 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7523 struct lpfc_nodelist *ndlp) 7524 { 7525 int rc = 0; 7526 struct lpfc_hba *phba = vport->phba; 7527 struct ls_rjt stat; 7528 struct RTV_RSP *rtv_rsp; 7529 uint8_t *pcmd; 7530 struct lpfc_iocbq *elsiocb; 7531 uint32_t cmdsize; 7532 7533 7534 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7535 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 7536 /* reject the unsolicited RTV request and done with it */ 7537 goto reject_out; 7538 7539 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 7540 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7541 lpfc_max_els_tries, ndlp, 7542 ndlp->nlp_DID, ELS_CMD_ACC); 7543 7544 if (!elsiocb) 7545 return 1; 7546 7547 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7548 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7549 pcmd += sizeof(uint32_t); /* Skip past command */ 7550 7551 /* use the command's xri in the response */ 7552 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 7553 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7554 7555 rtv_rsp = (struct RTV_RSP *)pcmd; 7556 7557 /* populate RTV payload */ 7558 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 7559 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 7560 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 7561 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 7562 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 7563 7564 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 7565 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7566 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 7567 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 7568 "Data: x%x x%x x%x\n", 7569 elsiocb->iotag, elsiocb->iocb.ulpContext, 7570 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7571 ndlp->nlp_rpi, 7572 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 7573 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7574 phba->fc_stat.elsXmitACC++; 7575 elsiocb->context1 = lpfc_nlp_get(ndlp); 7576 if (!elsiocb->context1) { 7577 lpfc_els_free_iocb(phba, elsiocb); 7578 return 0; 7579 } 7580 7581 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7582 if (rc == IOCB_ERROR) { 7583 lpfc_nlp_put(ndlp); 7584 lpfc_els_free_iocb(phba, elsiocb); 7585 } 7586 return 0; 7587 7588 reject_out: 7589 /* issue rejection response */ 7590 stat.un.b.lsRjtRsvd0 = 0; 7591 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7592 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7593 stat.un.b.vendorUnique = 0; 7594 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7595 return 0; 7596 } 7597 7598 /* lpfc_issue_els_rrq - Issue an els rrq command iocb 7599 * @vport: pointer to a host virtual N_Port data structure. 7600 * @ndlp: pointer to a node-list data structure. 7601 * @did: DID of the target. 7602 * @rrq: Pointer to the rrq struct. 7603 * 7604 * Build an ELS RRQ command and send it to the target. If the iocb is issued 7605 * successfully, the completion handler will clear the RRQ. 7606 * 7607 * Return codes 7608 * 0 - Successfully sent rrq els iocb. 7609 * 1 - Failed to send rrq els iocb.
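 *
 * For reference, the RRQ payload built below carries the originator
 * S_ID (vport->fc_myDID) together with the OX_ID and RX_ID of the
 * exchange whose recovery qualifier is being reinstated; on SLI-4 the
 * OX_ID is looked up through phba->sli4_hba.xri_ids[rrq->xritag].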
7610 **/ 7611 static int 7612 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7613 uint32_t did, struct lpfc_node_rrq *rrq) 7614 { 7615 struct lpfc_hba *phba = vport->phba; 7616 struct RRQ *els_rrq; 7617 struct lpfc_iocbq *elsiocb; 7618 uint8_t *pcmd; 7619 uint16_t cmdsize; 7620 int ret; 7621 7622 7623 if (ndlp != rrq->ndlp) 7624 ndlp = rrq->ndlp; 7625 if (!ndlp) 7626 return 1; 7627 7628 /* If ndlp is not NULL, we will bump the reference count on it */ 7629 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 7630 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 7631 ELS_CMD_RRQ); 7632 if (!elsiocb) 7633 return 1; 7634 7635 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7636 7637 /* For RRQ request, remainder of payload is Exchange IDs */ 7638 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 7639 pcmd += sizeof(uint32_t); 7640 els_rrq = (struct RRQ *) pcmd; 7641 7642 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 7643 bf_set(rrq_rxid, els_rrq, rrq->rxid); 7644 bf_set(rrq_did, els_rrq, vport->fc_myDID); 7645 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 7646 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 7647 7648 7649 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7650 "Issue RRQ: did:x%x", 7651 did, rrq->xritag, rrq->rxid); 7652 elsiocb->context_un.rrq = rrq; 7653 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 7654 elsiocb->context1 = lpfc_nlp_get(ndlp); 7655 if (!elsiocb->context1) 7656 goto node_err; 7657 7658 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7659 if (ret == IOCB_ERROR) 7660 goto io_err; 7661 return 0; 7662 7663 io_err: 7664 lpfc_nlp_put(ndlp); 7665 node_err: 7666 lpfc_els_free_iocb(phba, elsiocb); 7667 return 1; 7668 } 7669 7670 /** 7671 * lpfc_send_rrq - Sends ELS RRQ if needed. 7672 * @phba: pointer to lpfc hba data structure. 7673 * @rrq: pointer to the active rrq. 7674 * 7675 * This routine will call the lpfc_issue_els_rrq if the rrq is 7676 * still active for the xri. If this function returns a failure then 7677 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 7678 * 7679 * Returns 0 Success. 7680 * 1 Failure. 7681 **/ 7682 int 7683 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 7684 { 7685 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 7686 rrq->nlp_DID); 7687 if (!ndlp) 7688 return 1; 7689 7690 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 7691 return lpfc_issue_els_rrq(rrq->vport, ndlp, 7692 rrq->nlp_DID, rrq); 7693 else 7694 return 1; 7695 } 7696 7697 /** 7698 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 7699 * @vport: pointer to a host virtual N_Port data structure. 7700 * @cmdsize: size of the ELS command. 7701 * @oldiocb: pointer to the original lpfc command iocb data structure. 7702 * @ndlp: pointer to a node-list data structure. 7703 * 7704 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 7705 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 7706 * 7707 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7708 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7709 * will be stored into the context1 field of the IOCB for the completion 7710 * callback function to the RPL Accept Response ELS command. 
7711 * 7712 * Return code 7713 * 0 - Successfully issued ACC RPL ELS command 7714 * 1 - Failed to issue ACC RPL ELS command 7715 **/ 7716 static int 7717 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 7718 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 7719 { 7720 int rc = 0; 7721 struct lpfc_hba *phba = vport->phba; 7722 IOCB_t *icmd, *oldcmd; 7723 RPL_RSP rpl_rsp; 7724 struct lpfc_iocbq *elsiocb; 7725 uint8_t *pcmd; 7726 7727 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 7728 ndlp->nlp_DID, ELS_CMD_ACC); 7729 7730 if (!elsiocb) 7731 return 1; 7732 7733 icmd = &elsiocb->iocb; 7734 oldcmd = &oldiocb->iocb; 7735 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 7736 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 7737 7738 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7739 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7740 pcmd += sizeof(uint16_t); 7741 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 7742 pcmd += sizeof(uint16_t); 7743 7744 /* Setup the RPL ACC payload */ 7745 rpl_rsp.listLen = be32_to_cpu(1); 7746 rpl_rsp.index = 0; 7747 rpl_rsp.port_num_blk.portNum = 0; 7748 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 7749 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 7750 sizeof(struct lpfc_name)); 7751 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 7752 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 7753 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7754 "0120 Xmit ELS RPL ACC response tag x%x " 7755 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 7756 "rpi x%x\n", 7757 elsiocb->iotag, elsiocb->iocb.ulpContext, 7758 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7759 ndlp->nlp_rpi); 7760 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7761 phba->fc_stat.elsXmitACC++; 7762 elsiocb->context1 = lpfc_nlp_get(ndlp); 7763 if (!elsiocb->context1) 7764 goto node_err; 7765 7766 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7767 if (rc == IOCB_ERROR) 7768 goto io_err; 7769 return 0; 7770 7771 io_err: 7772 lpfc_nlp_put(ndlp); 7773 node_err: 7774 lpfc_els_free_iocb(phba, elsiocb); 7775 return 1; 7776 } 7777 7778 /** 7779 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 7780 * @vport: pointer to a host virtual N_Port data structure. 7781 * @cmdiocb: pointer to lpfc command iocb data structure. 7782 * @ndlp: pointer to a node-list data structure. 7783 * 7784 * This routine processes Read Port List (RPL) IOCB received as an ELS 7785 * unsolicited event. It first checks the remote port state. If the remote 7786 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 7787 * invokes the lpfc_els_rsp_reject() routine to send reject response. 7788 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 7789 * to accept the RPL. 
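 *
 * The size of the ACC is bounded by the maxsize field of the request:
 * a full struct RPL_RSP is returned when the requester allows it
 * (a maxsize of zero, or one large enough to hold RPL_RSP); otherwise
 * the response is clipped to maxsize words, as the sizing logic below
 * shows.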
7790 * 7791 * Return code 7792 * 0 - Successfully processed rpl iocb (currently always return 0) 7793 **/ 7794 static int 7795 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7796 struct lpfc_nodelist *ndlp) 7797 { 7798 struct lpfc_dmabuf *pcmd; 7799 uint32_t *lp; 7800 uint32_t maxsize; 7801 uint16_t cmdsize; 7802 RPL *rpl; 7803 struct ls_rjt stat; 7804 7805 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7806 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 7807 /* issue rejection response */ 7808 stat.un.b.lsRjtRsvd0 = 0; 7809 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7810 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7811 stat.un.b.vendorUnique = 0; 7812 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7813 NULL); 7814 /* rejected the unsolicited RPL request and done with it */ 7815 return 0; 7816 } 7817 7818 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7819 lp = (uint32_t *) pcmd->virt; 7820 rpl = (RPL *) (lp + 1); 7821 maxsize = be32_to_cpu(rpl->maxsize); 7822 7823 /* We support only one port */ 7824 if ((rpl->index == 0) && 7825 ((maxsize == 0) || 7826 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 7827 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 7828 } else { 7829 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 7830 } 7831 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 7832 7833 return 0; 7834 } 7835 7836 /** 7837 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 7838 * @vport: pointer to a virtual N_Port data structure. 7839 * @cmdiocb: pointer to lpfc command iocb data structure. 7840 * @ndlp: pointer to a node-list data structure. 7841 * 7842 * This routine processes Fibre Channel Address Resolution Protocol 7843 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 7844 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 7845 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 7846 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 7847 * remote PortName is compared against the FC PortName stored in the @vport 7848 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 7849 * compared against the FC NodeName stored in the @vport data structure. 7850 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 7851 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 7852 * invoked to send out FARP Response to the remote node. Before sending the 7853 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 7854 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 7855 * routine is invoked to log into the remote port first. 
7856 * 7857 * Return code 7858 * 0 - Either the FARP Match Mode not supported or successfully processed 7859 **/ 7860 static int 7861 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7862 struct lpfc_nodelist *ndlp) 7863 { 7864 struct lpfc_dmabuf *pcmd; 7865 uint32_t *lp; 7866 IOCB_t *icmd; 7867 FARP *fp; 7868 uint32_t cnt, did; 7869 7870 icmd = &cmdiocb->iocb; 7871 did = icmd->un.elsreq64.remoteID; 7872 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7873 lp = (uint32_t *) pcmd->virt; 7874 7875 lp++; 7876 fp = (FARP *) lp; 7877 /* FARP-REQ received from DID <did> */ 7878 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7879 "0601 FARP-REQ received from DID x%x\n", did); 7880 /* We will only support match on WWPN or WWNN */ 7881 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 7882 return 0; 7883 } 7884 7885 cnt = 0; 7886 /* If this FARP command is searching for my portname */ 7887 if (fp->Mflags & FARP_MATCH_PORT) { 7888 if (memcmp(&fp->RportName, &vport->fc_portname, 7889 sizeof(struct lpfc_name)) == 0) 7890 cnt = 1; 7891 } 7892 7893 /* If this FARP command is searching for my nodename */ 7894 if (fp->Mflags & FARP_MATCH_NODE) { 7895 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 7896 sizeof(struct lpfc_name)) == 0) 7897 cnt = 1; 7898 } 7899 7900 if (cnt) { 7901 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 7902 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 7903 /* Log back into the node before sending the FARP. */ 7904 if (fp->Rflags & FARP_REQUEST_PLOGI) { 7905 ndlp->nlp_prev_state = ndlp->nlp_state; 7906 lpfc_nlp_set_state(vport, ndlp, 7907 NLP_STE_PLOGI_ISSUE); 7908 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 7909 } 7910 7911 /* Send a FARP response to that node */ 7912 if (fp->Rflags & FARP_REQUEST_FARPR) 7913 lpfc_issue_els_farpr(vport, did, 0); 7914 } 7915 } 7916 return 0; 7917 } 7918 7919 /** 7920 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 7921 * @vport: pointer to a host virtual N_Port data structure. 7922 * @cmdiocb: pointer to lpfc command iocb data structure. 7923 * @ndlp: pointer to a node-list data structure. 7924 * 7925 * This routine processes Fibre Channel Address Resolution Protocol 7926 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 7927 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 7928 * the FARP response request. 7929 * 7930 * Return code 7931 * 0 - Successfully processed FARPR IOCB (currently always return 0) 7932 **/ 7933 static int 7934 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7935 struct lpfc_nodelist *ndlp) 7936 { 7937 struct lpfc_dmabuf *pcmd; 7938 uint32_t *lp; 7939 IOCB_t *icmd; 7940 uint32_t did; 7941 7942 icmd = &cmdiocb->iocb; 7943 did = icmd->un.elsreq64.remoteID; 7944 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7945 lp = (uint32_t *) pcmd->virt; 7946 7947 lp++; 7948 /* FARP-RSP received from DID <did> */ 7949 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7950 "0600 FARP-RSP received from DID x%x\n", did); 7951 /* ACCEPT the Farp resp request */ 7952 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7953 7954 return 0; 7955 } 7956 7957 /** 7958 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 7959 * @vport: pointer to a host virtual N_Port data structure. 7960 * @cmdiocb: pointer to lpfc command iocb data structure. 7961 * @fan_ndlp: pointer to a node-list data structure. 
7962 * 7963 * This routine processes a Fabric Address Notification (FAN) IOCB 7964 * command received as an ELS unsolicited event. The FAN ELS command will 7965 * only be processed on a physical port (i.e., the @vport represents the 7966 * physical port). The fabric NodeName and PortName from the FAN IOCB are 7967 * compared against those in the phba data structure. If any of those is 7968 * different, the lpfc_initial_flogi() routine is invoked to initialize 7969 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise, 7970 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine 7971 * is invoked to register login to the fabric. 7972 * 7973 * Return code 7974 * 0 - Successfully processed fan iocb (currently always returns 0). 7975 **/ 7976 static int 7977 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7978 struct lpfc_nodelist *fan_ndlp) 7979 { 7980 struct lpfc_hba *phba = vport->phba; 7981 uint32_t *lp; 7982 FAN *fp; 7983 7984 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 7985 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 7986 fp = (FAN *) ++lp; 7987 /* FAN received; Fan does not have a reply sequence */ 7988 if ((vport == phba->pport) && 7989 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 7990 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 7991 sizeof(struct lpfc_name))) || 7992 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 7993 sizeof(struct lpfc_name)))) { 7994 /* This port has switched fabrics. FLOGI is required */ 7995 lpfc_issue_init_vfi(vport); 7996 } else { 7997 /* FAN verified - skip FLOGI */ 7998 vport->fc_myDID = vport->fc_prevDID; 7999 if (phba->sli_rev < LPFC_SLI_REV4) 8000 lpfc_issue_fabric_reglogin(vport); 8001 else { 8002 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8003 "3138 Need register VFI: (x%x/%x)\n", 8004 vport->fc_prevDID, vport->fc_myDID); 8005 lpfc_issue_reg_vfi(vport); 8006 } 8007 } 8008 } 8009 return 0; 8010 } 8011 8012 /** 8013 * lpfc_els_timeout - Handler function for the els timer 8014 * @t: timer context used to obtain the vport. 8015 * 8016 * This routine is invoked by the ELS timer after timeout. It posts the ELS 8017 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port 8018 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 8019 * up the worker thread. The worker thread then invokes the routine 8020 * lpfc_els_timeout_handler() to service the posted WORKER_ELS_TMO event. 8021 **/ 8022 void 8023 lpfc_els_timeout(struct timer_list *t) 8024 { 8025 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 8026 struct lpfc_hba *phba = vport->phba; 8027 uint32_t tmo_posted; 8028 unsigned long iflag; 8029 8030 spin_lock_irqsave(&vport->work_port_lock, iflag); 8031 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 8032 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8033 vport->work_port_events |= WORKER_ELS_TMO; 8034 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 8035 8036 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 8037 lpfc_worker_wake_up(phba); 8038 return; 8039 } 8040 8041 8042 /** 8043 * lpfc_els_timeout_handler - Process an els timeout event 8044 * @vport: pointer to a virtual N_Port data structure. 8045 * 8046 * This routine is the actual handler function that processes an ELS timeout 8047 * event.
It walks the ELS ring to get and abort all the IOCBs (except the 8048 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 8049 * invoking the lpfc_sli_issue_abort_iotag() routine. 8050 **/ 8051 void 8052 lpfc_els_timeout_handler(struct lpfc_vport *vport) 8053 { 8054 struct lpfc_hba *phba = vport->phba; 8055 struct lpfc_sli_ring *pring; 8056 struct lpfc_iocbq *tmp_iocb, *piocb; 8057 IOCB_t *cmd = NULL; 8058 struct lpfc_dmabuf *pcmd; 8059 uint32_t els_command = 0; 8060 uint32_t timeout; 8061 uint32_t remote_ID = 0xffffffff; 8062 LIST_HEAD(abort_list); 8063 8064 8065 timeout = (uint32_t)(phba->fc_ratov << 1); 8066 8067 pring = lpfc_phba_elsring(phba); 8068 if (unlikely(!pring)) 8069 return; 8070 8071 if (phba->pport->load_flag & FC_UNLOADING) 8072 return; 8073 8074 spin_lock_irq(&phba->hbalock); 8075 if (phba->sli_rev == LPFC_SLI_REV4) 8076 spin_lock(&pring->ring_lock); 8077 8078 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8079 cmd = &piocb->iocb; 8080 8081 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 8082 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8083 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8084 continue; 8085 8086 if (piocb->vport != vport) 8087 continue; 8088 8089 pcmd = (struct lpfc_dmabuf *) piocb->context2; 8090 if (pcmd) 8091 els_command = *(uint32_t *) (pcmd->virt); 8092 8093 if (els_command == ELS_CMD_FARP || 8094 els_command == ELS_CMD_FARPR || 8095 els_command == ELS_CMD_FDISC) 8096 continue; 8097 8098 if (piocb->drvrTimeout > 0) { 8099 if (piocb->drvrTimeout >= timeout) 8100 piocb->drvrTimeout -= timeout; 8101 else 8102 piocb->drvrTimeout = 0; 8103 continue; 8104 } 8105 8106 remote_ID = 0xffffffff; 8107 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 8108 remote_ID = cmd->un.elsreq64.remoteID; 8109 else { 8110 struct lpfc_nodelist *ndlp; 8111 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 8112 if (ndlp) 8113 remote_ID = ndlp->nlp_DID; 8114 } 8115 list_add_tail(&piocb->dlist, &abort_list); 8116 } 8117 if (phba->sli_rev == LPFC_SLI_REV4) 8118 spin_unlock(&pring->ring_lock); 8119 spin_unlock_irq(&phba->hbalock); 8120 8121 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8122 cmd = &piocb->iocb; 8123 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8124 "0127 ELS timeout Data: x%x x%x x%x " 8125 "x%x\n", els_command, 8126 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 8127 spin_lock_irq(&phba->hbalock); 8128 list_del_init(&piocb->dlist); 8129 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 8130 spin_unlock_irq(&phba->hbalock); 8131 } 8132 8133 /* Make sure HBA is alive */ 8134 lpfc_issue_hb_tmo(phba); 8135 8136 if (!list_empty(&pring->txcmplq)) 8137 if (!(phba->pport->load_flag & FC_UNLOADING)) 8138 mod_timer(&vport->els_tmofunc, 8139 jiffies + msecs_to_jiffies(1000 * timeout)); 8140 } 8141 8142 /** 8143 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 8144 * @vport: pointer to a host virtual N_Port data structure. 8145 * 8146 * This routine is used to clean up all the outstanding ELS commands on a 8147 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 8148 * routine. After that, it walks the ELS transmit queue to remove all the 8149 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 8150 * the IOCBs with a non-NULL completion callback function, the callback 8151 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 8152 * un.ulpWord[4] set to IOERR_SLI_ABORTED. 
For IOCBs with a NULL completion 8153 * callback function, the IOCB will simply be released. Finally, it walks 8154 * the ELS transmit completion queue to issue an abort IOCB to any transmit 8155 * completion queue IOCB that is associated with the @vport and is not 8156 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 8157 * part of the discovery state machine) out to HBA by invoking the 8158 * lpfc_sli_issue_abort_iotag() routine. Note that although this function 8159 * issues an abort IOCB for each queued transmit completion queue IOCB, it 8160 * does not guarantee that the IOCBs have been aborted when it returns. 8161 **/ 8162 void 8163 lpfc_els_flush_cmd(struct lpfc_vport *vport) 8164 { 8165 LIST_HEAD(abort_list); 8166 struct lpfc_hba *phba = vport->phba; 8167 struct lpfc_sli_ring *pring; 8168 struct lpfc_iocbq *tmp_iocb, *piocb; 8169 IOCB_t *cmd = NULL; 8170 unsigned long iflags = 0; 8171 8172 lpfc_fabric_abort_vport(vport); 8173 8174 /* 8175 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 8176 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 8177 * ultimately grabs the ring_lock, the driver must splice the list into 8178 * a working list and release the locks before calling the abort. 8179 */ 8180 spin_lock_irqsave(&phba->hbalock, iflags); 8181 pring = lpfc_phba_elsring(phba); 8182 8183 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 8184 if (unlikely(!pring)) { 8185 spin_unlock_irqrestore(&phba->hbalock, iflags); 8186 return; 8187 } 8188 8189 if (phba->sli_rev == LPFC_SLI_REV4) 8190 spin_lock(&pring->ring_lock); 8191 8192 /* First we need to issue aborts to outstanding cmds on txcmpl */ 8193 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8194 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 8195 continue; 8196 8197 if (piocb->vport != vport) 8198 continue; 8199 8200 if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) 8201 continue; 8202 8203 /* On the ELS ring we can have ELS_REQUESTs or 8204 * GEN_REQUESTs waiting for a response. 8205 */ 8206 cmd = &piocb->iocb; 8207 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 8208 list_add_tail(&piocb->dlist, &abort_list); 8209 8210 /* If the link is down when flushing ELS commands 8211 * the firmware will not complete them till after 8212 * the link comes back up. This may confuse 8213 * discovery for the new link up, so we need to 8214 * change the compl routine to just clean up the iocb 8215 * and avoid any retry logic. 8216 */ 8217 if (phba->link_state == LPFC_LINK_DOWN) 8218 piocb->iocb_cmpl = lpfc_cmpl_els_link_down; 8219 } 8220 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) 8221 list_add_tail(&piocb->dlist, &abort_list); 8222 } 8223 8224 if (phba->sli_rev == LPFC_SLI_REV4) 8225 spin_unlock(&pring->ring_lock); 8226 spin_unlock_irqrestore(&phba->hbalock, iflags); 8227 8228 /* Abort each txcmpl iocb on aborted list and remove the dlist links.
*/ 8229 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8230 spin_lock_irqsave(&phba->hbalock, iflags); 8231 list_del_init(&piocb->dlist); 8232 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 8233 spin_unlock_irqrestore(&phba->hbalock, iflags); 8234 } 8235 /* Make sure HBA is alive */ 8236 lpfc_issue_hb_tmo(phba); 8237 8238 if (!list_empty(&abort_list)) 8239 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8240 "3387 abort list for txq not empty\n"); 8241 INIT_LIST_HEAD(&abort_list); 8242 8243 spin_lock_irqsave(&phba->hbalock, iflags); 8244 if (phba->sli_rev == LPFC_SLI_REV4) 8245 spin_lock(&pring->ring_lock); 8246 8247 /* No need to abort the txq list, 8248 * just queue them up for lpfc_sli_cancel_iocbs 8249 */ 8250 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 8251 cmd = &piocb->iocb; 8252 8253 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 8254 continue; 8255 } 8256 8257 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 8258 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 8259 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 8260 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 8261 cmd->ulpCommand == CMD_ABORT_XRI_CN) 8262 continue; 8263 8264 if (piocb->vport != vport) 8265 continue; 8266 8267 list_del_init(&piocb->list); 8268 list_add_tail(&piocb->list, &abort_list); 8269 } 8270 8271 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 8272 if (vport == phba->pport) { 8273 list_for_each_entry_safe(piocb, tmp_iocb, 8274 &phba->fabric_iocb_list, list) { 8275 cmd = &piocb->iocb; 8276 list_del_init(&piocb->list); 8277 list_add_tail(&piocb->list, &abort_list); 8278 } 8279 } 8280 8281 if (phba->sli_rev == LPFC_SLI_REV4) 8282 spin_unlock(&pring->ring_lock); 8283 spin_unlock_irqrestore(&phba->hbalock, iflags); 8284 8285 /* Cancel all the IOCBs from the completions list */ 8286 lpfc_sli_cancel_iocbs(phba, &abort_list, 8287 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 8288 8289 return; 8290 } 8291 8292 /** 8293 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 8294 * @phba: pointer to lpfc hba data structure. 8295 * 8296 * This routine is used to clean up all the outstanding ELS commands on a 8297 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 8298 * routine. After that, it walks the ELS transmit queue to remove all the 8299 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 8300 * the IOCBs with the completion callback function associated, the callback 8301 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 8302 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 8303 * callback function associated, the IOCB will simply be released. Finally, 8304 * it walks the ELS transmit completion queue to issue an abort IOCB to any 8305 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 8306 * management plane IOCBs that are not part of the discovery state machine) 8307 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 8308 **/ 8309 void 8310 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 8311 { 8312 struct lpfc_vport *vport; 8313 8314 spin_lock_irq(&phba->port_list_lock); 8315 list_for_each_entry(vport, &phba->port_list, listentry) 8316 lpfc_els_flush_cmd(vport); 8317 spin_unlock_irq(&phba->port_list_lock); 8318 8319 return; 8320 } 8321 8322 /** 8323 * lpfc_send_els_failure_event - Posts an ELS command failure event 8324 * @phba: Pointer to hba context object. 
8325 * @cmdiocbp: Pointer to command iocb which reported error. 8326 * @rspiocbp: Pointer to response iocb which reported error. 8327 * 8328 * This function sends an event when there is an ELS command 8329 * failure. 8330 **/ 8331 void 8332 lpfc_send_els_failure_event(struct lpfc_hba *phba, 8333 struct lpfc_iocbq *cmdiocbp, 8334 struct lpfc_iocbq *rspiocbp) 8335 { 8336 struct lpfc_vport *vport = cmdiocbp->vport; 8337 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8338 struct lpfc_lsrjt_event lsrjt_event; 8339 struct lpfc_fabric_event_header fabric_event; 8340 struct ls_rjt stat; 8341 struct lpfc_nodelist *ndlp; 8342 uint32_t *pcmd; 8343 8344 ndlp = cmdiocbp->context1; 8345 if (!ndlp) 8346 return; 8347 8348 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 8349 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 8350 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 8351 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 8352 sizeof(struct lpfc_name)); 8353 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 8354 sizeof(struct lpfc_name)); 8355 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8356 cmdiocbp->context2)->virt); 8357 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 8358 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 8359 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 8360 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 8361 fc_host_post_vendor_event(shost, 8362 fc_get_event_number(), 8363 sizeof(lsrjt_event), 8364 (char *)&lsrjt_event, 8365 LPFC_NL_VENDOR_ID); 8366 return; 8367 } 8368 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 8369 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 8370 fabric_event.event_type = FC_REG_FABRIC_EVENT; 8371 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 8372 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 8373 else 8374 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 8375 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 8376 sizeof(struct lpfc_name)); 8377 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 8378 sizeof(struct lpfc_name)); 8379 fc_host_post_vendor_event(shost, 8380 fc_get_event_number(), 8381 sizeof(fabric_event), 8382 (char *)&fabric_event, 8383 LPFC_NL_VENDOR_ID); 8384 return; 8385 } 8386 8387 } 8388 8389 /** 8390 * lpfc_send_els_event - Posts unsolicited els event 8391 * @vport: Pointer to vport object. 8392 * @ndlp: Pointer FC node object. 8393 * @payload: ELS command code type. 8394 * 8395 * This function posts an event when there is an incoming 8396 * unsolicited ELS command. 
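 *
 * Only PLOGI, PRLO, ADISC and LOGO are reported; a LOGO additionally
 * carries the WWPN from the LOGO payload in the larger struct
 * lpfc_logo_event, while the other commands use the common struct
 * lpfc_els_event_header. Unrecognized commands are silently dropped.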
8397 **/ 8398 static void 8399 lpfc_send_els_event(struct lpfc_vport *vport, 8400 struct lpfc_nodelist *ndlp, 8401 uint32_t *payload) 8402 { 8403 struct lpfc_els_event_header *els_data = NULL; 8404 struct lpfc_logo_event *logo_data = NULL; 8405 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8406 8407 if (*payload == ELS_CMD_LOGO) { 8408 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 8409 if (!logo_data) { 8410 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8411 "0148 Failed to allocate memory " 8412 "for LOGO event\n"); 8413 return; 8414 } 8415 els_data = &logo_data->header; 8416 } else { 8417 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 8418 GFP_KERNEL); 8419 if (!els_data) { 8420 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8421 "0149 Failed to allocate memory " 8422 "for ELS event\n"); 8423 return; 8424 } 8425 } 8426 els_data->event_type = FC_REG_ELS_EVENT; 8427 switch (*payload) { 8428 case ELS_CMD_PLOGI: 8429 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 8430 break; 8431 case ELS_CMD_PRLO: 8432 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 8433 break; 8434 case ELS_CMD_ADISC: 8435 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 8436 break; 8437 case ELS_CMD_LOGO: 8438 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 8439 /* Copy the WWPN in the LOGO payload */ 8440 memcpy(logo_data->logo_wwpn, &payload[2], 8441 sizeof(struct lpfc_name)); 8442 break; 8443 default: 8444 kfree(els_data); 8445 return; 8446 } 8447 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 8448 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 8449 if (*payload == ELS_CMD_LOGO) { 8450 fc_host_post_vendor_event(shost, 8451 fc_get_event_number(), 8452 sizeof(struct lpfc_logo_event), 8453 (char *)logo_data, 8454 LPFC_NL_VENDOR_ID); 8455 kfree(logo_data); 8456 } else { 8457 fc_host_post_vendor_event(shost, 8458 fc_get_event_number(), 8459 sizeof(struct lpfc_els_event_header), 8460 (char *)els_data, 8461 LPFC_NL_VENDOR_ID); 8462 kfree(els_data); 8463 } 8464 8465 return; 8466 } 8467 8468 8469 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 8470 FC_LS_TLV_DTAG_INIT); 8471 8472 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 8473 FC_FPIN_LI_EVT_TYPES_INIT); 8474 8475 /** 8476 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 8477 * @vport: Pointer to vport object. 8478 * @tlv: Pointer to the Link Integrity Notification Descriptor. 
8479 * 8480 * This function processes a link integrity FPIN event by 8481 * logging a message 8482 **/ 8483 static void 8484 lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv) 8485 { 8486 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 8487 const char *li_evt_str; 8488 u32 li_evt; 8489 8490 li_evt = be16_to_cpu(li->event_type); 8491 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 8492 8493 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8494 "4680 FPIN Link Integrity %s (x%x) " 8495 "Detecting PN x%016llx Attached PN x%016llx " 8496 "Duration %d mSecs Count %d Port Cnt %d\n", 8497 li_evt_str, li_evt, 8498 be64_to_cpu(li->detecting_wwpn), 8499 be64_to_cpu(li->attached_wwpn), 8500 be32_to_cpu(li->event_threshold), 8501 be32_to_cpu(li->event_count), 8502 be32_to_cpu(li->pname_count)); 8503 } 8504 8505 static void 8506 lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin, 8507 u32 fpin_length) 8508 { 8509 struct fc_tlv_desc *tlv; 8510 const char *dtag_nm; 8511 uint32_t desc_cnt = 0, bytes_remain; 8512 u32 dtag; 8513 8514 /* FPINs handled only if we are in the right discovery state */ 8515 if (vport->port_state < LPFC_DISC_AUTH) 8516 return; 8517 8518 /* make sure there is the full fpin header */ 8519 if (fpin_length < sizeof(struct fc_els_fpin)) 8520 return; 8521 8522 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 8523 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 8524 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 8525 8526 /* process each descriptor */ 8527 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 8528 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 8529 8530 dtag = be32_to_cpu(tlv->desc_tag); 8531 switch (dtag) { 8532 case ELS_DTAG_LNK_INTEGRITY: 8533 lpfc_els_rcv_fpin_li(vport, tlv); 8534 break; 8535 default: 8536 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 8537 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8538 "4678 skipped FPIN descriptor[%d]: " 8539 "tag x%x (%s)\n", 8540 desc_cnt, dtag, dtag_nm); 8541 break; 8542 } 8543 8544 desc_cnt++; 8545 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 8546 tlv = fc_tlv_next_desc(tlv); 8547 } 8548 8549 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length, 8550 (char *)fpin); 8551 } 8552 8553 /** 8554 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 8555 * @phba: pointer to lpfc hba data structure. 8556 * @pring: pointer to a SLI ring. 8557 * @vport: pointer to a host virtual N_Port data structure. 8558 * @elsiocb: pointer to lpfc els command iocb data structure. 8559 * 8560 * This routine is used for processing the IOCB associated with a unsolicited 8561 * event. It first determines whether there is an existing ndlp that matches 8562 * the DID from the unsolicited IOCB. If not, it will create a new one with 8563 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 8564 * IOCB is then used to invoke the proper routine and to set up proper state 8565 * of the discovery state machine. 
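 * Frames are dropped when a host link attention event is pending, when the
 * vport is unloading, or (for anything other than a PLOGI) while NPort
 * discovery is delayed. Commands that arrive before fabric configuration
 * completes are answered with an LS_RJT, except for FLOGI and, in pt2pt
 * mode, PLOGI.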
8566 **/ 8567 static void 8568 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8569 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 8570 { 8571 struct lpfc_nodelist *ndlp; 8572 struct ls_rjt stat; 8573 uint32_t *payload, payload_len; 8574 uint32_t cmd, did, newnode; 8575 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 8576 IOCB_t *icmd = &elsiocb->iocb; 8577 LPFC_MBOXQ_t *mbox; 8578 8579 if (!vport || !(elsiocb->context2)) 8580 goto dropit; 8581 8582 newnode = 0; 8583 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 8584 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 8585 cmd = *payload; 8586 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 8587 lpfc_post_buffer(phba, pring, 1); 8588 8589 did = icmd->un.rcvels.remoteID; 8590 if (icmd->ulpStatus) { 8591 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8592 "RCV Unsol ELS: status:x%x/x%x did:x%x", 8593 icmd->ulpStatus, icmd->un.ulpWord[4], did); 8594 goto dropit; 8595 } 8596 8597 /* Check to see if link went down during discovery */ 8598 if (lpfc_els_chk_latt(vport)) 8599 goto dropit; 8600 8601 /* Ignore traffic received during vport shutdown. */ 8602 if (vport->load_flag & FC_UNLOADING) 8603 goto dropit; 8604 8605 /* If NPort discovery is delayed drop incoming ELS */ 8606 if ((vport->fc_flag & FC_DISC_DELAYED) && 8607 (cmd != ELS_CMD_PLOGI)) 8608 goto dropit; 8609 8610 ndlp = lpfc_findnode_did(vport, did); 8611 if (!ndlp) { 8612 /* Cannot find existing Fabric ndlp, so allocate a new one */ 8613 ndlp = lpfc_nlp_init(vport, did); 8614 if (!ndlp) 8615 goto dropit; 8616 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8617 newnode = 1; 8618 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 8619 ndlp->nlp_type |= NLP_FABRIC; 8620 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 8621 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8622 newnode = 1; 8623 } 8624 8625 phba->fc_stat.elsRcvFrame++; 8626 8627 /* 8628 * Do not process any unsolicited ELS commands 8629 * if the ndlp is in DEV_LOSS 8630 */ 8631 spin_lock_irq(&ndlp->lock); 8632 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 8633 spin_unlock_irq(&ndlp->lock); 8634 if (newnode) 8635 lpfc_nlp_put(ndlp); 8636 goto dropit; 8637 } 8638 spin_unlock_irq(&ndlp->lock); 8639 8640 elsiocb->context1 = lpfc_nlp_get(ndlp); 8641 if (!elsiocb->context1) 8642 goto dropit; 8643 elsiocb->vport = vport; 8644 8645 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 8646 cmd &= ELS_CMD_MASK; 8647 } 8648 /* ELS command <elsCmd> received from NPORT <did> */ 8649 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8650 "0112 ELS command x%x received from NPORT x%x " 8651 "refcnt %d Data: x%x x%x x%x x%x\n", 8652 cmd, did, kref_read(&ndlp->kref), vport->port_state, 8653 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 8654 8655 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 8656 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 8657 (cmd != ELS_CMD_FLOGI) && 8658 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 8659 rjt_err = LSRJT_LOGICAL_BSY; 8660 rjt_exp = LSEXP_NOTHING_MORE; 8661 goto lsrjt; 8662 } 8663 8664 switch (cmd) { 8665 case ELS_CMD_PLOGI: 8666 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8667 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 8668 did, vport->port_state, ndlp->nlp_flag); 8669 8670 phba->fc_stat.elsRcvPLOGI++; 8671 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 8672 if (phba->sli_rev == LPFC_SLI_REV4 && 8673 (phba->pport->fc_flag & FC_PT2PT)) { 8674 vport->fc_prevDID = vport->fc_myDID; 8675 /* Our DID needs 
to be updated before registering 8676 * the vfi. This is done in lpfc_rcv_plogi but 8677 * that is called after the reg_vfi. 8678 */ 8679 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; 8680 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8681 "3312 Remote port assigned DID x%x " 8682 "%x\n", vport->fc_myDID, 8683 vport->fc_prevDID); 8684 } 8685 8686 lpfc_send_els_event(vport, ndlp, payload); 8687 8688 /* If Nport discovery is delayed, reject PLOGIs */ 8689 if (vport->fc_flag & FC_DISC_DELAYED) { 8690 rjt_err = LSRJT_UNABLE_TPC; 8691 rjt_exp = LSEXP_NOTHING_MORE; 8692 break; 8693 } 8694 8695 if (vport->port_state < LPFC_DISC_AUTH) { 8696 if (!(phba->pport->fc_flag & FC_PT2PT) || 8697 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 8698 rjt_err = LSRJT_UNABLE_TPC; 8699 rjt_exp = LSEXP_NOTHING_MORE; 8700 break; 8701 } 8702 } 8703 8704 spin_lock_irq(&ndlp->lock); 8705 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 8706 spin_unlock_irq(&ndlp->lock); 8707 8708 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8709 NLP_EVT_RCV_PLOGI); 8710 8711 break; 8712 case ELS_CMD_FLOGI: 8713 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8714 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 8715 did, vport->port_state, ndlp->nlp_flag); 8716 8717 phba->fc_stat.elsRcvFLOGI++; 8718 8719 /* If the driver believes fabric discovery is done and is ready, 8720 * bounce the link. There is some descrepancy. 8721 */ 8722 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 8723 vport->fc_flag & FC_PT2PT && 8724 vport->rcv_flogi_cnt >= 1) { 8725 rjt_err = LSRJT_LOGICAL_BSY; 8726 rjt_exp = LSEXP_NOTHING_MORE; 8727 init_link++; 8728 goto lsrjt; 8729 } 8730 8731 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 8732 if (newnode) 8733 lpfc_disc_state_machine(vport, ndlp, NULL, 8734 NLP_EVT_DEVICE_RM); 8735 break; 8736 case ELS_CMD_LOGO: 8737 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8738 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 8739 did, vport->port_state, ndlp->nlp_flag); 8740 8741 phba->fc_stat.elsRcvLOGO++; 8742 lpfc_send_els_event(vport, ndlp, payload); 8743 if (vport->port_state < LPFC_DISC_AUTH) { 8744 rjt_err = LSRJT_UNABLE_TPC; 8745 rjt_exp = LSEXP_NOTHING_MORE; 8746 break; 8747 } 8748 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 8749 break; 8750 case ELS_CMD_PRLO: 8751 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8752 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 8753 did, vport->port_state, ndlp->nlp_flag); 8754 8755 phba->fc_stat.elsRcvPRLO++; 8756 lpfc_send_els_event(vport, ndlp, payload); 8757 if (vport->port_state < LPFC_DISC_AUTH) { 8758 rjt_err = LSRJT_UNABLE_TPC; 8759 rjt_exp = LSEXP_NOTHING_MORE; 8760 break; 8761 } 8762 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 8763 break; 8764 case ELS_CMD_LCB: 8765 phba->fc_stat.elsRcvLCB++; 8766 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 8767 break; 8768 case ELS_CMD_RDP: 8769 phba->fc_stat.elsRcvRDP++; 8770 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 8771 break; 8772 case ELS_CMD_RSCN: 8773 phba->fc_stat.elsRcvRSCN++; 8774 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 8775 if (newnode) 8776 lpfc_disc_state_machine(vport, ndlp, NULL, 8777 NLP_EVT_DEVICE_RM); 8778 break; 8779 case ELS_CMD_ADISC: 8780 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8781 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 8782 did, vport->port_state, ndlp->nlp_flag); 8783 8784 lpfc_send_els_event(vport, ndlp, payload); 8785 phba->fc_stat.elsRcvADISC++; 8786 if (vport->port_state < LPFC_DISC_AUTH) { 8787 rjt_err = LSRJT_UNABLE_TPC; 8788 rjt_exp = LSEXP_NOTHING_MORE; 8789 break; 8790 } 8791 
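		/* Hand the ADISC to the discovery state machine for this node */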
lpfc_disc_state_machine(vport, ndlp, elsiocb, 8792 NLP_EVT_RCV_ADISC); 8793 break; 8794 case ELS_CMD_PDISC: 8795 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8796 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 8797 did, vport->port_state, ndlp->nlp_flag); 8798 8799 phba->fc_stat.elsRcvPDISC++; 8800 if (vport->port_state < LPFC_DISC_AUTH) { 8801 rjt_err = LSRJT_UNABLE_TPC; 8802 rjt_exp = LSEXP_NOTHING_MORE; 8803 break; 8804 } 8805 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8806 NLP_EVT_RCV_PDISC); 8807 break; 8808 case ELS_CMD_FARPR: 8809 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8810 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 8811 did, vport->port_state, ndlp->nlp_flag); 8812 8813 phba->fc_stat.elsRcvFARPR++; 8814 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 8815 break; 8816 case ELS_CMD_FARP: 8817 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8818 "RCV FARP: did:x%x/ste:x%x flg:x%x", 8819 did, vport->port_state, ndlp->nlp_flag); 8820 8821 phba->fc_stat.elsRcvFARP++; 8822 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 8823 break; 8824 case ELS_CMD_FAN: 8825 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8826 "RCV FAN: did:x%x/ste:x%x flg:x%x", 8827 did, vport->port_state, ndlp->nlp_flag); 8828 8829 phba->fc_stat.elsRcvFAN++; 8830 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 8831 break; 8832 case ELS_CMD_PRLI: 8833 case ELS_CMD_NVMEPRLI: 8834 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8835 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 8836 did, vport->port_state, ndlp->nlp_flag); 8837 8838 phba->fc_stat.elsRcvPRLI++; 8839 if ((vport->port_state < LPFC_DISC_AUTH) && 8840 (vport->fc_flag & FC_FABRIC)) { 8841 rjt_err = LSRJT_UNABLE_TPC; 8842 rjt_exp = LSEXP_NOTHING_MORE; 8843 break; 8844 } 8845 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 8846 break; 8847 case ELS_CMD_LIRR: 8848 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8849 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 8850 did, vport->port_state, ndlp->nlp_flag); 8851 8852 phba->fc_stat.elsRcvLIRR++; 8853 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 8854 if (newnode) 8855 lpfc_disc_state_machine(vport, ndlp, NULL, 8856 NLP_EVT_DEVICE_RM); 8857 break; 8858 case ELS_CMD_RLS: 8859 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8860 "RCV RLS: did:x%x/ste:x%x flg:x%x", 8861 did, vport->port_state, ndlp->nlp_flag); 8862 8863 phba->fc_stat.elsRcvRLS++; 8864 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 8865 if (newnode) 8866 lpfc_disc_state_machine(vport, ndlp, NULL, 8867 NLP_EVT_DEVICE_RM); 8868 break; 8869 case ELS_CMD_RPL: 8870 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8871 "RCV RPL: did:x%x/ste:x%x flg:x%x", 8872 did, vport->port_state, ndlp->nlp_flag); 8873 8874 phba->fc_stat.elsRcvRPL++; 8875 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 8876 if (newnode) 8877 lpfc_disc_state_machine(vport, ndlp, NULL, 8878 NLP_EVT_DEVICE_RM); 8879 break; 8880 case ELS_CMD_RNID: 8881 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8882 "RCV RNID: did:x%x/ste:x%x flg:x%x", 8883 did, vport->port_state, ndlp->nlp_flag); 8884 8885 phba->fc_stat.elsRcvRNID++; 8886 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 8887 if (newnode) 8888 lpfc_disc_state_machine(vport, ndlp, NULL, 8889 NLP_EVT_DEVICE_RM); 8890 break; 8891 case ELS_CMD_RTV: 8892 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8893 "RCV RTV: did:x%x/ste:x%x flg:x%x", 8894 did, vport->port_state, ndlp->nlp_flag); 8895 phba->fc_stat.elsRcvRTV++; 8896 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 8897 if (newnode) 8898 lpfc_disc_state_machine(vport, ndlp, NULL, 8899 
NLP_EVT_DEVICE_RM); 8900 break; 8901 case ELS_CMD_RRQ: 8902 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8903 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 8904 did, vport->port_state, ndlp->nlp_flag); 8905 8906 phba->fc_stat.elsRcvRRQ++; 8907 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 8908 if (newnode) 8909 lpfc_disc_state_machine(vport, ndlp, NULL, 8910 NLP_EVT_DEVICE_RM); 8911 break; 8912 case ELS_CMD_ECHO: 8913 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8914 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 8915 did, vport->port_state, ndlp->nlp_flag); 8916 8917 phba->fc_stat.elsRcvECHO++; 8918 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 8919 if (newnode) 8920 lpfc_disc_state_machine(vport, ndlp, NULL, 8921 NLP_EVT_DEVICE_RM); 8922 break; 8923 case ELS_CMD_REC: 8924 /* receive this due to exchange closed */ 8925 rjt_err = LSRJT_UNABLE_TPC; 8926 rjt_exp = LSEXP_INVALID_OX_RX; 8927 break; 8928 case ELS_CMD_FPIN: 8929 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8930 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 8931 did, vport->port_state, ndlp->nlp_flag); 8932 8933 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 8934 payload_len); 8935 8936 /* There are no replies, so no rjt codes */ 8937 break; 8938 default: 8939 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8940 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 8941 cmd, did, vport->port_state); 8942 8943 /* Unsupported ELS command, reject */ 8944 rjt_err = LSRJT_CMD_UNSUPPORTED; 8945 rjt_exp = LSEXP_NOTHING_MORE; 8946 8947 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 8948 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8949 "0115 Unknown ELS command x%x " 8950 "received from NPORT x%x\n", cmd, did); 8951 if (newnode) 8952 lpfc_disc_state_machine(vport, ndlp, NULL, 8953 NLP_EVT_DEVICE_RM); 8954 break; 8955 } 8956 8957 lsrjt: 8958 /* check if need to LS_RJT received ELS cmd */ 8959 if (rjt_err) { 8960 memset(&stat, 0, sizeof(stat)); 8961 stat.un.b.lsRjtRsnCode = rjt_err; 8962 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 8963 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 8964 NULL); 8965 /* Remove the reference from above for new nodes. */ 8966 if (newnode) 8967 lpfc_disc_state_machine(vport, ndlp, NULL, 8968 NLP_EVT_DEVICE_RM); 8969 } 8970 8971 /* Release the reference on this elsiocb, not the ndlp. */ 8972 lpfc_nlp_put(elsiocb->context1); 8973 elsiocb->context1 = NULL; 8974 8975 /* Special case. Driver received an unsolicited command that 8976 * unsupportable given the driver's current state. Reset the 8977 * link and start over. 8978 */ 8979 if (init_link) { 8980 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8981 if (!mbox) 8982 return; 8983 lpfc_linkdown(phba); 8984 lpfc_init_link(phba, mbox, 8985 phba->cfg_topology, 8986 phba->cfg_link_speed); 8987 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8988 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8989 mbox->vport = vport; 8990 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 8991 MBX_NOT_FINISHED) 8992 mempool_free(mbox, phba->mbox_mem_pool); 8993 } 8994 8995 return; 8996 8997 dropit: 8998 if (vport && !(vport->load_flag & FC_UNLOADING)) 8999 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9000 "0111 Dropping received ELS cmd " 9001 "Data: x%x x%x x%x\n", 9002 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 9003 phba->fc_stat.elsRcvDrop++; 9004 } 9005 9006 /** 9007 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 9008 * @phba: pointer to lpfc hba data structure. 9009 * @pring: pointer to a SLI ring. 
9010 * @elsiocb: pointer to lpfc els iocb data structure. 9011 * 9012 * This routine is used to process an unsolicited event received from a SLI 9013 * (Service Level Interface) ring. The actual processing of the data buffer 9014 * associated with the unsolicited event is done by invoking the routine 9015 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 9016 * SLI ring on which the unsolicited event was received. 9017 **/ 9018 void 9019 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9020 struct lpfc_iocbq *elsiocb) 9021 { 9022 struct lpfc_vport *vport = phba->pport; 9023 IOCB_t *icmd = &elsiocb->iocb; 9024 dma_addr_t paddr; 9025 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 9026 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 9027 9028 elsiocb->context1 = NULL; 9029 elsiocb->context2 = NULL; 9030 elsiocb->context3 = NULL; 9031 9032 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 9033 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 9034 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 9035 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 9036 IOERR_RCV_BUFFER_WAITING) { 9037 phba->fc_stat.NoRcvBuf++; 9038 /* Not enough posted buffers; Try posting more buffers */ 9039 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 9040 lpfc_post_buffer(phba, pring, 0); 9041 return; 9042 } 9043 9044 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 9045 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 9046 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 9047 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 9048 vport = phba->pport; 9049 else 9050 vport = lpfc_find_vport_by_vpid(phba, 9051 icmd->unsli3.rcvsli3.vpi); 9052 } 9053 9054 /* If there are no BDEs associated 9055 * with this IOCB, there is nothing to do. 9056 */ 9057 if (icmd->ulpBdeCount == 0) 9058 return; 9059 9060 /* type of ELS cmd is first 32bit word 9061 * in packet 9062 */ 9063 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 9064 elsiocb->context2 = bdeBuf1; 9065 } else { 9066 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 9067 icmd->un.cont64[0].addrLow); 9068 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 9069 paddr); 9070 } 9071 9072 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 9073 /* 9074 * The different unsolicited event handlers would tell us 9075 * if they are done with "mp" by setting context2 to NULL. 9076 */ 9077 if (elsiocb->context2) { 9078 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 9079 elsiocb->context2 = NULL; 9080 } 9081 9082 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 9083 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 9084 icmd->ulpBdeCount == 2) { 9085 elsiocb->context2 = bdeBuf2; 9086 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 9087 /* free mp if we are done with it */ 9088 if (elsiocb->context2) { 9089 lpfc_in_buf_free(phba, elsiocb->context2); 9090 elsiocb->context2 = NULL; 9091 } 9092 } 9093 } 9094 9095 static void 9096 lpfc_start_fdmi(struct lpfc_vport *vport) 9097 { 9098 struct lpfc_nodelist *ndlp; 9099 9100 /* If this is the first time, allocate an ndlp and initialize 9101 * it. Otherwise, make sure the node is enabled and then do the 9102 * login. 
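 * The FDMI node uses the well-known FDMI_DID and is marked NLP_FABRIC
 * before the PLOGI is issued.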
9103 */ 9104 ndlp = lpfc_findnode_did(vport, FDMI_DID); 9105 if (!ndlp) { 9106 ndlp = lpfc_nlp_init(vport, FDMI_DID); 9107 if (ndlp) { 9108 ndlp->nlp_type |= NLP_FABRIC; 9109 } else { 9110 return; 9111 } 9112 } 9113 9114 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 9115 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 9116 } 9117 9118 /** 9119 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 9120 * @phba: pointer to lpfc hba data structure. 9121 * @vport: pointer to a virtual N_Port data structure. 9122 * 9123 * This routine issues a Port Login (PLOGI) to the Name Server with 9124 * State Change Request (SCR) for a @vport. This routine will create an 9125 * ndlp for the Name Server associated to the @vport if such node does 9126 * not already exist. The PLOGI to Name Server is issued by invoking the 9127 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface 9128 * (FDMI) is configured to the @vport, a FDMI node will be created and 9129 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine. 9130 **/ 9131 void 9132 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 9133 { 9134 struct lpfc_nodelist *ndlp; 9135 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9136 9137 /* 9138 * If lpfc_delay_discovery parameter is set and the clean address 9139 * bit is cleared and fc fabric parameters chenged, delay FC NPort 9140 * discovery. 9141 */ 9142 spin_lock_irq(shost->host_lock); 9143 if (vport->fc_flag & FC_DISC_DELAYED) { 9144 spin_unlock_irq(shost->host_lock); 9145 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9146 "3334 Delay fc port discovery for %d secs\n", 9147 phba->fc_ratov); 9148 mod_timer(&vport->delayed_disc_tmo, 9149 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 9150 return; 9151 } 9152 spin_unlock_irq(shost->host_lock); 9153 9154 ndlp = lpfc_findnode_did(vport, NameServer_DID); 9155 if (!ndlp) { 9156 ndlp = lpfc_nlp_init(vport, NameServer_DID); 9157 if (!ndlp) { 9158 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9159 lpfc_disc_start(vport); 9160 return; 9161 } 9162 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9163 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9164 "0251 NameServer login: no memory\n"); 9165 return; 9166 } 9167 } 9168 9169 ndlp->nlp_type |= NLP_FABRIC; 9170 9171 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 9172 9173 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 9174 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9175 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9176 "0252 Cannot issue NameServer login\n"); 9177 return; 9178 } 9179 9180 if ((phba->cfg_enable_SmartSAN || 9181 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 9182 (vport->load_flag & FC_ALLOW_FDMI)) 9183 lpfc_start_fdmi(vport); 9184 } 9185 9186 /** 9187 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 9188 * @phba: pointer to lpfc hba data structure. 9189 * @pmb: pointer to the driver internal queue element for mailbox command. 9190 * 9191 * This routine is the completion callback function to register new vport 9192 * mailbox command. If the new vport mailbox command completes successfully, 9193 * the fabric registration login shall be performed on physical port (the 9194 * new vport created is actually a physical port, with VPI 0) or the port 9195 * login to Name Server for State Change Request (SCR) will be performed 9196 * on virtual port (real virtual port, with VPI greater than 0). 
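 * On a REG_VPI failure the handling depends on the mailbox status: an
 * invalid-VPI status (0x20) re-initializes the VPI with an INIT_VPI mailbox
 * command; the unsupported-feature, max_vpi-exceeded and link-event statuses
 * give up and mark the vport FC_VPORT_FAILED; any other status unregisters
 * the RPIs/VPI and retries FLOGI or FDISC as appropriate.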
9197 **/ 9198 static void 9199 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 9200 { 9201 struct lpfc_vport *vport = pmb->vport; 9202 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9203 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 9204 MAILBOX_t *mb = &pmb->u.mb; 9205 int rc; 9206 9207 spin_lock_irq(shost->host_lock); 9208 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 9209 spin_unlock_irq(shost->host_lock); 9210 9211 if (mb->mbxStatus) { 9212 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9213 "0915 Register VPI failed : Status: x%x" 9214 " upd bit: x%x \n", mb->mbxStatus, 9215 mb->un.varRegVpi.upd); 9216 if (phba->sli_rev == LPFC_SLI_REV4 && 9217 mb->un.varRegVpi.upd) 9218 goto mbox_err_exit ; 9219 9220 switch (mb->mbxStatus) { 9221 case 0x11: /* unsupported feature */ 9222 case 0x9603: /* max_vpi exceeded */ 9223 case 0x9602: /* Link event since CLEAR_LA */ 9224 /* giving up on vport registration */ 9225 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9226 spin_lock_irq(shost->host_lock); 9227 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 9228 spin_unlock_irq(shost->host_lock); 9229 lpfc_can_disctmo(vport); 9230 break; 9231 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 9232 case 0x20: 9233 spin_lock_irq(shost->host_lock); 9234 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9235 spin_unlock_irq(shost->host_lock); 9236 lpfc_init_vpi(phba, pmb, vport->vpi); 9237 pmb->vport = vport; 9238 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 9239 rc = lpfc_sli_issue_mbox(phba, pmb, 9240 MBX_NOWAIT); 9241 if (rc == MBX_NOT_FINISHED) { 9242 lpfc_printf_vlog(vport, KERN_ERR, 9243 LOG_TRACE_EVENT, 9244 "2732 Failed to issue INIT_VPI" 9245 " mailbox command\n"); 9246 } else { 9247 lpfc_nlp_put(ndlp); 9248 return; 9249 } 9250 fallthrough; 9251 default: 9252 /* Try to recover from this error */ 9253 if (phba->sli_rev == LPFC_SLI_REV4) 9254 lpfc_sli4_unreg_all_rpis(vport); 9255 lpfc_mbx_unreg_vpi(vport); 9256 spin_lock_irq(shost->host_lock); 9257 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9258 spin_unlock_irq(shost->host_lock); 9259 if (mb->mbxStatus == MBX_NOT_FINISHED) 9260 break; 9261 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 9262 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 9263 if (phba->sli_rev == LPFC_SLI_REV4) 9264 lpfc_issue_init_vfi(vport); 9265 else 9266 lpfc_initial_flogi(vport); 9267 } else { 9268 lpfc_initial_fdisc(vport); 9269 } 9270 break; 9271 } 9272 } else { 9273 spin_lock_irq(shost->host_lock); 9274 vport->vpi_state |= LPFC_VPI_REGISTERED; 9275 spin_unlock_irq(shost->host_lock); 9276 if (vport == phba->pport) { 9277 if (phba->sli_rev < LPFC_SLI_REV4) 9278 lpfc_issue_fabric_reglogin(vport); 9279 else { 9280 /* 9281 * If the physical port is instantiated using 9282 * FDISC, do not start vport discovery. 9283 */ 9284 if (vport->port_state != LPFC_FDISC) 9285 lpfc_start_fdiscs(phba); 9286 lpfc_do_scr_ns_plogi(phba, vport); 9287 } 9288 } else { 9289 lpfc_do_scr_ns_plogi(phba, vport); 9290 } 9291 } 9292 mbox_err_exit: 9293 /* Now, we decrement the ndlp reference count held for this 9294 * callback function 9295 */ 9296 lpfc_nlp_put(ndlp); 9297 9298 mempool_free(pmb, phba->mbox_mem_pool); 9299 return; 9300 } 9301 9302 /** 9303 * lpfc_register_new_vport - Register a new vport with a HBA 9304 * @phba: pointer to lpfc hba data structure. 9305 * @vport: pointer to a host virtual N_Port data structure. 9306 * @ndlp: pointer to a node-list data structure. 9307 * 9308 * This routine registers the @vport as a new virtual port with a HBA. 
9309 * It is done through a registering vpi mailbox command. 9310 **/ 9311 void 9312 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 9313 struct lpfc_nodelist *ndlp) 9314 { 9315 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9316 LPFC_MBOXQ_t *mbox; 9317 9318 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 9319 if (mbox) { 9320 lpfc_reg_vpi(vport, mbox); 9321 mbox->vport = vport; 9322 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 9323 if (!mbox->ctx_ndlp) { 9324 mempool_free(mbox, phba->mbox_mem_pool); 9325 goto mbox_err_exit; 9326 } 9327 9328 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 9329 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 9330 == MBX_NOT_FINISHED) { 9331 /* mailbox command not success, decrement ndlp 9332 * reference count for this command 9333 */ 9334 lpfc_nlp_put(ndlp); 9335 mempool_free(mbox, phba->mbox_mem_pool); 9336 9337 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9338 "0253 Register VPI: Can't send mbox\n"); 9339 goto mbox_err_exit; 9340 } 9341 } else { 9342 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9343 "0254 Register VPI: no memory\n"); 9344 goto mbox_err_exit; 9345 } 9346 return; 9347 9348 mbox_err_exit: 9349 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9350 spin_lock_irq(shost->host_lock); 9351 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 9352 spin_unlock_irq(shost->host_lock); 9353 return; 9354 } 9355 9356 /** 9357 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 9358 * @phba: pointer to lpfc hba data structure. 9359 * 9360 * This routine cancels the retry delay timers to all the vports. 9361 **/ 9362 void 9363 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 9364 { 9365 struct lpfc_vport **vports; 9366 struct lpfc_nodelist *ndlp; 9367 uint32_t link_state; 9368 int i; 9369 9370 /* Treat this failure as linkdown for all vports */ 9371 link_state = phba->link_state; 9372 lpfc_linkdown(phba); 9373 phba->link_state = link_state; 9374 9375 vports = lpfc_create_vport_work_array(phba); 9376 9377 if (vports) { 9378 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 9379 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 9380 if (ndlp) 9381 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 9382 lpfc_els_flush_cmd(vports[i]); 9383 } 9384 lpfc_destroy_vport_work_array(phba, vports); 9385 } 9386 } 9387 9388 /** 9389 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 9390 * @phba: pointer to lpfc hba data structure. 9391 * 9392 * This routine abort all pending discovery commands and 9393 * start a timer to retry FLOGI for the physical port 9394 * discovery. 9395 **/ 9396 void 9397 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 9398 { 9399 struct lpfc_nodelist *ndlp; 9400 9401 /* Cancel the all vports retry delay retry timers */ 9402 lpfc_cancel_all_vport_retry_delay_timer(phba); 9403 9404 /* If fabric require FLOGI, then re-instantiate physical login */ 9405 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 9406 if (!ndlp) 9407 return; 9408 9409 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 9410 spin_lock_irq(&ndlp->lock); 9411 ndlp->nlp_flag |= NLP_DELAY_TMO; 9412 spin_unlock_irq(&ndlp->lock); 9413 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 9414 phba->pport->port_state = LPFC_FLOGI; 9415 return; 9416 } 9417 9418 /** 9419 * lpfc_fabric_login_reqd - Check if FLOGI required. 9420 * @phba: pointer to lpfc hba data structure. 9421 * @cmdiocb: pointer to FDISC command iocb. 9422 * @rspiocb: pointer to FDISC response iocb. 
9423 * 9424 * This routine checks if a FLOGI is reguired for FDISC 9425 * to succeed. 9426 **/ 9427 static int 9428 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 9429 struct lpfc_iocbq *cmdiocb, 9430 struct lpfc_iocbq *rspiocb) 9431 { 9432 9433 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) || 9434 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED)) 9435 return 0; 9436 else 9437 return 1; 9438 } 9439 9440 /** 9441 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 9442 * @phba: pointer to lpfc hba data structure. 9443 * @cmdiocb: pointer to lpfc command iocb data structure. 9444 * @rspiocb: pointer to lpfc response iocb data structure. 9445 * 9446 * This routine is the completion callback function to a Fabric Discover 9447 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 9448 * single threaded, each FDISC completion callback function will reset 9449 * the discovery timer for all vports such that the timers will not get 9450 * unnecessary timeout. The function checks the FDISC IOCB status. If error 9451 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the 9452 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID 9453 * assigned to the vport has been changed with the completion of the FDISC 9454 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 9455 * are unregistered from the HBA, and then the lpfc_register_new_vport() 9456 * routine is invoked to register new vport with the HBA. Otherwise, the 9457 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 9458 * Server for State Change Request (SCR). 9459 **/ 9460 static void 9461 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9462 struct lpfc_iocbq *rspiocb) 9463 { 9464 struct lpfc_vport *vport = cmdiocb->vport; 9465 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9466 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 9467 struct lpfc_nodelist *np; 9468 struct lpfc_nodelist *next_np; 9469 IOCB_t *irsp = &rspiocb->iocb; 9470 struct lpfc_iocbq *piocb; 9471 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 9472 struct serv_parm *sp; 9473 uint8_t fabric_param_changed; 9474 9475 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9476 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 9477 irsp->ulpStatus, irsp->un.ulpWord[4], 9478 vport->fc_prevDID); 9479 /* Since all FDISCs are being single threaded, we 9480 * must reset the discovery timer for ALL vports 9481 * waiting to send FDISC when one completes. 9482 */ 9483 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 9484 lpfc_set_disctmo(piocb->vport); 9485 } 9486 9487 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9488 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 9489 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 9490 9491 if (irsp->ulpStatus) { 9492 9493 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 9494 lpfc_retry_pport_discovery(phba); 9495 goto out; 9496 } 9497 9498 /* Check for retry */ 9499 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 9500 goto out; 9501 /* FDISC failed */ 9502 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9503 "0126 FDISC failed. 
(x%x/x%x)\n", 9504 irsp->ulpStatus, irsp->un.ulpWord[4]); 9505 goto fdisc_failed; 9506 } 9507 spin_lock_irq(shost->host_lock); 9508 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 9509 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 9510 vport->fc_flag |= FC_FABRIC; 9511 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 9512 vport->fc_flag |= FC_PUBLIC_LOOP; 9513 spin_unlock_irq(shost->host_lock); 9514 9515 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 9516 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 9517 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 9518 if (!prsp) 9519 goto out; 9520 sp = prsp->virt + sizeof(uint32_t); 9521 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 9522 memcpy(&vport->fabric_portname, &sp->portName, 9523 sizeof(struct lpfc_name)); 9524 memcpy(&vport->fabric_nodename, &sp->nodeName, 9525 sizeof(struct lpfc_name)); 9526 if (fabric_param_changed && 9527 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9528 /* If our NportID changed, we need to ensure all 9529 * remaining NPORTs get unreg_login'ed so we can 9530 * issue unreg_vpi. 9531 */ 9532 list_for_each_entry_safe(np, next_np, 9533 &vport->fc_nodes, nlp_listp) { 9534 if ((np->nlp_state != NLP_STE_NPR_NODE) || 9535 !(np->nlp_flag & NLP_NPR_ADISC)) 9536 continue; 9537 spin_lock_irq(&ndlp->lock); 9538 np->nlp_flag &= ~NLP_NPR_ADISC; 9539 spin_unlock_irq(&ndlp->lock); 9540 lpfc_unreg_rpi(vport, np); 9541 } 9542 lpfc_cleanup_pending_mbox(vport); 9543 9544 if (phba->sli_rev == LPFC_SLI_REV4) 9545 lpfc_sli4_unreg_all_rpis(vport); 9546 9547 lpfc_mbx_unreg_vpi(vport); 9548 spin_lock_irq(shost->host_lock); 9549 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9550 if (phba->sli_rev == LPFC_SLI_REV4) 9551 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 9552 else 9553 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 9554 spin_unlock_irq(shost->host_lock); 9555 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 9556 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9557 /* 9558 * Driver needs to re-reg VPI in order for f/w 9559 * to update the MAC address. 9560 */ 9561 lpfc_register_new_vport(phba, vport, ndlp); 9562 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 9563 goto out; 9564 } 9565 9566 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 9567 lpfc_issue_init_vpi(vport); 9568 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 9569 lpfc_register_new_vport(phba, vport, ndlp); 9570 else 9571 lpfc_do_scr_ns_plogi(phba, vport); 9572 9573 /* The FDISC completed successfully. Move the fabric ndlp to 9574 * UNMAPPED state and register with the transport. 9575 */ 9576 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 9577 goto out; 9578 9579 fdisc_failed: 9580 if (vport->fc_vport && 9581 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 9582 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9583 /* Cancel discovery timer */ 9584 lpfc_can_disctmo(vport); 9585 out: 9586 lpfc_els_free_iocb(phba, cmdiocb); 9587 lpfc_nlp_put(ndlp); 9588 } 9589 9590 /** 9591 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 9592 * @vport: pointer to a virtual N_Port data structure. 9593 * @ndlp: pointer to a node-list data structure. 9594 * @retry: number of retries to the command IOCB. 9595 * 9596 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 9597 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 9598 * routine to issue the IOCB, which makes sure only one outstanding fabric 9599 * IOCB will be sent off HBA at any given time. 
9600 * 9601 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 9602 * will be incremented by 1 for holding the ndlp and the reference to ndlp 9603 * will be stored into the context1 field of the IOCB for the completion 9604 * callback function to the FDISC ELS command. 9605 * 9606 * Return code 9607 * 0 - Successfully issued fdisc iocb command 9608 * 1 - Failed to issue fdisc iocb command 9609 **/ 9610 static int 9611 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 9612 uint8_t retry) 9613 { 9614 struct lpfc_hba *phba = vport->phba; 9615 IOCB_t *icmd; 9616 struct lpfc_iocbq *elsiocb; 9617 struct serv_parm *sp; 9618 uint8_t *pcmd; 9619 uint16_t cmdsize; 9620 int did = ndlp->nlp_DID; 9621 int rc; 9622 9623 vport->port_state = LPFC_FDISC; 9624 vport->fc_myDID = 0; 9625 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 9626 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 9627 ELS_CMD_FDISC); 9628 if (!elsiocb) { 9629 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9630 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9631 "0255 Issue FDISC: no IOCB\n"); 9632 return 1; 9633 } 9634 9635 icmd = &elsiocb->iocb; 9636 icmd->un.elsreq64.myID = 0; 9637 icmd->un.elsreq64.fl = 1; 9638 9639 /* 9640 * SLI3 ports require a different context type value than SLI4. 9641 * Catch SLI3 ports here and override the prep. 9642 */ 9643 if (phba->sli_rev == LPFC_SLI_REV3) { 9644 icmd->ulpCt_h = 1; 9645 icmd->ulpCt_l = 0; 9646 } 9647 9648 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9649 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 9650 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 9651 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 9652 sp = (struct serv_parm *) pcmd; 9653 /* Setup CSPs accordingly for Fabric */ 9654 sp->cmn.e_d_tov = 0; 9655 sp->cmn.w2.r_a_tov = 0; 9656 sp->cmn.virtual_fabric_support = 0; 9657 sp->cls1.classValid = 0; 9658 sp->cls2.seqDelivery = 1; 9659 sp->cls3.seqDelivery = 1; 9660 9661 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 9662 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 9663 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 9664 pcmd += sizeof(uint32_t); /* Port Name */ 9665 memcpy(pcmd, &vport->fc_portname, 8); 9666 pcmd += sizeof(uint32_t); /* Node Name */ 9667 pcmd += sizeof(uint32_t); /* Node Name */ 9668 memcpy(pcmd, &vport->fc_nodename, 8); 9669 sp->cmn.valid_vendor_ver_level = 0; 9670 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 9671 lpfc_set_disctmo(vport); 9672 9673 phba->fc_stat.elsXmitFDISC++; 9674 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 9675 9676 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9677 "Issue FDISC: did:x%x", 9678 did, 0, 0); 9679 9680 elsiocb->context1 = lpfc_nlp_get(ndlp); 9681 if (!elsiocb->context1) 9682 goto err_out; 9683 9684 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 9685 if (rc == IOCB_ERROR) { 9686 lpfc_nlp_put(ndlp); 9687 goto err_out; 9688 } 9689 9690 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 9691 return 0; 9692 9693 err_out: 9694 lpfc_els_free_iocb(phba, elsiocb); 9695 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9696 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9697 "0256 Issue FDISC: Cannot send IOCB\n"); 9698 return 1; 9699 } 9700 9701 /** 9702 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 9703 * @phba: pointer to lpfc hba data structure. 9704 * @cmdiocb: pointer to lpfc command iocb data structure. 9705 * @rspiocb: pointer to lpfc response iocb data structure. 
 *
 * This routine is the completion callback function to the issuing of a LOGO
 * ELS command off a vport. It frees the command IOCB and then decrements the
 * reference count held on the ndlp for this completion function, indicating
 * that the reference to the ndlp is no longer needed. Note that the
 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
 * callback function and an additional explicit ndlp reference decrement
 * will trigger the actual release of the ndlp.
 **/
static void
lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
	irsp = &rspiocb->iocb;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"LOGO npiv cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);

	/* NPIV LOGO completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "2928 NPIV LOGO completes to NPort x%x "
			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
			 irsp->ulpTimeout, vport->num_disc_nodes,
			 kref_read(&ndlp->kref), ndlp->nlp_flag,
			 ndlp->fc4_xpt_flags);

	if (irsp->ulpStatus == IOSTAT_SUCCESS) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_NDISC_ACTIVE;
		vport->fc_flag &= ~FC_FABRIC;
		spin_unlock_irq(shost->host_lock);
		lpfc_can_disctmo(vport);
	}

	/* Safe to release resources now. */
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
	vport->unreg_vpi_cmpl = VPORT_ERROR;
}

/**
 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
 * @vport: pointer to a virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
 *
 * Note that, in the lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the LOGO ELS command.
9764 * 9765 * Return codes 9766 * 0 - Successfully issued logo off the @vport 9767 * 1 - Failed to issue logo off the @vport 9768 **/ 9769 int 9770 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 9771 { 9772 int rc = 0; 9773 struct lpfc_hba *phba = vport->phba; 9774 struct lpfc_iocbq *elsiocb; 9775 uint8_t *pcmd; 9776 uint16_t cmdsize; 9777 9778 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 9779 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 9780 ELS_CMD_LOGO); 9781 if (!elsiocb) 9782 return 1; 9783 9784 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9785 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 9786 pcmd += sizeof(uint32_t); 9787 9788 /* Fill in LOGO payload */ 9789 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 9790 pcmd += sizeof(uint32_t); 9791 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 9792 9793 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9794 "Issue LOGO npiv did:x%x flg:x%x", 9795 ndlp->nlp_DID, ndlp->nlp_flag, 0); 9796 9797 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 9798 spin_lock_irq(&ndlp->lock); 9799 ndlp->nlp_flag |= NLP_LOGO_SND; 9800 spin_unlock_irq(&ndlp->lock); 9801 elsiocb->context1 = lpfc_nlp_get(ndlp); 9802 if (!elsiocb->context1) 9803 goto node_err; 9804 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9805 if (rc == IOCB_ERROR) 9806 goto io_err; 9807 return 0; 9808 9809 io_err: 9810 lpfc_nlp_put(ndlp); 9811 node_err: 9812 spin_lock_irq(&ndlp->lock); 9813 ndlp->nlp_flag &= ~NLP_LOGO_SND; 9814 spin_unlock_irq(&ndlp->lock); 9815 lpfc_els_free_iocb(phba, elsiocb); 9816 return 1; 9817 } 9818 9819 /** 9820 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 9821 * @t: timer context used to obtain the lpfc hba. 9822 * 9823 * This routine is invoked by the fabric iocb block timer after 9824 * timeout. It posts the fabric iocb block timeout event by setting the 9825 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 9826 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 9827 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 9828 * posted event WORKER_FABRIC_BLOCK_TMO. 9829 **/ 9830 void 9831 lpfc_fabric_block_timeout(struct timer_list *t) 9832 { 9833 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 9834 unsigned long iflags; 9835 uint32_t tmo_posted; 9836 9837 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 9838 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 9839 if (!tmo_posted) 9840 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 9841 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 9842 9843 if (!tmo_posted) 9844 lpfc_worker_wake_up(phba); 9845 return; 9846 } 9847 9848 /** 9849 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 9850 * @phba: pointer to lpfc hba data structure. 9851 * 9852 * This routine issues one fabric iocb from the driver internal list to 9853 * the HBA. It first checks whether it's ready to issue one fabric iocb to 9854 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 9855 * remove one pending fabric iocb from the driver internal list and invokes 9856 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
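 * If lpfc_sli_issue_iocb() fails, the iocb is completed locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and the routine loops to try the
 * next iocb waiting on the list.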
9857 **/ 9858 static void 9859 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 9860 { 9861 struct lpfc_iocbq *iocb; 9862 unsigned long iflags; 9863 int ret; 9864 IOCB_t *cmd; 9865 9866 repeat: 9867 iocb = NULL; 9868 spin_lock_irqsave(&phba->hbalock, iflags); 9869 /* Post any pending iocb to the SLI layer */ 9870 if (atomic_read(&phba->fabric_iocb_count) == 0) { 9871 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 9872 list); 9873 if (iocb) 9874 /* Increment fabric iocb count to hold the position */ 9875 atomic_inc(&phba->fabric_iocb_count); 9876 } 9877 spin_unlock_irqrestore(&phba->hbalock, iflags); 9878 if (iocb) { 9879 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 9880 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 9881 iocb->iocb_flag |= LPFC_IO_FABRIC; 9882 9883 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 9884 "Fabric sched1: ste:x%x", 9885 iocb->vport->port_state, 0, 0); 9886 9887 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 9888 9889 if (ret == IOCB_ERROR) { 9890 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 9891 iocb->fabric_iocb_cmpl = NULL; 9892 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 9893 cmd = &iocb->iocb; 9894 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 9895 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 9896 iocb->iocb_cmpl(phba, iocb, iocb); 9897 9898 atomic_dec(&phba->fabric_iocb_count); 9899 goto repeat; 9900 } 9901 } 9902 } 9903 9904 /** 9905 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 9906 * @phba: pointer to lpfc hba data structure. 9907 * 9908 * This routine unblocks the issuing fabric iocb command. The function 9909 * will clear the fabric iocb block bit and then invoke the routine 9910 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 9911 * from the driver internal fabric iocb list. 9912 **/ 9913 void 9914 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 9915 { 9916 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9917 9918 lpfc_resume_fabric_iocbs(phba); 9919 return; 9920 } 9921 9922 /** 9923 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 9924 * @phba: pointer to lpfc hba data structure. 9925 * 9926 * This routine blocks the issuing fabric iocb for a specified amount of 9927 * time (currently 100 ms). This is done by set the fabric iocb block bit 9928 * and set up a timeout timer for 100ms. When the block bit is set, no more 9929 * fabric iocb will be issued out of the HBA. 9930 **/ 9931 static void 9932 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 9933 { 9934 int blocked; 9935 9936 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9937 /* Start a timer to unblock fabric iocbs after 100ms */ 9938 if (!blocked) 9939 mod_timer(&phba->fabric_block_timer, 9940 jiffies + msecs_to_jiffies(100)); 9941 9942 return; 9943 } 9944 9945 /** 9946 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 9947 * @phba: pointer to lpfc hba data structure. 9948 * @cmdiocb: pointer to lpfc command iocb data structure. 9949 * @rspiocb: pointer to lpfc response iocb data structure. 9950 * 9951 * This routine is the callback function that is put to the fabric iocb's 9952 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 9953 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback 9954 * function first restores and invokes the original iocb's callback function 9955 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 9956 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 
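 * A busy response (IOSTAT_NPORT_BSY or IOSTAT_FABRIC_BSY), a reject carrying
 * RJT_UNAVAIL_TEMP, or an LS_RJT with reason code UNABLE_TPC or LOGICAL_BSY
 * additionally blocks fabric iocb issuing for a short interval through
 * lpfc_block_fabric_iocbs().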
 **/
static void
lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb)
{
	struct ls_rjt stat;

	BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);

	switch (rspiocb->iocb.ulpStatus) {
	case IOSTAT_NPORT_RJT:
	case IOSTAT_FABRIC_RJT:
		if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
			lpfc_block_fabric_iocbs(phba);
		}
		break;

	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		lpfc_block_fabric_iocbs(phba);
		break;

	case IOSTAT_LS_RJT:
		stat.un.lsRjtError =
			be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
		if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
		    (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
			lpfc_block_fabric_iocbs(phba);
		break;
	}

	BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);

	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
	cmdiocb->fabric_iocb_cmpl = NULL;
	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);

	atomic_dec(&phba->fabric_iocb_count);
	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
		/* Post any pending iocbs to HBA */
		lpfc_resume_fabric_iocbs(phba);
	}
}

/**
 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
 * @phba: pointer to lpfc hba data structure.
 * @iocb: pointer to lpfc command iocb data structure.
 *
 * This routine is used as the top-level API for issuing a fabric iocb command
 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
 * function makes sure that only one fabric-bound iocb is outstanding at any
 * given time. It first checks whether there is already an outstanding fabric
 * iocb on the wire. If so, it puts the newly issued iocb onto the driver
 * internal fabric iocb list to be issued later. Otherwise, it issues the iocb
 * on the wire and updates the fabric iocb count to indicate that there is one
 * fabric iocb on the wire.
 *
 * Note that this implementation can send fabric IOCBs out of order: the
 * "ready" check does not include the condition that the internal fabric IOCB
 * list is empty, so an IOCB issued by this routine may jump ahead of IOCBs
 * already waiting on the internal list.
10021 * 10022 * Return code 10023 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 10024 * IOCB_ERROR - failed to issue fabric iocb 10025 **/ 10026 static int 10027 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 10028 { 10029 unsigned long iflags; 10030 int ready; 10031 int ret; 10032 10033 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 10034 10035 spin_lock_irqsave(&phba->hbalock, iflags); 10036 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 10037 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 10038 10039 if (ready) 10040 /* Increment fabric iocb count to hold the position */ 10041 atomic_inc(&phba->fabric_iocb_count); 10042 spin_unlock_irqrestore(&phba->hbalock, iflags); 10043 if (ready) { 10044 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 10045 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 10046 iocb->iocb_flag |= LPFC_IO_FABRIC; 10047 10048 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 10049 "Fabric sched2: ste:x%x", 10050 iocb->vport->port_state, 0, 0); 10051 10052 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 10053 10054 if (ret == IOCB_ERROR) { 10055 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 10056 iocb->fabric_iocb_cmpl = NULL; 10057 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 10058 atomic_dec(&phba->fabric_iocb_count); 10059 } 10060 } else { 10061 spin_lock_irqsave(&phba->hbalock, iflags); 10062 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 10063 spin_unlock_irqrestore(&phba->hbalock, iflags); 10064 ret = IOCB_SUCCESS; 10065 } 10066 return ret; 10067 } 10068 10069 /** 10070 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 10071 * @vport: pointer to a virtual N_Port data structure. 10072 * 10073 * This routine aborts all the IOCBs associated with a @vport from the 10074 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 10075 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 10076 * list, removes each IOCB associated with the @vport off the list, set the 10077 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function 10078 * associated with the IOCB. 10079 **/ 10080 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 10081 { 10082 LIST_HEAD(completions); 10083 struct lpfc_hba *phba = vport->phba; 10084 struct lpfc_iocbq *tmp_iocb, *piocb; 10085 10086 spin_lock_irq(&phba->hbalock); 10087 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 10088 list) { 10089 10090 if (piocb->vport != vport) 10091 continue; 10092 10093 list_move_tail(&piocb->list, &completions); 10094 } 10095 spin_unlock_irq(&phba->hbalock); 10096 10097 /* Cancel all the IOCBs from the completions list */ 10098 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10099 IOERR_SLI_ABORTED); 10100 } 10101 10102 /** 10103 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 10104 * @ndlp: pointer to a node-list data structure. 10105 * 10106 * This routine aborts all the IOCBs associated with an @ndlp from the 10107 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 10108 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 10109 * list, removes each IOCB associated with the @ndlp off the list, set the 10110 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function 10111 * associated with the IOCB. 
10112 **/ 10113 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 10114 { 10115 LIST_HEAD(completions); 10116 struct lpfc_hba *phba = ndlp->phba; 10117 struct lpfc_iocbq *tmp_iocb, *piocb; 10118 struct lpfc_sli_ring *pring; 10119 10120 pring = lpfc_phba_elsring(phba); 10121 10122 if (unlikely(!pring)) 10123 return; 10124 10125 spin_lock_irq(&phba->hbalock); 10126 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 10127 list) { 10128 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 10129 10130 list_move_tail(&piocb->list, &completions); 10131 } 10132 } 10133 spin_unlock_irq(&phba->hbalock); 10134 10135 /* Cancel all the IOCBs from the completions list */ 10136 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10137 IOERR_SLI_ABORTED); 10138 } 10139 10140 /** 10141 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 10142 * @phba: pointer to lpfc hba data structure. 10143 * 10144 * This routine aborts all the IOCBs currently on the driver internal 10145 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 10146 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 10147 * list, removes IOCBs off the list, set the status feild to 10148 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 10149 * the IOCB. 10150 **/ 10151 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 10152 { 10153 LIST_HEAD(completions); 10154 10155 spin_lock_irq(&phba->hbalock); 10156 list_splice_init(&phba->fabric_iocb_list, &completions); 10157 spin_unlock_irq(&phba->hbalock); 10158 10159 /* Cancel all the IOCBs from the completions list */ 10160 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10161 IOERR_SLI_ABORTED); 10162 } 10163 10164 /** 10165 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 10166 * @vport: pointer to lpfc vport data structure. 10167 * 10168 * This routine is invoked by the vport cleanup for deletions and the cleanup 10169 * for an ndlp on removal. 10170 **/ 10171 void 10172 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 10173 { 10174 struct lpfc_hba *phba = vport->phba; 10175 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 10176 unsigned long iflag = 0; 10177 10178 spin_lock_irqsave(&phba->hbalock, iflag); 10179 spin_lock(&phba->sli4_hba.sgl_list_lock); 10180 list_for_each_entry_safe(sglq_entry, sglq_next, 10181 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 10182 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 10183 lpfc_nlp_put(sglq_entry->ndlp); 10184 sglq_entry->ndlp = NULL; 10185 } 10186 } 10187 spin_unlock(&phba->sli4_hba.sgl_list_lock); 10188 spin_unlock_irqrestore(&phba->hbalock, iflag); 10189 return; 10190 } 10191 10192 /** 10193 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 10194 * @phba: pointer to lpfc hba data structure. 10195 * @axri: pointer to the els xri abort wcqe structure. 10196 * 10197 * This routine is invoked by the worker thread to process a SLI4 slow-path 10198 * ELS aborted xri. 
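 * The sglq entry matching the aborted XRI is moved from the aborted list
 * back onto lpfc_els_sgl_list and marked SGL_FREED; if a node was attached,
 * an RRQ is activated for the exchange before the node reference is dropped.
 * An active sglq that is not on the aborted list is marked SGL_XRI_ABORTED.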
 **/
void
lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	uint16_t lxri = 0;

	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	unsigned long iflag = 0;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring;

	pring = lpfc_phba_elsring(phba);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry_safe(sglq_entry, sglq_next,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
		if (sglq_entry->sli4_xritag == xri) {
			list_del(&sglq_entry->list);
			ndlp = sglq_entry->ndlp;
			sglq_entry->ndlp = NULL;
			list_add_tail(&sglq_entry->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			sglq_entry->state = SGL_FREED;
			spin_unlock(&phba->sli4_hba.sgl_list_lock);
			spin_unlock_irqrestore(&phba->hbalock, iflag);

			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
						    sglq_entry->sli4_lxritag,
						    rxid, 1);
				lpfc_nlp_put(ndlp);
			}

			/* Check if TXQ queue needs to be serviced */
			if (pring && !list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri == NO_XRI) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return;
	}
	sglq_entry->state = SGL_XRI_ABORTED;
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

/**
 * lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req.
 * @vport: pointer to virtual port object.
 * @ndlp: nodelist pointer for the impacted node.
 *
 * The driver calls this routine in response to an SLI4 XRI ABORT CQE
 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event,
 * the driver is required to send a LOGO to the remote node before it
 * attempts to recover its login to the remote node.
 **/
void
lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
			   struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;
	unsigned long flags = 0;

	shost = lpfc_shost_from_vport(vport);
	phba = vport->phba;
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_SLI, "3093 No rport recovery needed. "
				"rport in state 0x%x\n", ndlp->nlp_state);
		return;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"3094 Start rport recovery on shost id 0x%x "
			"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
			"flags 0x%x\n",
			shost->host_no, ndlp->nlp_DID,
			vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
			ndlp->nlp_flag);
	/*
	 * The rport is not responding. Remove the FCP-2 flag to prevent
	 * an ADISC in the follow-up recovery code.
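	 *
	 * Added note: NLP_ISSUE_LOGO is also set so that, after the RPI is
	 * unregistered below, the required LOGO can be sent to the remote
	 * node as part of the unreg-RPI follow-up handling (that completion
	 * path is not shown in this section).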
	 */
	spin_lock_irqsave(&ndlp->lock, flags);
	ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
	ndlp->nlp_flag |= NLP_ISSUE_LOGO;
	spin_unlock_irqrestore(&ndlp->lock, flags);
	lpfc_unreg_rpi(vport, ndlp);
}

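/*
 * Usage sketch (illustration only, not part of this file's call flow): a
 * caller reacting to an abort event would typically resolve the affected
 * node first and then hand it to the recovery routine, for example:
 *
 *	ndlp = lpfc_findnode_did(vport, did);
 *	if (ndlp)
 *		lpfc_sli_abts_recover_port(vport, ndlp);
 *
 * lpfc_findnode_did() is only one possible lookup; real callers obtain the
 * ndlp from their own event context.
 */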