/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
			struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention event during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is LPFC_VPORT_READY, the request for checking host link
 * attention event will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}
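/*
 * Usage sketch (illustrative only, not driver logic): ELS completion
 * handlers in this file typically call lpfc_els_chk_latt() first and bail
 * out when a link attention event interrupted discovery, dropping the
 * completion's node reference and then freeing the command iocb:
 *
 *	if (lpfc_els_chk_latt(vport)) {
 *		lpfc_nlp_put(ndlp);
 *		goto out;
 *	}
 *
 * See lpfc_cmpl_els_flogi() below for this pattern in context.
 */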
/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and preparing the IOCB with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (ELS) command. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the ELS command-specific fields are set up later by the
 * individual discovery machine routines after calling this routine to
 * allocate and prepare a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else if (elscmd == ELS_CMD_LOGO)
			icmd->ulpTimeout = phba->fc_ratov;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* prevent preparing iocb with NULL ndlp reference */
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1)
		goto els_iocb_free_pbuf_exit;
	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}
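/*
 * Usage sketch (illustrative only): a discovery routine that issues an ELS
 * request builds on lpfc_prep_els_iocb() roughly as follows.  Names such as
 * "cmdsize" are local placeholders; see lpfc_issue_els_flogi() further down
 * for a real caller.
 *
 *	cmdsize = sizeof(uint32_t) + sizeof(struct serv_parm);
 *	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 *				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
 *	... fill in the ELS command payload at pcmd ...
 *	elsiocb->iocb_cmpl = <completion handler>;
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
 *	    IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 */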
/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 6;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0249 Cannot issue Register Fabric login: Err %d\n", err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->ctx_buf = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
		"0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 port only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2556 UNREG_VFI mbox allocation failed "
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}
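/*
 * Note (summary, not new driver logic): the VFI registration helpers above
 * follow the driver's usual non-blocking mailbox pattern -
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	... build the command, set mboxq->vport and mboxq->mbox_cmpl ...
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mboxq, phba->mbox_mem_pool);
 *
 * On successful issue, the mailbox (and any attached DMA buffer) is released
 * by the completion handler; on MBX_NOT_FINISHED the issuer cleans up
 * itself, as the error paths above do.
 */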
/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from FLOGI/FDISC completion handler functions.
 * lpfc_check_clean_addr_bit returns 1 when the FCID, Fabric portname or
 * Fabric nodename has changed in the completion service parameters;
 * otherwise it returns 0. This function also sets a flag in the vport data
 * structure to delay N_Port discovery after the FLOGI/FDISC completion if
 * the Clean Address bit in the FLOGI/FDISC response is cleared and the
 * FCID, Fabric portname or Fabric nodename has changed in the completion
 * service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are not changed.
 *   1 - FCID, Fabric Nodename or Fabric portname is changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
		struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}
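/*
 * Worked example (informal): suppose a vport completed FLOGI earlier with
 * fc_prevDID = 0x010200 and the switch now returns a different fabric
 * portname with the Clean Address bit clear.  lpfc_check_clean_addr_bit()
 * then returns 1 and, because fc_prevDID is non-zero, sets FC_DISC_DELAYED
 * so N_Port discovery is held off until the fabric settles.  On a
 * first-time FLOGI (fc_prevDID == 0) the delay is only taken when the
 * lpfc_delay_discovery module parameter is set.
 */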
/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * routine is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			 * to call reg_vnpid at least for the physical host
			 */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(np))
				continue;
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(shost->host_lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(shost->host_lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					"3135 Need register VFI: (x%x/%x)\n",
					vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned.
 * Otherwise, this node shall just wait for the remote node to issue PLOGI
 * and assign N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}
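/*
 * Worked example (informal): in point-to-point mode the two ports compare
 * WWPNs byte-for-byte with memcmp().  If the local fc_portname is, say,
 * 20:00:00:90:fa:xx:xx:xx and the peer reports a numerically smaller name,
 * memcmp() returns a positive value, so this side sets FC_PT2PT_PLOGI,
 * assigns itself PT2PT_LocalID (0x1) and the peer PT2PT_RemoteID (0x2), and
 * issues the PLOGI.  If the local name is the smaller one, this side simply
 * waits for the peer's PLOGI instead.
 */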
/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported an error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
 * retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and the function returns.
 * If the retry attempt has been given up (possibly after reaching the
 * maximum number of retries), one additional decrement of ndlp reference
 * shall be invoked before going out after releasing the command IOCB. This
 * will actually release the remote node (Note, lpfc_els_free_iocb() will
 * also invoke one decrement of ndlp reference count). If no error is
 * reported in the IOCB status, the command Port ID field is used to
 * determine whether this is a point-to-point topology or a fabric topology:
 * if the Port ID field is assigned, it is a fabric topology; otherwise, it
 * is a point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * topology-specific completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      irsp->ulpStatus, irsp->un.ulpWord[4],
			      vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
					 "2858 FLOGI failure Status:x%x/x%x "
					 "TMO:x%x Data x%x x%x\n",
					 irsp->ulpStatus, irsp->un.ulpWord[4],
					 irsp->ulpTimeout, phba->hba_flag,
					 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
			 "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n",
			 irsp->ulpStatus, irsp->un.ulpWord[4],
			 cmdiocb->sli4_xritag, irsp->ulpTimeout);

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);
			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR,
				LOG_FIP | LOG_ELS,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);
			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						LOG_ELS,
						"2769 FLOGI to FCF (x%x) "
						"completed successfully\n",
						phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	lpfc_nlp_put(ndlp);

	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
			(((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_ABORTED) &&
			((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
			 IOERR_SLI_DOWN))) &&
			(phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	lpfc_els_free_iocb(phba, cmdiocb);
}
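/*
 * Note (summary of the error decoding used above, not new logic): for a
 * failed ELS iocb, irsp->ulpStatus carries the IOSTAT_* completion status
 * and, for IOSTAT_LOCAL_REJECT, the masked low bits of irsp->un.ulpWord[4]
 * carry the IOERR_* reason, e.g.:
 *
 *	if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
 *	    (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
 *	     IOERR_LOOP_OPEN_FAILURE)
 *		... private or point-to-point loop, no fabric present ...
 *
 * lpfc_cmpl_els_flogi() uses this to tell "no fabric out there" apart from
 * genuine FLOGI failures that warrant a retry or CLEAR_LA.
 */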
/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out the FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
 * will be incremented by 1 for holding the ndlp and the reference to ndlp
 * will be stored into the context1 field of the IOCB for the completion
 * callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba  *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);

	phba->hba_flag |= HBA_FLOGI_ISSUED;

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
						phba->defer_flogi_acc_ox_id;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;

		vport->fc_myDID = did;
	}

	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
			    (ndlp->nlp_DID == Fabric_DID))
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp found, it will create an ndlp and
 * put it into the @vport's ndlp list. If an inactive ndlp found on the list,
 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
 * is then invoked with the @vport and the ndlp to perform the FDISC for the
 * @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return 0;
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_node by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm the plogi wwpn matches the stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on the vport list and
 * matches the WWPN of the N_Port logged into; 3) The @ndlp is not on the
 * vport list but does not have a WWPN assigned either. If the WWPN is
 * confirmed, the pointer to the @ndlp is returned. If the WWPN is not
 * confirmed:
 * 1) if there is a node on the vport list other than the @ndlp with the same
 * WWPN as the N_Port PLOGI logged into, lpfc_unreg_rpi() is invoked on that
 * node to release the RPI associated with the node; 2) if there is no node
 * found on the vport list with the same WWPN as the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated).
 * In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from the
 * not-matching or inactive "new_ndlp" on the vport node list is assigned to
 * the nlp_DID of the @ndlp. This is because the release of @ndlp actually
 * puts it into an inactive state on the vport node list, and the vport node
 * list management algorithm does not allow two nodes with the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *new_ndlp;
	struct lpfc_rport_data *rdata;
	struct fc_rport *rport;
	struct serv_parm *sp;
	uint8_t  name[sizeof(struct lpfc_name)];
	uint32_t rc, keepDID = 0, keep_nlp_flag = 0;
	uint32_t keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	int  put_node;
	int  put_rport;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN.  Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
		return ndlp;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
			 "new_ndlp x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));
	if (!new_ndlp) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		new_ndlp = lpfc_nlp_init(vport, ndlp->nlp_DID);
		if (!new_ndlp) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
		rc = memcmp(&ndlp->nlp_portname, name,
			    sizeof(struct lpfc_name));
		if (!rc) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		new_ndlp = lpfc_enable_node(vport, new_ndlp,
					    NLP_STE_UNUSED_NODE);
		if (!new_ndlp) {
			if (active_rrqs_xri_bitmap)
				mempool_free(active_rrqs_xri_bitmap,
					     phba->active_rrq_pool);
			return ndlp;
		}
		keepDID = new_ndlp->nlp_DID;
		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap)
			memcpy(active_rrqs_xri_bitmap,
			       new_ndlp->active_rrqs_xri_bitmap,
			       phba->cfg_rrq_xri_bitmap_sz);
	} else {
		keepDID = new_ndlp->nlp_DID;
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    active_rrqs_xri_bitmap)
			memcpy(active_rrqs_xri_bitmap,
			       new_ndlp->active_rrqs_xri_bitmap,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	/* At this point in this routine, we know new_ndlp will be
	 * returned. however, any previous GID_FTs that were done
	 * would have updated nlp_fc4_type in ndlp, so we must ensure
	 * new_ndlp has the right value.
	 */
	if (vport->fc_flag & FC_FABRIC) {
		keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
		new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
	}

	lpfc_unreg_rpi(vport, new_ndlp);
	new_ndlp->nlp_DID = ndlp->nlp_DID;
	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
	if (phba->sli_rev == LPFC_SLI_REV4)
		memcpy(new_ndlp->active_rrqs_xri_bitmap,
		       ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	spin_lock_irq(shost->host_lock);
	keep_new_nlp_flag = new_ndlp->nlp_flag;
	keep_nlp_flag = ndlp->nlp_flag;
	new_ndlp->nlp_flag = ndlp->nlp_flag;

	/* if new_ndlp had NLP_UNREG_INP set, keep it */
	if (keep_new_nlp_flag & NLP_UNREG_INP)
		new_ndlp->nlp_flag |= NLP_UNREG_INP;
	else
		new_ndlp->nlp_flag &= ~NLP_UNREG_INP;

	/* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
	if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
		new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	else
		new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;

	ndlp->nlp_flag = keep_new_nlp_flag;

	/* if ndlp had NLP_UNREG_INP set, keep it */
	if (keep_nlp_flag & NLP_UNREG_INP)
		ndlp->nlp_flag |= NLP_UNREG_INP;
	else
		ndlp->nlp_flag &= ~NLP_UNREG_INP;

	/* if ndlp had NLP_RPI_REGISTERED set, keep it */
	if (keep_nlp_flag & NLP_RPI_REGISTERED)
		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	else
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;

	spin_unlock_irq(shost->host_lock);

	/* Set nlp_states accordingly */
	keep_nlp_state = new_ndlp->nlp_state;
	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);

	/* interchange the nvme remoteport structs */
	keep_nrport = new_ndlp->nrport;
	new_ndlp->nrport = ndlp->nrport;

	/* Move this back to NPR state */
	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
lpfc_name)) == 0) { 1721 /* The new_ndlp is replacing ndlp totally, so we need 1722 * to put ndlp on UNUSED list and try to free it. 1723 */ 1724 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1725 "3179 PLOGI confirm NEW: %x %x\n", 1726 new_ndlp->nlp_DID, keepDID); 1727 1728 /* Fix up the rport accordingly */ 1729 rport = ndlp->rport; 1730 if (rport) { 1731 rdata = rport->dd_data; 1732 if (rdata->pnode == ndlp) { 1733 /* break the link before dropping the ref */ 1734 ndlp->rport = NULL; 1735 lpfc_nlp_put(ndlp); 1736 rdata->pnode = lpfc_nlp_get(new_ndlp); 1737 new_ndlp->rport = rport; 1738 } 1739 new_ndlp->nlp_type = ndlp->nlp_type; 1740 } 1741 1742 /* Fix up the nvme rport */ 1743 if (ndlp->nrport) { 1744 ndlp->nrport = NULL; 1745 lpfc_nlp_put(ndlp); 1746 } 1747 1748 /* We shall actually free the ndlp with both nlp_DID and 1749 * nlp_portname fields equals 0 to avoid any ndlp on the 1750 * nodelist never to be used. 1751 */ 1752 if (ndlp->nlp_DID == 0) { 1753 spin_lock_irq(&phba->ndlp_lock); 1754 NLP_SET_FREE_REQ(ndlp); 1755 spin_unlock_irq(&phba->ndlp_lock); 1756 } 1757 1758 /* Two ndlps cannot have the same did on the nodelist. 1759 * Note: for this case, ndlp has a NULL WWPN so setting 1760 * the nlp_fc4_type isn't required. 1761 */ 1762 ndlp->nlp_DID = keepDID; 1763 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1764 if (phba->sli_rev == LPFC_SLI_REV4 && 1765 active_rrqs_xri_bitmap) 1766 memcpy(ndlp->active_rrqs_xri_bitmap, 1767 active_rrqs_xri_bitmap, 1768 phba->cfg_rrq_xri_bitmap_sz); 1769 1770 if (!NLP_CHK_NODE_ACT(ndlp)) 1771 lpfc_drop_node(vport, ndlp); 1772 } 1773 else { 1774 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1775 "3180 PLOGI confirm SWAP: %x %x\n", 1776 new_ndlp->nlp_DID, keepDID); 1777 1778 lpfc_unreg_rpi(vport, ndlp); 1779 1780 /* Two ndlps cannot have the same did and the fc4 1781 * type must be transferred because the ndlp is in 1782 * flight. 1783 */ 1784 ndlp->nlp_DID = keepDID; 1785 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1786 1787 if (phba->sli_rev == LPFC_SLI_REV4 && 1788 active_rrqs_xri_bitmap) 1789 memcpy(ndlp->active_rrqs_xri_bitmap, 1790 active_rrqs_xri_bitmap, 1791 phba->cfg_rrq_xri_bitmap_sz); 1792 1793 /* Since we are switching over to the new_ndlp, 1794 * reset the old ndlp state 1795 */ 1796 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1797 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1798 keep_nlp_state = NLP_STE_NPR_NODE; 1799 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1800 1801 /* Previous ndlp no longer active with nvme host transport. 1802 * Remove reference from earlier registration unless the 1803 * nvme host took care of it. 
1804 */ 1805 if (ndlp->nrport) 1806 lpfc_nlp_put(ndlp); 1807 ndlp->nrport = keep_nrport; 1808 1809 /* Fix up the rport accordingly */ 1810 rport = ndlp->rport; 1811 if (rport) { 1812 rdata = rport->dd_data; 1813 put_node = rdata->pnode != NULL; 1814 put_rport = ndlp->rport != NULL; 1815 rdata->pnode = NULL; 1816 ndlp->rport = NULL; 1817 if (put_node) 1818 lpfc_nlp_put(ndlp); 1819 if (put_rport) 1820 put_device(&rport->dev); 1821 } 1822 } 1823 if (phba->sli_rev == LPFC_SLI_REV4 && 1824 active_rrqs_xri_bitmap) 1825 mempool_free(active_rrqs_xri_bitmap, 1826 phba->active_rrq_pool); 1827 1828 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1829 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1830 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1831 new_ndlp->nlp_fc4_type); 1832 1833 return new_ndlp; 1834 } 1835 1836 /** 1837 * lpfc_end_rscn - Check and handle more rscn for a vport 1838 * @vport: pointer to a host virtual N_Port data structure. 1839 * 1840 * This routine checks whether more Registration State Change 1841 * Notifications (RSCNs) came in while the discovery state machine was in 1842 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1843 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1844 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1845 * handling the RSCNs. 1846 **/ 1847 void 1848 lpfc_end_rscn(struct lpfc_vport *vport) 1849 { 1850 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1851 1852 if (vport->fc_flag & FC_RSCN_MODE) { 1853 /* 1854 * Check to see if more RSCNs came in while we were 1855 * processing this one. 1856 */ 1857 if (vport->fc_rscn_id_cnt || 1858 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1859 lpfc_els_handle_rscn(vport); 1860 else { 1861 spin_lock_irq(shost->host_lock); 1862 vport->fc_flag &= ~FC_RSCN_MODE; 1863 spin_unlock_irq(shost->host_lock); 1864 } 1865 } 1866 } 1867 1868 /** 1869 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1870 * @phba: pointer to lpfc hba data structure. 1871 * @cmdiocb: pointer to lpfc command iocb data structure. 1872 * @rspiocb: pointer to lpfc response iocb data structure. 1873 * 1874 * This routine will call the clear rrq function to free the rrq and 1875 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1876 * exist then the clear_rrq is still called because the rrq needs to 1877 * be freed. 1878 **/ 1879 1880 static void 1881 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1882 struct lpfc_iocbq *rspiocb) 1883 { 1884 struct lpfc_vport *vport = cmdiocb->vport; 1885 IOCB_t *irsp; 1886 struct lpfc_nodelist *ndlp; 1887 struct lpfc_node_rrq *rrq; 1888 1889 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1890 rrq = cmdiocb->context_un.rrq; 1891 cmdiocb->context_un.rsp_iocb = rspiocb; 1892 1893 irsp = &rspiocb->iocb; 1894 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1895 "RRQ cmpl: status:x%x/x%x did:x%x", 1896 irsp->ulpStatus, irsp->un.ulpWord[4], 1897 irsp->un.elsreq64.remoteID); 1898 1899 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1900 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || ndlp != rrq->ndlp) { 1901 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1902 "2882 RRQ completes to NPort x%x " 1903 "with no ndlp. 
Data: x%x x%x x%x\n", 1904 irsp->un.elsreq64.remoteID, 1905 irsp->ulpStatus, irsp->un.ulpWord[4], 1906 irsp->ulpIoTag); 1907 goto out; 1908 } 1909 1910 /* rrq completes to NPort <nlp_DID> */ 1911 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1912 "2880 RRQ completes to NPort x%x " 1913 "Data: x%x x%x x%x x%x x%x\n", 1914 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 1915 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1916 1917 if (irsp->ulpStatus) { 1918 /* Check for retry */ 1919 /* RRQ failed Don't print the vport to vport rjts */ 1920 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1921 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1922 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1923 (phba)->pport->cfg_log_verbose & LOG_ELS) 1924 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1925 "2881 RRQ failure DID:%06X Status:x%x/x%x\n", 1926 ndlp->nlp_DID, irsp->ulpStatus, 1927 irsp->un.ulpWord[4]); 1928 } 1929 out: 1930 if (rrq) 1931 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1932 lpfc_els_free_iocb(phba, cmdiocb); 1933 return; 1934 } 1935 /** 1936 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1937 * @phba: pointer to lpfc hba data structure. 1938 * @cmdiocb: pointer to lpfc command iocb data structure. 1939 * @rspiocb: pointer to lpfc response iocb data structure. 1940 * 1941 * This routine is the completion callback function for issuing the Port 1942 * Login (PLOGI) command. For PLOGI completion, there must be an active 1943 * ndlp on the vport node list that matches the remote node ID from the 1944 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1945 * ignored and command IOCB released. The PLOGI response IOCB status is 1946 * checked for error conditons. If there is error status reported, PLOGI 1947 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1948 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1949 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1950 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1951 * there are additional N_Port nodes with the vport that need to perform 1952 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1953 * PLOGIs. 1954 **/ 1955 static void 1956 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1957 struct lpfc_iocbq *rspiocb) 1958 { 1959 struct lpfc_vport *vport = cmdiocb->vport; 1960 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1961 IOCB_t *irsp; 1962 struct lpfc_nodelist *ndlp; 1963 struct lpfc_dmabuf *prsp; 1964 int disc; 1965 1966 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1967 cmdiocb->context_un.rsp_iocb = rspiocb; 1968 1969 irsp = &rspiocb->iocb; 1970 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1971 "PLOGI cmpl: status:x%x/x%x did:x%x", 1972 irsp->ulpStatus, irsp->un.ulpWord[4], 1973 irsp->un.elsreq64.remoteID); 1974 1975 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1976 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 1977 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 1978 "0136 PLOGI completes to NPort x%x " 1979 "with no ndlp. Data: x%x x%x x%x\n", 1980 irsp->un.elsreq64.remoteID, 1981 irsp->ulpStatus, irsp->un.ulpWord[4], 1982 irsp->ulpIoTag); 1983 goto out; 1984 } 1985 1986 /* Since ndlp can be freed in the disc state machine, note if this node 1987 * is being used during discovery. 
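 * The pattern, repeated by several completion handlers in this file, is to
 * snapshot and clear NLP_NPR_2B_DISC under the host lock and to restore it
 * if the command ends up being retried; a condensed sketch (locking omitted):
 *
 *	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
 *	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
 *	if (irsp->ulpStatus && lpfc_els_retry(phba, cmdiocb, rspiocb) && disc)
 *		ndlp->nlp_flag |= NLP_NPR_2B_DISC;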
1988 */ 1989 spin_lock_irq(shost->host_lock); 1990 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1991 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1992 spin_unlock_irq(shost->host_lock); 1993 1994 /* PLOGI completes to NPort <nlp_DID> */ 1995 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1996 "0102 PLOGI completes to NPort x%06x " 1997 "Data: x%x x%x x%x x%x x%x\n", 1998 ndlp->nlp_DID, ndlp->nlp_fc4_type, 1999 irsp->ulpStatus, irsp->un.ulpWord[4], 2000 disc, vport->num_disc_nodes); 2001 2002 /* Check to see if link went down during discovery */ 2003 if (lpfc_els_chk_latt(vport)) { 2004 spin_lock_irq(shost->host_lock); 2005 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2006 spin_unlock_irq(shost->host_lock); 2007 goto out; 2008 } 2009 2010 if (irsp->ulpStatus) { 2011 /* Check for retry */ 2012 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2013 /* ELS command is being retried */ 2014 if (disc) { 2015 spin_lock_irq(shost->host_lock); 2016 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2017 spin_unlock_irq(shost->host_lock); 2018 } 2019 goto out; 2020 } 2021 /* PLOGI failed Don't print the vport to vport rjts */ 2022 if (irsp->ulpStatus != IOSTAT_LS_RJT || 2023 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 2024 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 2025 (phba)->pport->cfg_log_verbose & LOG_ELS) 2026 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2027 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 2028 ndlp->nlp_DID, irsp->ulpStatus, 2029 irsp->un.ulpWord[4]); 2030 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2031 if (!lpfc_error_lost_link(irsp)) 2032 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2033 NLP_EVT_CMPL_PLOGI); 2034 } else { 2035 /* Good status, call state machine */ 2036 prsp = list_entry(((struct lpfc_dmabuf *) 2037 cmdiocb->context2)->list.next, 2038 struct lpfc_dmabuf, list); 2039 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2040 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2041 NLP_EVT_CMPL_PLOGI); 2042 } 2043 2044 if (disc && vport->num_disc_nodes) { 2045 /* Check to see if there are more PLOGIs to be sent */ 2046 lpfc_more_plogi(vport); 2047 2048 if (vport->num_disc_nodes == 0) { 2049 spin_lock_irq(shost->host_lock); 2050 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2051 spin_unlock_irq(shost->host_lock); 2052 2053 lpfc_can_disctmo(vport); 2054 lpfc_end_rscn(vport); 2055 } 2056 } 2057 2058 out: 2059 lpfc_els_free_iocb(phba, cmdiocb); 2060 return; 2061 } 2062 2063 /** 2064 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2065 * @vport: pointer to a host virtual N_Port data structure. 2066 * @did: destination port identifier. 2067 * @retry: number of retries to the command IOCB. 2068 * 2069 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2070 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2071 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2072 * This routine constructs the proper feilds of the PLOGI IOCB and invokes 2073 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2074 * 2075 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2076 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2077 * will be stored into the context1 field of the IOCB for the completion 2078 * callback function to the PLOGI ELS command. 
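 *
 * A typical caller sketch, mirroring lpfc_els_retry_delay_handler() later in
 * this file: issue the PLOGI and, only on success, record the previous state
 * and move the node into PLOGI_ISSUE state:
 *
 *	if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
 *		ndlp->nlp_prev_state = ndlp->nlp_state;
 *		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
 *	}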
2079 * 2080 * Return code 2081 * 0 - Successfully issued a plogi for @vport 2082 * 1 - failed to issue a plogi for @vport 2083 **/ 2084 int 2085 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2086 { 2087 struct lpfc_hba *phba = vport->phba; 2088 struct Scsi_Host *shost; 2089 struct serv_parm *sp; 2090 struct lpfc_nodelist *ndlp; 2091 struct lpfc_iocbq *elsiocb; 2092 uint8_t *pcmd; 2093 uint16_t cmdsize; 2094 int ret; 2095 2096 ndlp = lpfc_findnode_did(vport, did); 2097 2098 if (ndlp) { 2099 /* Defer the processing of the issue PLOGI until after the 2100 * outstanding UNREG_RPI mbox command completes, unless we 2101 * are going offline. This logic does not apply for Fabric DIDs 2102 */ 2103 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2104 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2105 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2106 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2107 "4110 Issue PLOGI x%x deferred " 2108 "on NPort x%x rpi x%x Data: %p\n", 2109 ndlp->nlp_defer_did, ndlp->nlp_DID, 2110 ndlp->nlp_rpi, ndlp); 2111 2112 /* We can only defer 1st PLOGI */ 2113 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2114 ndlp->nlp_defer_did = did; 2115 return 0; 2116 } 2117 if (!NLP_CHK_NODE_ACT(ndlp)) 2118 ndlp = NULL; 2119 } 2120 2121 /* If ndlp is not NULL, we will bump the reference count on it */ 2122 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2123 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2124 ELS_CMD_PLOGI); 2125 if (!elsiocb) 2126 return 1; 2127 2128 shost = lpfc_shost_from_vport(vport); 2129 spin_lock_irq(shost->host_lock); 2130 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2131 spin_unlock_irq(shost->host_lock); 2132 2133 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2134 2135 /* For PLOGI request, remainder of payload is service parameters */ 2136 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2137 pcmd += sizeof(uint32_t); 2138 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2139 sp = (struct serv_parm *) pcmd; 2140 2141 /* 2142 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2143 * to device on remote loops work. 2144 */ 2145 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2146 sp->cmn.altBbCredit = 1; 2147 2148 if (sp->cmn.fcphLow < FC_PH_4_3) 2149 sp->cmn.fcphLow = FC_PH_4_3; 2150 2151 if (sp->cmn.fcphHigh < FC_PH3) 2152 sp->cmn.fcphHigh = FC_PH3; 2153 2154 sp->cmn.valid_vendor_ver_level = 0; 2155 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2156 sp->cmn.bbRcvSizeMsb &= 0xF; 2157 2158 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2159 "Issue PLOGI: did:x%x", 2160 did, 0, 0); 2161 2162 /* If our firmware supports this feature, convey that 2163 * information to the target using the vendor specific field. 2164 */ 2165 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2166 sp->cmn.valid_vendor_ver_level = 1; 2167 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2168 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2169 } 2170 2171 phba->fc_stat.elsXmitPLOGI++; 2172 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2173 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2174 2175 if (ret == IOCB_ERROR) { 2176 lpfc_els_free_iocb(phba, elsiocb); 2177 return 1; 2178 } 2179 return 0; 2180 } 2181 2182 /** 2183 * lpfc_cmpl_els_prli - Completion callback function for prli 2184 * @phba: pointer to lpfc hba data structure. 2185 * @cmdiocb: pointer to lpfc command iocb data structure. 
2186 * @rspiocb: pointer to lpfc response iocb data structure. 2187 * 2188 * This routine is the completion callback function for a Process Login 2189 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2190 * status. If there is error status reported, PRLI retry shall be attempted 2191 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2192 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2193 * ndlp to mark the PRLI completion. 2194 **/ 2195 static void 2196 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2197 struct lpfc_iocbq *rspiocb) 2198 { 2199 struct lpfc_vport *vport = cmdiocb->vport; 2200 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2201 IOCB_t *irsp; 2202 struct lpfc_nodelist *ndlp; 2203 2204 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2205 cmdiocb->context_un.rsp_iocb = rspiocb; 2206 2207 irsp = &(rspiocb->iocb); 2208 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2209 spin_lock_irq(shost->host_lock); 2210 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2211 2212 /* Driver supports multiple FC4 types. Counters matter. */ 2213 vport->fc_prli_sent--; 2214 ndlp->fc4_prli_sent--; 2215 spin_unlock_irq(shost->host_lock); 2216 2217 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2218 "PRLI cmpl: status:x%x/x%x did:x%x", 2219 irsp->ulpStatus, irsp->un.ulpWord[4], 2220 ndlp->nlp_DID); 2221 2222 /* PRLI completes to NPort <nlp_DID> */ 2223 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2224 "0103 PRLI completes to NPort x%06x " 2225 "Data: x%x x%x x%x x%x\n", 2226 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2227 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2228 2229 /* Check to see if link went down during discovery */ 2230 if (lpfc_els_chk_latt(vport)) 2231 goto out; 2232 2233 if (irsp->ulpStatus) { 2234 /* Check for retry */ 2235 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2236 /* ELS command is being retried */ 2237 goto out; 2238 } 2239 2240 /* PRLI failed */ 2241 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2242 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2243 "data: x%x\n", 2244 ndlp->nlp_DID, irsp->ulpStatus, 2245 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2246 2247 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2248 if (lpfc_error_lost_link(irsp)) 2249 goto out; 2250 else 2251 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2252 NLP_EVT_CMPL_PRLI); 2253 } else { 2254 /* Good status, call state machine. However, if another 2255 * PRLI is outstanding, don't call the state machine 2256 * because final disposition to Mapped or Unmapped is 2257 * completed there. 2258 */ 2259 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2260 NLP_EVT_CMPL_PRLI); 2261 } 2262 2263 out: 2264 lpfc_els_free_iocb(phba, cmdiocb); 2265 return; 2266 } 2267 2268 /** 2269 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2270 * @vport: pointer to a host virtual N_Port data structure. 2271 * @ndlp: pointer to a node-list data structure. 2272 * @retry: number of retries to the command IOCB. 2273 * 2274 * This routine issues a Process Login (PRLI) ELS command for the 2275 * @vport. The PRLI service parameters are set up in the payload of the 2276 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2277 * is put to the IOCB completion callback func field before invoking the 2278 * routine lpfc_sli_issue_iocb() to send out PRLI command. 
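 *
 * On an SLI4 port a node that reports both FCP and NVME support gets two
 * PRLIs from a single call; a condensed sketch of the control flow used in
 * the body below (payload construction elided):
 *
 *	local_nlp_type = ndlp->nlp_fc4_type;
 * send_next_prli:
 *	(build either an FCP PRLI or an NVME PRLI, clear that bit in
 *	 local_nlp_type, then issue the IOCB)
 *	if (phba->sli_rev == LPFC_SLI_REV4 &&
 *	    local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
 *		goto send_next_prli;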
2279 * 2280 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2281 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2282 * will be stored into the context1 field of the IOCB for the completion 2283 * callback function to the PRLI ELS command. 2284 * 2285 * Return code 2286 * 0 - successfully issued prli iocb command for @vport 2287 * 1 - failed to issue prli iocb command for @vport 2288 **/ 2289 int 2290 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2291 uint8_t retry) 2292 { 2293 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2294 struct lpfc_hba *phba = vport->phba; 2295 PRLI *npr; 2296 struct lpfc_nvme_prli *npr_nvme; 2297 struct lpfc_iocbq *elsiocb; 2298 uint8_t *pcmd; 2299 uint16_t cmdsize; 2300 u32 local_nlp_type, elscmd; 2301 2302 /* 2303 * If we are in RSCN mode, the FC4 types supported from a 2304 * previous GFT_ID command may not be accurate. So, if we 2305 * are a NVME Initiator, always look for the possibility of 2306 * the remote NPort beng a NVME Target. 2307 */ 2308 if (phba->sli_rev == LPFC_SLI_REV4 && 2309 vport->fc_flag & FC_RSCN_MODE && 2310 vport->nvmei_support) 2311 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2312 local_nlp_type = ndlp->nlp_fc4_type; 2313 2314 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2315 * fields here before any of them can complete. 2316 */ 2317 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2318 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2319 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2320 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2321 ndlp->nvme_fb_size = 0; 2322 2323 send_next_prli: 2324 if (local_nlp_type & NLP_FC4_FCP) { 2325 /* Payload is 4 + 16 = 20 x14 bytes. */ 2326 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2327 elscmd = ELS_CMD_PRLI; 2328 } else if (local_nlp_type & NLP_FC4_NVME) { 2329 /* Payload is 4 + 20 = 24 x18 bytes. */ 2330 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2331 elscmd = ELS_CMD_NVMEPRLI; 2332 } else { 2333 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2334 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2335 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2336 return 1; 2337 } 2338 2339 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2340 * FC4 type, implicitly LOGO. 2341 */ 2342 if (phba->sli_rev == LPFC_SLI_REV3 && 2343 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2344 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2345 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2346 ndlp->nlp_type); 2347 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2348 return 1; 2349 } 2350 2351 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2352 ndlp->nlp_DID, elscmd); 2353 if (!elsiocb) 2354 return 1; 2355 2356 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2357 2358 /* For PRLI request, remainder of payload is service parameters */ 2359 memset(pcmd, 0, cmdsize); 2360 2361 if (local_nlp_type & NLP_FC4_FCP) { 2362 /* Remainder of payload is FCP PRLI parameter page. 2363 * Note: this data structure is defined as 2364 * BE/LE in the structure definition so no 2365 * byte swap call is made. 2366 */ 2367 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2368 pcmd += sizeof(uint32_t); 2369 npr = (PRLI *)pcmd; 2370 2371 /* 2372 * If our firmware version is 3.20 or later, 2373 * set the following bits for FC-TAPE support. 
2374 */ 2375 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2376 npr->ConfmComplAllowed = 1; 2377 npr->Retry = 1; 2378 npr->TaskRetryIdReq = 1; 2379 } 2380 npr->estabImagePair = 1; 2381 npr->readXferRdyDis = 1; 2382 if (vport->cfg_first_burst_size) 2383 npr->writeXferRdyDis = 1; 2384 2385 /* For FCP support */ 2386 npr->prliType = PRLI_FCP_TYPE; 2387 npr->initiatorFunc = 1; 2388 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2389 2390 /* Remove FCP type - processed. */ 2391 local_nlp_type &= ~NLP_FC4_FCP; 2392 } else if (local_nlp_type & NLP_FC4_NVME) { 2393 /* Remainder of payload is NVME PRLI parameter page. 2394 * This data structure is the newer definition that 2395 * uses bf macros so a byte swap is required. 2396 */ 2397 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2398 pcmd += sizeof(uint32_t); 2399 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2400 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2401 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2402 2403 /* Only initiators request first burst. */ 2404 if ((phba->cfg_nvme_enable_fb) && 2405 !phba->nvmet_support) 2406 bf_set(prli_fba, npr_nvme, 1); 2407 2408 if (phba->nvmet_support) { 2409 bf_set(prli_tgt, npr_nvme, 1); 2410 bf_set(prli_disc, npr_nvme, 1); 2411 } else { 2412 bf_set(prli_init, npr_nvme, 1); 2413 bf_set(prli_conf, npr_nvme, 1); 2414 } 2415 2416 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2417 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2418 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2419 2420 /* Remove NVME type - processed. */ 2421 local_nlp_type &= ~NLP_FC4_NVME; 2422 } 2423 2424 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2425 "Issue PRLI: did:x%x", 2426 ndlp->nlp_DID, 0, 0); 2427 2428 phba->fc_stat.elsXmitPRLI++; 2429 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2430 spin_lock_irq(shost->host_lock); 2431 ndlp->nlp_flag |= NLP_PRLI_SND; 2432 2433 /* The vport counters are used for lpfc_scan_finished, but 2434 * the ndlp is used to track outstanding PRLIs for different 2435 * FC4 types. 2436 */ 2437 vport->fc_prli_sent++; 2438 ndlp->fc4_prli_sent++; 2439 spin_unlock_irq(shost->host_lock); 2440 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 2441 IOCB_ERROR) { 2442 spin_lock_irq(shost->host_lock); 2443 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2444 spin_unlock_irq(shost->host_lock); 2445 lpfc_els_free_iocb(phba, elsiocb); 2446 return 1; 2447 } 2448 2449 2450 /* The driver supports 2 FC4 types. Make sure 2451 * a PRLI is issued for all types before exiting. 2452 */ 2453 if (phba->sli_rev == LPFC_SLI_REV4 && 2454 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2455 goto send_next_prli; 2456 2457 return 0; 2458 } 2459 2460 /** 2461 * lpfc_rscn_disc - Perform rscn discovery for a vport 2462 * @vport: pointer to a host virtual N_Port data structure. 2463 * 2464 * This routine performs Registration State Change Notification (RSCN) 2465 * discovery for a @vport. If the @vport's node port recovery count is not 2466 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2467 * the nodes that need recovery. If none of the PLOGI were needed through 2468 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2469 * invoked to check and handle possible more RSCN came in during the period 2470 * of processing the current ones. 
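 *
 * The flow is small enough to sketch in full (it matches the body below):
 * cancel the discovery timer, PLOGI any NPR nodes, and only finish RSCN
 * handling when no PLOGI was started:
 *
 *	lpfc_can_disctmo(vport);
 *	if (vport->fc_npr_cnt)
 *		if (lpfc_els_disc_plogi(vport))
 *			return;
 *	lpfc_end_rscn(vport);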
2471 **/ 2472 static void 2473 lpfc_rscn_disc(struct lpfc_vport *vport) 2474 { 2475 lpfc_can_disctmo(vport); 2476 2477 /* RSCN discovery */ 2478 /* go thru NPR nodes and issue ELS PLOGIs */ 2479 if (vport->fc_npr_cnt) 2480 if (lpfc_els_disc_plogi(vport)) 2481 return; 2482 2483 lpfc_end_rscn(vport); 2484 } 2485 2486 /** 2487 * lpfc_adisc_done - Complete the adisc phase of discovery 2488 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2489 * 2490 * This function is called when the final ADISC is completed during discovery. 2491 * This function handles clearing link attention or issuing reg_vpi depending 2492 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2493 * discovery. 2494 * This function is called with no locks held. 2495 **/ 2496 static void 2497 lpfc_adisc_done(struct lpfc_vport *vport) 2498 { 2499 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2500 struct lpfc_hba *phba = vport->phba; 2501 2502 /* 2503 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2504 * and continue discovery. 2505 */ 2506 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2507 !(vport->fc_flag & FC_RSCN_MODE) && 2508 (phba->sli_rev < LPFC_SLI_REV4)) { 2509 /* The ADISCs are complete. Doesn't matter if they 2510 * succeeded or failed because the ADISC completion 2511 * routine guarantees to call the state machine and 2512 * the RPI is either unregistered (failed ADISC response) 2513 * or the RPI is still valid and the node is marked 2514 * mapped for a target. The exchanges should be in the 2515 * correct state. This code is specific to SLI3. 2516 */ 2517 lpfc_issue_clear_la(phba, vport); 2518 lpfc_issue_reg_vpi(phba, vport); 2519 return; 2520 } 2521 /* 2522 * For SLI2, we need to set port_state to READY 2523 * and continue discovery. 2524 */ 2525 if (vport->port_state < LPFC_VPORT_READY) { 2526 /* If we get here, there is nothing to ADISC */ 2527 lpfc_issue_clear_la(phba, vport); 2528 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2529 vport->num_disc_nodes = 0; 2530 /* go thru NPR list, issue ELS PLOGIs */ 2531 if (vport->fc_npr_cnt) 2532 lpfc_els_disc_plogi(vport); 2533 if (!vport->num_disc_nodes) { 2534 spin_lock_irq(shost->host_lock); 2535 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2536 spin_unlock_irq(shost->host_lock); 2537 lpfc_can_disctmo(vport); 2538 lpfc_end_rscn(vport); 2539 } 2540 } 2541 vport->port_state = LPFC_VPORT_READY; 2542 } else 2543 lpfc_rscn_disc(vport); 2544 } 2545 2546 /** 2547 * lpfc_more_adisc - Issue more adisc as needed 2548 * @vport: pointer to a host virtual N_Port data structure. 2549 * 2550 * This routine determines whether there are more ndlps on a @vport 2551 * node list need to have Address Discover (ADISC) issued. If so, it will 2552 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2553 * remaining nodes which need to have ADISC sent. 
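 *
 * The ADISC completion path drives this routine; for example
 * lpfc_cmpl_els_adisc() below does, in sketch form:
 *
 *	if (disc && vport->num_disc_nodes)
 *		lpfc_more_adisc(vport);
 *
 * and once num_disc_nodes reaches zero this routine hands off to
 * lpfc_adisc_done() to start the PLOGI phase.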
2554 **/ 2555 void 2556 lpfc_more_adisc(struct lpfc_vport *vport) 2557 { 2558 if (vport->num_disc_nodes) 2559 vport->num_disc_nodes--; 2560 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2561 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2562 "0210 Continue discovery with %d ADISCs to go " 2563 "Data: x%x x%x x%x\n", 2564 vport->num_disc_nodes, vport->fc_adisc_cnt, 2565 vport->fc_flag, vport->port_state); 2566 /* Check to see if there are more ADISCs to be sent */ 2567 if (vport->fc_flag & FC_NLP_MORE) { 2568 lpfc_set_disctmo(vport); 2569 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2570 lpfc_els_disc_adisc(vport); 2571 } 2572 if (!vport->num_disc_nodes) 2573 lpfc_adisc_done(vport); 2574 return; 2575 } 2576 2577 /** 2578 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2579 * @phba: pointer to lpfc hba data structure. 2580 * @cmdiocb: pointer to lpfc command iocb data structure. 2581 * @rspiocb: pointer to lpfc response iocb data structure. 2582 * 2583 * This routine is the completion function for issuing the Address Discover 2584 * (ADISC) command. It first checks to see whether link went down during 2585 * the discovery process. If so, the node will be marked as node port 2586 * recovery for issuing discover IOCB by the link attention handler and 2587 * exit. Otherwise, the response status is checked. If error was reported 2588 * in the response status, the ADISC command shall be retried by invoking 2589 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2590 * the response status, the state machine is invoked to set transition 2591 * with respect to NLP_EVT_CMPL_ADISC event. 2592 **/ 2593 static void 2594 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2595 struct lpfc_iocbq *rspiocb) 2596 { 2597 struct lpfc_vport *vport = cmdiocb->vport; 2598 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2599 IOCB_t *irsp; 2600 struct lpfc_nodelist *ndlp; 2601 int disc; 2602 2603 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2604 cmdiocb->context_un.rsp_iocb = rspiocb; 2605 2606 irsp = &(rspiocb->iocb); 2607 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2608 2609 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2610 "ADISC cmpl: status:x%x/x%x did:x%x", 2611 irsp->ulpStatus, irsp->un.ulpWord[4], 2612 ndlp->nlp_DID); 2613 2614 /* Since ndlp can be freed in the disc state machine, note if this node 2615 * is being used during discovery. 
2616 */
2617 	spin_lock_irq(shost->host_lock);
2618 	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
2619 	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
2620 	spin_unlock_irq(shost->host_lock);
2621 	/* ADISC completes to NPort <nlp_DID> */
2622 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2623 			 "0104 ADISC completes to NPort x%x "
2624 			 "Data: x%x x%x x%x x%x x%x\n",
2625 			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2626 			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
2627 	/* Check to see if link went down during discovery */
2628 	if (lpfc_els_chk_latt(vport)) {
2629 		spin_lock_irq(shost->host_lock);
2630 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2631 		spin_unlock_irq(shost->host_lock);
2632 		goto out;
2633 	}
2634 
2635 	if (irsp->ulpStatus) {
2636 		/* Check for retry */
2637 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2638 			/* ELS command is being retried */
2639 			if (disc) {
2640 				spin_lock_irq(shost->host_lock);
2641 				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2642 				spin_unlock_irq(shost->host_lock);
2643 				lpfc_set_disctmo(vport);
2644 			}
2645 			goto out;
2646 		}
2647 		/* ADISC failed */
2648 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2649 				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2650 				 ndlp->nlp_DID, irsp->ulpStatus,
2651 				 irsp->un.ulpWord[4]);
2652 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2653 		if (!lpfc_error_lost_link(irsp))
2654 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2655 						NLP_EVT_CMPL_ADISC);
2656 	} else
2657 		/* Good status, call state machine */
2658 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2659 					NLP_EVT_CMPL_ADISC);
2660 
2661 	/* Check to see if there are more ADISCs to be sent */
2662 	if (disc && vport->num_disc_nodes)
2663 		lpfc_more_adisc(vport);
2664 out:
2665 	lpfc_els_free_iocb(phba, cmdiocb);
2666 	return;
2667 }
2668 
2669 /**
2670 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2671 * @vport: pointer to a virtual N_Port data structure.
2672 * @ndlp: pointer to a node-list data structure.
2673 * @retry: number of retries to the command IOCB.
2674 *
2675 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2676 * @vport. It prepares the payload of the ADISC ELS command, updates the
2677 * ADISC-related state flags of the @ndlp, and invokes the
2678 * lpfc_sli_issue_iocb() routine to issue the ADISC ELS command.
2679 *
2680 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2681 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2682 * will be stored into the context1 field of the IOCB for the completion
2683 * callback function to the ADISC ELS command.
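 *
 * The ADISC payload carries only the local addressing identity; the body
 * fills it in as follows (sketch):
 *
 *	ap = (ADISC *) pcmd;
 *	ap->hardAL_PA = phba->fc_pref_ALPA;
 *	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
 *	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
 *	ap->DID = be32_to_cpu(vport->fc_myDID);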
2684 *
2685 * Return code
2686 * 0 - successfully issued adisc
2687 * 1 - failed to issue adisc
2688 **/
2689 int
2690 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2691 		     uint8_t retry)
2692 {
2693 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2694 	struct lpfc_hba *phba = vport->phba;
2695 	ADISC *ap;
2696 	struct lpfc_iocbq *elsiocb;
2697 	uint8_t *pcmd;
2698 	uint16_t cmdsize;
2699 
2700 	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2701 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2702 				     ndlp->nlp_DID, ELS_CMD_ADISC);
2703 	if (!elsiocb)
2704 		return 1;
2705 
2706 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2707 
2708 	/* For ADISC request, remainder of payload is service parameters */
2709 	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2710 	pcmd += sizeof(uint32_t);
2711 
2712 	/* Fill in ADISC payload */
2713 	ap = (ADISC *) pcmd;
2714 	ap->hardAL_PA = phba->fc_pref_ALPA;
2715 	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2716 	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2717 	ap->DID = be32_to_cpu(vport->fc_myDID);
2718 
2719 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2720 			      "Issue ADISC: did:x%x",
2721 			      ndlp->nlp_DID, 0, 0);
2722 
2723 	phba->fc_stat.elsXmitADISC++;
2724 	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2725 	spin_lock_irq(shost->host_lock);
2726 	ndlp->nlp_flag |= NLP_ADISC_SND;
2727 	spin_unlock_irq(shost->host_lock);
2728 	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2729 	    IOCB_ERROR) {
2730 		spin_lock_irq(shost->host_lock);
2731 		ndlp->nlp_flag &= ~NLP_ADISC_SND;
2732 		spin_unlock_irq(shost->host_lock);
2733 		lpfc_els_free_iocb(phba, elsiocb);
2734 		return 1;
2735 	}
2736 	return 0;
2737 }
2738 
2739 /**
2740 * lpfc_cmpl_els_logo - Completion callback function for logo
2741 * @phba: pointer to lpfc hba data structure.
2742 * @cmdiocb: pointer to lpfc command iocb data structure.
2743 * @rspiocb: pointer to lpfc response iocb data structure.
2744 *
2745 * This routine is the completion callback function for the ELS Logout (LOGO)
2746 * command. The state machine of the associated ndlp is invoked with the
2747 * NLP_EVT_CMPL_LOGO event (unregistering the RPI if needed) whether or not
2748 * the LOGO was accepted. A failed LOGO is not retried; the failure is logged
2749 * and, for FCP/NVME target nodes, recovery is handled by a fresh PLOGI.
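 *
 * The recovery decision at the end of the handler, in sketch form (locking
 * and the pt2pt handling omitted):
 *
 *	if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
 *	    skip_recovery == 0) {
 *		lpfc_cancel_retry_delay_tmo(vport, ndlp);
 *		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
 *		lpfc_disc_start(vport);
 *	}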
2750 **/ 2751 static void 2752 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2753 struct lpfc_iocbq *rspiocb) 2754 { 2755 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2756 struct lpfc_vport *vport = ndlp->vport; 2757 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2758 IOCB_t *irsp; 2759 struct lpfcMboxq *mbox; 2760 unsigned long flags; 2761 uint32_t skip_recovery = 0; 2762 2763 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2764 cmdiocb->context_un.rsp_iocb = rspiocb; 2765 2766 irsp = &(rspiocb->iocb); 2767 spin_lock_irq(shost->host_lock); 2768 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2769 spin_unlock_irq(shost->host_lock); 2770 2771 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2772 "LOGO cmpl: status:x%x/x%x did:x%x", 2773 irsp->ulpStatus, irsp->un.ulpWord[4], 2774 ndlp->nlp_DID); 2775 2776 /* LOGO completes to NPort <nlp_DID> */ 2777 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2778 "0105 LOGO completes to NPort x%x " 2779 "Data: x%x x%x x%x x%x\n", 2780 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2781 irsp->ulpTimeout, vport->num_disc_nodes); 2782 2783 if (lpfc_els_chk_latt(vport)) { 2784 skip_recovery = 1; 2785 goto out; 2786 } 2787 2788 /* Check to see if link went down during discovery */ 2789 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2790 /* NLP_EVT_DEVICE_RM should unregister the RPI 2791 * which should abort all outstanding IOs. 2792 */ 2793 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2794 NLP_EVT_DEVICE_RM); 2795 skip_recovery = 1; 2796 goto out; 2797 } 2798 2799 /* The LOGO will not be retried on failure. A LOGO was 2800 * issued to the remote rport and a ACC or RJT or no Answer are 2801 * all acceptable. Note the failure and move forward with 2802 * discovery. The PLOGI will retry. 2803 */ 2804 if (irsp->ulpStatus) { 2805 /* LOGO failed */ 2806 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2807 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2808 ndlp->nlp_DID, irsp->ulpStatus, 2809 irsp->un.ulpWord[4]); 2810 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2811 if (lpfc_error_lost_link(irsp)) { 2812 skip_recovery = 1; 2813 goto out; 2814 } 2815 } 2816 2817 /* Call state machine. This will unregister the rpi if needed. */ 2818 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2819 2820 out: 2821 lpfc_els_free_iocb(phba, cmdiocb); 2822 /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ 2823 if ((vport->fc_flag & FC_PT2PT) && 2824 !(vport->fc_flag & FC_PT2PT_PLOGI)) { 2825 phba->pport->fc_myDID = 0; 2826 2827 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 2828 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 2829 if (phba->nvmet_support) 2830 lpfc_nvmet_update_targetport(phba); 2831 else 2832 lpfc_nvme_update_localport(phba->pport); 2833 } 2834 2835 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2836 if (mbox) { 2837 lpfc_config_link(phba, mbox); 2838 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2839 mbox->vport = vport; 2840 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 2841 MBX_NOT_FINISHED) { 2842 mempool_free(mbox, phba->mbox_mem_pool); 2843 skip_recovery = 1; 2844 } 2845 } 2846 } 2847 2848 /* 2849 * If the node is a target, the handling attempts to recover the port. 2850 * For any other port type, the rpi is unregistered as an implicit 2851 * LOGO. 
2852 */ 2853 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 2854 skip_recovery == 0) { 2855 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2856 spin_lock_irqsave(shost->host_lock, flags); 2857 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2858 spin_unlock_irqrestore(shost->host_lock, flags); 2859 2860 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2861 "3187 LOGO completes to NPort x%x: Start " 2862 "Recovery Data: x%x x%x x%x x%x\n", 2863 ndlp->nlp_DID, irsp->ulpStatus, 2864 irsp->un.ulpWord[4], irsp->ulpTimeout, 2865 vport->num_disc_nodes); 2866 lpfc_disc_start(vport); 2867 } 2868 return; 2869 } 2870 2871 /** 2872 * lpfc_issue_els_logo - Issue a logo to an node on a vport 2873 * @vport: pointer to a virtual N_Port data structure. 2874 * @ndlp: pointer to a node-list data structure. 2875 * @retry: number of retries to the command IOCB. 2876 * 2877 * This routine constructs and issues an ELS Logout (LOGO) iocb command 2878 * to a remote node, referred by an @ndlp on a @vport. It constructs the 2879 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 2880 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 2881 * 2882 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 2883 * will be incremented by 1 for holding the ndlp and the reference to ndlp 2884 * will be stored into the context1 field of the IOCB for the completion 2885 * callback function to the LOGO ELS command. 2886 * 2887 * Callers of this routine are expected to unregister the RPI first 2888 * 2889 * Return code 2890 * 0 - successfully issued logo 2891 * 1 - failed to issue logo 2892 **/ 2893 int 2894 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2895 uint8_t retry) 2896 { 2897 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2898 struct lpfc_hba *phba = vport->phba; 2899 struct lpfc_iocbq *elsiocb; 2900 uint8_t *pcmd; 2901 uint16_t cmdsize; 2902 int rc; 2903 2904 spin_lock_irq(shost->host_lock); 2905 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2906 spin_unlock_irq(shost->host_lock); 2907 return 0; 2908 } 2909 spin_unlock_irq(shost->host_lock); 2910 2911 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2912 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2913 ndlp->nlp_DID, ELS_CMD_LOGO); 2914 if (!elsiocb) 2915 return 1; 2916 2917 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2918 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2919 pcmd += sizeof(uint32_t); 2920 2921 /* Fill in LOGO payload */ 2922 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 2923 pcmd += sizeof(uint32_t); 2924 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 2925 2926 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2927 "Issue LOGO: did:x%x", 2928 ndlp->nlp_DID, 0, 0); 2929 2930 phba->fc_stat.elsXmitLOGO++; 2931 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2932 spin_lock_irq(shost->host_lock); 2933 ndlp->nlp_flag |= NLP_LOGO_SND; 2934 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 2935 spin_unlock_irq(shost->host_lock); 2936 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2937 if (rc == IOCB_ERROR) { 2938 spin_lock_irq(shost->host_lock); 2939 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2940 spin_unlock_irq(shost->host_lock); 2941 lpfc_els_free_iocb(phba, elsiocb); 2942 return 1; 2943 } 2944 2945 spin_lock_irq(shost->host_lock); 2946 ndlp->nlp_prev_state = ndlp->nlp_state; 2947 spin_unlock_irq(shost->host_lock); 2948 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 2949 return 0; 2950 } 2951 2952 /** 2953 * lpfc_cmpl_els_cmd - 
Completion callback function for generic els command 2954 * @phba: pointer to lpfc hba data structure. 2955 * @cmdiocb: pointer to lpfc command iocb data structure. 2956 * @rspiocb: pointer to lpfc response iocb data structure. 2957 * 2958 * This routine is a generic completion callback function for ELS commands. 2959 * Specifically, it is the callback function which does not need to perform 2960 * any command specific operations. It is currently used by the ELS command 2961 * issuing routines for the ELS State Change Request (SCR), 2962 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution 2963 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than 2964 * certain debug loggings, this callback function simply invokes the 2965 * lpfc_els_chk_latt() routine to check whether link went down during the 2966 * discovery process. 2967 **/ 2968 static void 2969 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2970 struct lpfc_iocbq *rspiocb) 2971 { 2972 struct lpfc_vport *vport = cmdiocb->vport; 2973 IOCB_t *irsp; 2974 2975 irsp = &rspiocb->iocb; 2976 2977 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2978 "ELS cmd cmpl: status:x%x/x%x did:x%x", 2979 irsp->ulpStatus, irsp->un.ulpWord[4], 2980 irsp->un.elsreq64.remoteID); 2981 /* ELS cmd tag <ulpIoTag> completes */ 2982 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2983 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 2984 irsp->ulpIoTag, irsp->ulpStatus, 2985 irsp->un.ulpWord[4], irsp->ulpTimeout); 2986 /* Check to see if link went down during discovery */ 2987 lpfc_els_chk_latt(vport); 2988 lpfc_els_free_iocb(phba, cmdiocb); 2989 return; 2990 } 2991 2992 /** 2993 * lpfc_issue_els_scr - Issue a scr to an node on a vport 2994 * @vport: pointer to a host virtual N_Port data structure. 2995 * @nportid: N_Port identifier to the remote node. 2996 * @retry: number of retries to the command IOCB. 2997 * 2998 * This routine issues a State Change Request (SCR) to a fabric node 2999 * on a @vport. The remote node @nportid is passed into the function. It 3000 * first search the @vport node list to find the matching ndlp. If no such 3001 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3002 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3003 * routine is invoked to send the SCR IOCB. 3004 * 3005 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3006 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3007 * will be stored into the context1 field of the IOCB for the completion 3008 * callback function to the SCR ELS command. 
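 *
 * Because the node may exist only for this SCR, the routine drops its own
 * reference once the IOCB is queued (except in pt2pt mode) and also on the
 * error path; a condensed sketch of the tail of the body:
 *
 *	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) {
 *		lpfc_nlp_put(ndlp);
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *	if (!(vport->fc_flag & FC_PT2PT))
 *		lpfc_nlp_put(ndlp);
 *	return 0;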
3009 * 3010 * Return code 3011 * 0 - Successfully issued scr command 3012 * 1 - Failed to issue scr command 3013 **/ 3014 int 3015 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3016 { 3017 struct lpfc_hba *phba = vport->phba; 3018 struct lpfc_iocbq *elsiocb; 3019 uint8_t *pcmd; 3020 uint16_t cmdsize; 3021 struct lpfc_nodelist *ndlp; 3022 3023 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3024 3025 ndlp = lpfc_findnode_did(vport, nportid); 3026 if (!ndlp) { 3027 ndlp = lpfc_nlp_init(vport, nportid); 3028 if (!ndlp) 3029 return 1; 3030 lpfc_enqueue_node(vport, ndlp); 3031 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3032 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3033 if (!ndlp) 3034 return 1; 3035 } 3036 3037 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3038 ndlp->nlp_DID, ELS_CMD_SCR); 3039 3040 if (!elsiocb) { 3041 /* This will trigger the release of the node just 3042 * allocated 3043 */ 3044 lpfc_nlp_put(ndlp); 3045 return 1; 3046 } 3047 3048 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3049 3050 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3051 pcmd += sizeof(uint32_t); 3052 3053 /* For SCR, remainder of payload is SCR parameter page */ 3054 memset(pcmd, 0, sizeof(SCR)); 3055 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3056 3057 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3058 "Issue SCR: did:x%x", 3059 ndlp->nlp_DID, 0, 0); 3060 3061 phba->fc_stat.elsXmitSCR++; 3062 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3063 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 3064 IOCB_ERROR) { 3065 /* The additional lpfc_nlp_put will cause the following 3066 * lpfc_els_free_iocb routine to trigger the rlease of 3067 * the node. 3068 */ 3069 lpfc_nlp_put(ndlp); 3070 lpfc_els_free_iocb(phba, elsiocb); 3071 return 1; 3072 } 3073 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3074 * trigger the release of node. 3075 */ 3076 if (!(vport->fc_flag & FC_PT2PT)) 3077 lpfc_nlp_put(ndlp); 3078 return 0; 3079 } 3080 3081 /** 3082 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3083 * @vport: pointer to a host virtual N_Port data structure. 3084 * @nportid: N_Port identifier to the remote node. 3085 * @retry: number of retries to the command IOCB. 3086 * 3087 * This routine issues a Fibre Channel Address Resolution Response 3088 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid) 3089 * is passed into the function. It first search the @vport node list to find 3090 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created 3091 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the 3092 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command. 3093 * 3094 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 3095 * will be incremented by 1 for holding the ndlp and the reference to ndlp 3096 * will be stored into the context1 field of the IOCB for the completion 3097 * callback function to the PARPR ELS command. 
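 *
 * The FARPR payload is seeded with the requesting and responding identities;
 * a condensed sketch of the fill done in the body:
 *
 *	*lp++ = be32_to_cpu(nportid);
 *	*lp++ = be32_to_cpu(vport->fc_myDID);
 *	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
 *	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
 *	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));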
3098 * 3099 * Return code 3100 * 0 - Successfully issued farpr command 3101 * 1 - Failed to issue farpr command 3102 **/ 3103 static int 3104 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) 3105 { 3106 struct lpfc_hba *phba = vport->phba; 3107 struct lpfc_iocbq *elsiocb; 3108 FARP *fp; 3109 uint8_t *pcmd; 3110 uint32_t *lp; 3111 uint16_t cmdsize; 3112 struct lpfc_nodelist *ondlp; 3113 struct lpfc_nodelist *ndlp; 3114 3115 cmdsize = (sizeof(uint32_t) + sizeof(FARP)); 3116 3117 ndlp = lpfc_findnode_did(vport, nportid); 3118 if (!ndlp) { 3119 ndlp = lpfc_nlp_init(vport, nportid); 3120 if (!ndlp) 3121 return 1; 3122 lpfc_enqueue_node(vport, ndlp); 3123 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 3124 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 3125 if (!ndlp) 3126 return 1; 3127 } 3128 3129 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3130 ndlp->nlp_DID, ELS_CMD_RNID); 3131 if (!elsiocb) { 3132 /* This will trigger the release of the node just 3133 * allocated 3134 */ 3135 lpfc_nlp_put(ndlp); 3136 return 1; 3137 } 3138 3139 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3140 3141 *((uint32_t *) (pcmd)) = ELS_CMD_FARPR; 3142 pcmd += sizeof(uint32_t); 3143 3144 /* Fill in FARPR payload */ 3145 fp = (FARP *) (pcmd); 3146 memset(fp, 0, sizeof(FARP)); 3147 lp = (uint32_t *) pcmd; 3148 *lp++ = be32_to_cpu(nportid); 3149 *lp++ = be32_to_cpu(vport->fc_myDID); 3150 fp->Rflags = 0; 3151 fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE); 3152 3153 memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name)); 3154 memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 3155 ondlp = lpfc_findnode_did(vport, nportid); 3156 if (ondlp && NLP_CHK_NODE_ACT(ondlp)) { 3157 memcpy(&fp->OportName, &ondlp->nlp_portname, 3158 sizeof(struct lpfc_name)); 3159 memcpy(&fp->OnodeName, &ondlp->nlp_nodename, 3160 sizeof(struct lpfc_name)); 3161 } 3162 3163 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3164 "Issue FARPR: did:x%x", 3165 ndlp->nlp_DID, 0, 0); 3166 3167 phba->fc_stat.elsXmitFARPR++; 3168 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3169 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 3170 IOCB_ERROR) { 3171 /* The additional lpfc_nlp_put will cause the following 3172 * lpfc_els_free_iocb routine to trigger the release of 3173 * the node. 3174 */ 3175 lpfc_nlp_put(ndlp); 3176 lpfc_els_free_iocb(phba, elsiocb); 3177 return 1; 3178 } 3179 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3180 * trigger the release of the node. 3181 */ 3182 lpfc_nlp_put(ndlp); 3183 return 0; 3184 } 3185 3186 /** 3187 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry 3188 * @vport: pointer to a host virtual N_Port data structure. 3189 * @nlp: pointer to a node-list data structure. 3190 * 3191 * This routine cancels the timer with a delayed IOCB-command retry for 3192 * a @vport's @ndlp. It stops the timer for the delayed function retrial and 3193 * removes the ELS retry event if it presents. In addition, if the 3194 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB 3195 * commands are sent for the @vport's nodes that require issuing discovery 3196 * ADISC. 
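 *
 * Cancellation in sketch form (the real code takes the host lock around the
 * flag updates):
 *
 *	nlp->nlp_flag &= ~NLP_DELAY_TMO;
 *	del_timer_sync(&nlp->nlp_delayfunc);
 *	nlp->nlp_last_elscmd = 0;
 *	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
 *		list_del_init(&nlp->els_retry_evt.evt_listp);
 *		lpfc_nlp_put((struct lpfc_nodelist *)nlp->els_retry_evt.evt_arg1);
 *	}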
3197 **/ 3198 void 3199 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 3200 { 3201 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3202 struct lpfc_work_evt *evtp; 3203 3204 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 3205 return; 3206 spin_lock_irq(shost->host_lock); 3207 nlp->nlp_flag &= ~NLP_DELAY_TMO; 3208 spin_unlock_irq(shost->host_lock); 3209 del_timer_sync(&nlp->nlp_delayfunc); 3210 nlp->nlp_last_elscmd = 0; 3211 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 3212 list_del_init(&nlp->els_retry_evt.evt_listp); 3213 /* Decrement nlp reference count held for the delayed retry */ 3214 evtp = &nlp->els_retry_evt; 3215 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 3216 } 3217 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 3218 spin_lock_irq(shost->host_lock); 3219 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3220 spin_unlock_irq(shost->host_lock); 3221 if (vport->num_disc_nodes) { 3222 if (vport->port_state < LPFC_VPORT_READY) { 3223 /* Check if there are more ADISCs to be sent */ 3224 lpfc_more_adisc(vport); 3225 } else { 3226 /* Check if there are more PLOGIs to be sent */ 3227 lpfc_more_plogi(vport); 3228 if (vport->num_disc_nodes == 0) { 3229 spin_lock_irq(shost->host_lock); 3230 vport->fc_flag &= ~FC_NDISC_ACTIVE; 3231 spin_unlock_irq(shost->host_lock); 3232 lpfc_can_disctmo(vport); 3233 lpfc_end_rscn(vport); 3234 } 3235 } 3236 } 3237 } 3238 return; 3239 } 3240 3241 /** 3242 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 3243 * @ptr: holder for the pointer to the timer function associated data (ndlp). 3244 * 3245 * This routine is invoked by the ndlp delayed-function timer to check 3246 * whether there is any pending ELS retry event(s) with the node. If not, it 3247 * simply returns. Otherwise, if there is at least one ELS delayed event, it 3248 * adds the delayed events to the HBA work list and invokes the 3249 * lpfc_worker_wake_up() routine to wake up worker thread to process the 3250 * event. Note that lpfc_nlp_get() is called before posting the event to 3251 * the work list to hold reference count of ndlp so that it guarantees the 3252 * reference to ndlp will still be available when the worker thread gets 3253 * to the event associated with the ndlp. 3254 **/ 3255 void 3256 lpfc_els_retry_delay(struct timer_list *t) 3257 { 3258 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 3259 struct lpfc_vport *vport = ndlp->vport; 3260 struct lpfc_hba *phba = vport->phba; 3261 unsigned long flags; 3262 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 3263 3264 spin_lock_irqsave(&phba->hbalock, flags); 3265 if (!list_empty(&evtp->evt_listp)) { 3266 spin_unlock_irqrestore(&phba->hbalock, flags); 3267 return; 3268 } 3269 3270 /* We need to hold the node by incrementing the reference 3271 * count until the queued work is done 3272 */ 3273 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 3274 if (evtp->evt_arg1) { 3275 evtp->evt = LPFC_EVT_ELS_RETRY; 3276 list_add_tail(&evtp->evt_listp, &phba->work_list); 3277 lpfc_worker_wake_up(phba); 3278 } 3279 spin_unlock_irqrestore(&phba->hbalock, flags); 3280 return; 3281 } 3282 3283 /** 3284 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 3285 * @ndlp: pointer to a node-list data structure. 3286 * 3287 * This routine is the worker-thread handler for processing the @ndlp delayed 3288 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 3289 * the last ELS command from the associated ndlp and invokes the proper ELS 3290 * function according to the delayed ELS command to retry the command. 3291 **/ 3292 void 3293 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 3294 { 3295 struct lpfc_vport *vport = ndlp->vport; 3296 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3297 uint32_t cmd, retry; 3298 3299 spin_lock_irq(shost->host_lock); 3300 cmd = ndlp->nlp_last_elscmd; 3301 ndlp->nlp_last_elscmd = 0; 3302 3303 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 3304 spin_unlock_irq(shost->host_lock); 3305 return; 3306 } 3307 3308 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 3309 spin_unlock_irq(shost->host_lock); 3310 /* 3311 * If a discovery event readded nlp_delayfunc after timer 3312 * firing and before processing the timer, cancel the 3313 * nlp_delayfunc. 3314 */ 3315 del_timer_sync(&ndlp->nlp_delayfunc); 3316 retry = ndlp->nlp_retry; 3317 ndlp->nlp_retry = 0; 3318 3319 switch (cmd) { 3320 case ELS_CMD_FLOGI: 3321 lpfc_issue_els_flogi(vport, ndlp, retry); 3322 break; 3323 case ELS_CMD_PLOGI: 3324 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 3325 ndlp->nlp_prev_state = ndlp->nlp_state; 3326 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3327 } 3328 break; 3329 case ELS_CMD_ADISC: 3330 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 3331 ndlp->nlp_prev_state = ndlp->nlp_state; 3332 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3333 } 3334 break; 3335 case ELS_CMD_PRLI: 3336 case ELS_CMD_NVMEPRLI: 3337 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 3338 ndlp->nlp_prev_state = ndlp->nlp_state; 3339 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3340 } 3341 break; 3342 case ELS_CMD_LOGO: 3343 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 3344 ndlp->nlp_prev_state = ndlp->nlp_state; 3345 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3346 } 3347 break; 3348 case ELS_CMD_FDISC: 3349 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 3350 lpfc_issue_els_fdisc(vport, ndlp, retry); 3351 break; 3352 } 3353 return; 3354 } 3355 3356 /** 3357 * lpfc_link_reset - Issue link reset 3358 * @vport: pointer to a virtual N_Port data structure. 3359 * 3360 * This routine performs link reset by sending INIT_LINK mailbox command. 3361 * For SLI-3 adapter, link attention interrupt is enabled before issuing 3362 * INIT_LINK mailbox command. 
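 *
 * The SLI-3 link-attention enable step amounts to (sketch; hbalock held and
 * the register read back to flush in the real code):
 *
 *	phba->sli.sli_flag |= LPFC_PROCESS_LA;
 *	control = readl(phba->HCregaddr);
 *	control |= HC_LAINT_ENA;
 *	writel(control, phba->HCregaddr);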
3363 *
3364 * Return code
3365 * 0 - Link reset initiated successfully
3366 * 1 - Failed to initiate link reset
3367 **/
3368 int
3369 lpfc_link_reset(struct lpfc_vport *vport)
3370 {
3371 struct lpfc_hba *phba = vport->phba;
3372 LPFC_MBOXQ_t *mbox;
3373 uint32_t control;
3374 int rc;
3375
3376 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3377 "2851 Attempt link reset\n");
3378 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3379 if (!mbox) {
3380 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
3381 "2852 Failed to allocate mbox memory");
3382 return 1;
3383 }
3384
3385 /* Enable Link attention interrupts */
3386 if (phba->sli_rev <= LPFC_SLI_REV3) {
3387 spin_lock_irq(&phba->hbalock);
3388 phba->sli.sli_flag |= LPFC_PROCESS_LA;
3389 control = readl(phba->HCregaddr);
3390 control |= HC_LAINT_ENA;
3391 writel(control, phba->HCregaddr);
3392 readl(phba->HCregaddr); /* flush */
3393 spin_unlock_irq(&phba->hbalock);
3394 }
3395
3396 lpfc_init_link(phba, mbox, phba->cfg_topology,
3397 phba->cfg_link_speed);
3398 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3399 mbox->vport = vport;
3400 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3401 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
3402 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
3403 "2853 Failed to issue INIT_LINK "
3404 "mbox command, rc:x%x\n", rc);
3405 mempool_free(mbox, phba->mbox_mem_pool);
3406 return 1;
3407 }
3408
3409 return 0;
3410 }
3411
3412 /**
3413 * lpfc_els_retry - Make retry decision on an els command iocb
3414 * @phba: pointer to lpfc hba data structure.
3415 * @cmdiocb: pointer to lpfc command iocb data structure.
3416 * @rspiocb: pointer to lpfc response iocb data structure.
3417 *
3418 * This routine makes a retry decision on an ELS command IOCB, which has
3419 * failed. The following ELS IOCBs use this function for retrying the command
3420 * when a previously issued command responded with an error status: FLOGI,
3421 * PLOGI, PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
3422 * returned error status, it makes the decision whether a retry shall be
3423 * issued for the command, and whether a retry shall be made immediately or
3424 * delayed. In the former case, the corresponding ELS command issuing-function
3425 * is called to retry the command. In the latter case, the ELS command shall
3426 * be posted to the ndlp delayed event and the delayed-function timer set on
3427 * the ndlp for the delayed command issuing.
3428 *
3429 * Return code
3430 * 0 - No retry of els command is made
3431 * 1 - Immediate or delayed retry of els command is made
3432 **/
3433 static int
3434 lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3435 struct lpfc_iocbq *rspiocb)
3436 {
3437 struct lpfc_vport *vport = cmdiocb->vport;
3438 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3439 IOCB_t *irsp = &rspiocb->iocb;
3440 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3441 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
3442 uint32_t *elscmd;
3443 struct ls_rjt stat;
3444 int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
3445 int logerr = 0;
3446 uint32_t cmd = 0;
3447 uint32_t did;
3448 int link_reset = 0, rc;
3449
3450
3451 /* Note: context2 may be 0 for internal driver abort
3452 * of a delayed ELS command.
3453 */ 3454 3455 if (pcmd && pcmd->virt) { 3456 elscmd = (uint32_t *) (pcmd->virt); 3457 cmd = *elscmd++; 3458 } 3459 3460 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 3461 did = ndlp->nlp_DID; 3462 else { 3463 /* We should only hit this case for retrying PLOGI */ 3464 did = irsp->un.elsreq64.remoteID; 3465 ndlp = lpfc_findnode_did(vport, did); 3466 if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 3467 && (cmd != ELS_CMD_PLOGI)) 3468 return 1; 3469 } 3470 3471 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3472 "Retry ELS: wd7:x%x wd4:x%x did:x%x", 3473 *(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID); 3474 3475 switch (irsp->ulpStatus) { 3476 case IOSTAT_FCP_RSP_ERROR: 3477 break; 3478 case IOSTAT_REMOTE_STOP: 3479 if (phba->sli_rev == LPFC_SLI_REV4) { 3480 /* This IO was aborted by the target, we don't 3481 * know the rxid and because we did not send the 3482 * ABTS we cannot generate and RRQ. 3483 */ 3484 lpfc_set_rrq_active(phba, ndlp, 3485 cmdiocb->sli4_lxritag, 0, 0); 3486 } 3487 break; 3488 case IOSTAT_LOCAL_REJECT: 3489 switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { 3490 case IOERR_LOOP_OPEN_FAILURE: 3491 if (cmd == ELS_CMD_FLOGI) { 3492 if (PCI_DEVICE_ID_HORNET == 3493 phba->pcidev->device) { 3494 phba->fc_topology = LPFC_TOPOLOGY_LOOP; 3495 phba->pport->fc_myDID = 0; 3496 phba->alpa_map[0] = 0; 3497 phba->alpa_map[1] = 0; 3498 } 3499 } 3500 if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0) 3501 delay = 1000; 3502 retry = 1; 3503 break; 3504 3505 case IOERR_ILLEGAL_COMMAND: 3506 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3507 "0124 Retry illegal cmd x%x " 3508 "retry:x%x delay:x%x\n", 3509 cmd, cmdiocb->retry, delay); 3510 retry = 1; 3511 /* All command's retry policy */ 3512 maxretry = 8; 3513 if (cmdiocb->retry > 2) 3514 delay = 1000; 3515 break; 3516 3517 case IOERR_NO_RESOURCES: 3518 logerr = 1; /* HBA out of resources */ 3519 retry = 1; 3520 if (cmdiocb->retry > 100) 3521 delay = 100; 3522 maxretry = 250; 3523 break; 3524 3525 case IOERR_ILLEGAL_FRAME: 3526 delay = 100; 3527 retry = 1; 3528 break; 3529 3530 case IOERR_INVALID_RPI: 3531 if (cmd == ELS_CMD_PLOGI && 3532 did == NameServer_DID) { 3533 /* Continue forever if plogi to */ 3534 /* the nameserver fails */ 3535 maxretry = 0; 3536 delay = 100; 3537 } 3538 retry = 1; 3539 break; 3540 3541 case IOERR_SEQUENCE_TIMEOUT: 3542 if (cmd == ELS_CMD_PLOGI && 3543 did == NameServer_DID && 3544 (cmdiocb->retry + 1) == maxretry) { 3545 /* Reset the Link */ 3546 link_reset = 1; 3547 break; 3548 } 3549 retry = 1; 3550 delay = 100; 3551 break; 3552 } 3553 break; 3554 3555 case IOSTAT_NPORT_RJT: 3556 case IOSTAT_FABRIC_RJT: 3557 if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 3558 retry = 1; 3559 break; 3560 } 3561 break; 3562 3563 case IOSTAT_NPORT_BSY: 3564 case IOSTAT_FABRIC_BSY: 3565 logerr = 1; /* Fabric / Remote NPort out of resources */ 3566 retry = 1; 3567 break; 3568 3569 case IOSTAT_LS_RJT: 3570 stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]); 3571 /* Added for Vendor specifc support 3572 * Just keep retrying for these Rsn / Exp codes 3573 */ 3574 switch (stat.un.b.lsRjtRsnCode) { 3575 case LSRJT_UNABLE_TPC: 3576 /* The driver has a VALID PLOGI but the rport has 3577 * rejected the PRLI - can't do it now. Delay 3578 * for 1 second and try again - don't care about 3579 * the explanation. 3580 */ 3581 if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) { 3582 delay = 1000; 3583 maxretry = lpfc_max_els_tries + 1; 3584 retry = 1; 3585 break; 3586 } 3587 3588 /* Legacy bug fix code for targets with PLOGI delays. 
*/ 3589 if (stat.un.b.lsRjtRsnCodeExp == 3590 LSEXP_CMD_IN_PROGRESS) { 3591 if (cmd == ELS_CMD_PLOGI) { 3592 delay = 1000; 3593 maxretry = 48; 3594 } 3595 retry = 1; 3596 break; 3597 } 3598 if (stat.un.b.lsRjtRsnCodeExp == 3599 LSEXP_CANT_GIVE_DATA) { 3600 if (cmd == ELS_CMD_PLOGI) { 3601 delay = 1000; 3602 maxretry = 48; 3603 } 3604 retry = 1; 3605 break; 3606 } 3607 if (cmd == ELS_CMD_PLOGI) { 3608 delay = 1000; 3609 maxretry = lpfc_max_els_tries + 1; 3610 retry = 1; 3611 break; 3612 } 3613 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3614 (cmd == ELS_CMD_FDISC) && 3615 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3616 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3617 "0125 FDISC Failed (x%x). " 3618 "Fabric out of resources\n", 3619 stat.un.lsRjtError); 3620 lpfc_vport_set_state(vport, 3621 FC_VPORT_NO_FABRIC_RSCS); 3622 } 3623 break; 3624 3625 case LSRJT_LOGICAL_BSY: 3626 if ((cmd == ELS_CMD_PLOGI) || 3627 (cmd == ELS_CMD_PRLI) || 3628 (cmd == ELS_CMD_NVMEPRLI)) { 3629 delay = 1000; 3630 maxretry = 48; 3631 } else if (cmd == ELS_CMD_FDISC) { 3632 /* FDISC retry policy */ 3633 maxretry = 48; 3634 if (cmdiocb->retry >= 32) 3635 delay = 1000; 3636 } 3637 retry = 1; 3638 break; 3639 3640 case LSRJT_LOGICAL_ERR: 3641 /* There are some cases where switches return this 3642 * error when they are not ready and should be returning 3643 * Logical Busy. We should delay every time. 3644 */ 3645 if (cmd == ELS_CMD_FDISC && 3646 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 3647 maxretry = 3; 3648 delay = 1000; 3649 retry = 1; 3650 } else if (cmd == ELS_CMD_FLOGI && 3651 stat.un.b.lsRjtRsnCodeExp == 3652 LSEXP_NOTHING_MORE) { 3653 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 3654 retry = 1; 3655 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3656 "0820 FLOGI Failed (x%x). " 3657 "BBCredit Not Supported\n", 3658 stat.un.lsRjtError); 3659 } 3660 break; 3661 3662 case LSRJT_PROTOCOL_ERR: 3663 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3664 (cmd == ELS_CMD_FDISC) && 3665 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 3666 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 3667 ) { 3668 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3669 "0122 FDISC Failed (x%x). " 3670 "Fabric Detected Bad WWN\n", 3671 stat.un.lsRjtError); 3672 lpfc_vport_set_state(vport, 3673 FC_VPORT_FABRIC_REJ_WWN); 3674 } 3675 break; 3676 case LSRJT_VENDOR_UNIQUE: 3677 if ((stat.un.b.vendorUnique == 0x45) && 3678 (cmd == ELS_CMD_FLOGI)) { 3679 goto out_retry; 3680 } 3681 break; 3682 case LSRJT_CMD_UNSUPPORTED: 3683 /* lpfc nvmet returns this type of LS_RJT when it 3684 * receives an FCP PRLI because lpfc nvmet only 3685 * support NVME. ELS request is terminated for FCP4 3686 * on this rport. 3687 */ 3688 if (stat.un.b.lsRjtRsnCodeExp == 3689 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 3690 spin_lock_irq(shost->host_lock); 3691 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 3692 spin_unlock_irq(shost->host_lock); 3693 retry = 0; 3694 goto out_retry; 3695 } 3696 break; 3697 } 3698 break; 3699 3700 case IOSTAT_INTERMED_RSP: 3701 case IOSTAT_BA_RJT: 3702 break; 3703 3704 default: 3705 break; 3706 } 3707 3708 if (link_reset) { 3709 rc = lpfc_link_reset(vport); 3710 if (rc) { 3711 /* Do not give up. Retry PLOGI one more time and attempt 3712 * link reset if PLOGI fails again. 
3713 */ 3714 retry = 1; 3715 delay = 100; 3716 goto out_retry; 3717 } 3718 return 1; 3719 } 3720 3721 if (did == FDMI_DID) 3722 retry = 1; 3723 3724 if ((cmd == ELS_CMD_FLOGI) && 3725 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 3726 !lpfc_error_lost_link(irsp)) { 3727 /* FLOGI retry policy */ 3728 retry = 1; 3729 /* retry FLOGI forever */ 3730 if (phba->link_flag != LS_LOOPBACK_MODE) 3731 maxretry = 0; 3732 else 3733 maxretry = 2; 3734 3735 if (cmdiocb->retry >= 100) 3736 delay = 5000; 3737 else if (cmdiocb->retry >= 32) 3738 delay = 1000; 3739 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 3740 /* retry FDISCs every second up to devloss */ 3741 retry = 1; 3742 maxretry = vport->cfg_devloss_tmo; 3743 delay = 1000; 3744 } 3745 3746 cmdiocb->retry++; 3747 if (maxretry && (cmdiocb->retry >= maxretry)) { 3748 phba->fc_stat.elsRetryExceeded++; 3749 retry = 0; 3750 } 3751 3752 if ((vport->load_flag & FC_UNLOADING) != 0) 3753 retry = 0; 3754 3755 out_retry: 3756 if (retry) { 3757 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 3758 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 3759 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 3760 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3761 "2849 Stop retry ELS command " 3762 "x%x to remote NPORT x%x, " 3763 "Data: x%x x%x\n", cmd, did, 3764 cmdiocb->retry, delay); 3765 return 0; 3766 } 3767 } 3768 3769 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 3770 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3771 "0107 Retry ELS command x%x to remote " 3772 "NPORT x%x Data: x%x x%x\n", 3773 cmd, did, cmdiocb->retry, delay); 3774 3775 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 3776 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 3777 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 3778 IOERR_NO_RESOURCES))) { 3779 /* Don't reset timer for no resources */ 3780 3781 /* If discovery / RSCN timer is running, reset it */ 3782 if (timer_pending(&vport->fc_disctmo) || 3783 (vport->fc_flag & FC_RSCN_MODE)) 3784 lpfc_set_disctmo(vport); 3785 } 3786 3787 phba->fc_stat.elsXmitRetry++; 3788 if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) { 3789 phba->fc_stat.elsDelayRetry++; 3790 ndlp->nlp_retry = cmdiocb->retry; 3791 3792 /* delay is specified in milliseconds */ 3793 mod_timer(&ndlp->nlp_delayfunc, 3794 jiffies + msecs_to_jiffies(delay)); 3795 spin_lock_irq(shost->host_lock); 3796 ndlp->nlp_flag |= NLP_DELAY_TMO; 3797 spin_unlock_irq(shost->host_lock); 3798 3799 ndlp->nlp_prev_state = ndlp->nlp_state; 3800 if ((cmd == ELS_CMD_PRLI) || 3801 (cmd == ELS_CMD_NVMEPRLI)) 3802 lpfc_nlp_set_state(vport, ndlp, 3803 NLP_STE_PRLI_ISSUE); 3804 else 3805 lpfc_nlp_set_state(vport, ndlp, 3806 NLP_STE_NPR_NODE); 3807 ndlp->nlp_last_elscmd = cmd; 3808 3809 return 1; 3810 } 3811 switch (cmd) { 3812 case ELS_CMD_FLOGI: 3813 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 3814 return 1; 3815 case ELS_CMD_FDISC: 3816 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 3817 return 1; 3818 case ELS_CMD_PLOGI: 3819 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 3820 ndlp->nlp_prev_state = ndlp->nlp_state; 3821 lpfc_nlp_set_state(vport, ndlp, 3822 NLP_STE_PLOGI_ISSUE); 3823 } 3824 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 3825 return 1; 3826 case ELS_CMD_ADISC: 3827 ndlp->nlp_prev_state = ndlp->nlp_state; 3828 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3829 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 3830 return 1; 3831 case ELS_CMD_PRLI: 3832 case ELS_CMD_NVMEPRLI: 3833 ndlp->nlp_prev_state = ndlp->nlp_state; 3834 
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3835 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 3836 return 1; 3837 case ELS_CMD_LOGO: 3838 ndlp->nlp_prev_state = ndlp->nlp_state; 3839 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3840 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 3841 return 1; 3842 } 3843 } 3844 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 3845 if (logerr) { 3846 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3847 "0137 No retry ELS command x%x to remote " 3848 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 3849 cmd, did, irsp->ulpStatus, 3850 irsp->un.ulpWord[4]); 3851 } 3852 else { 3853 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3854 "0108 No retry ELS command x%x to remote " 3855 "NPORT x%x Retried:%d Error:x%x/%x\n", 3856 cmd, did, cmdiocb->retry, irsp->ulpStatus, 3857 irsp->un.ulpWord[4]); 3858 } 3859 return 0; 3860 } 3861 3862 /** 3863 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 3864 * @phba: pointer to lpfc hba data structure. 3865 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 3866 * 3867 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 3868 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 3869 * checks to see whether there is a lpfc DMA buffer associated with the 3870 * response of the command IOCB. If so, it will be released before releasing 3871 * the lpfc DMA buffer associated with the IOCB itself. 3872 * 3873 * Return code 3874 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3875 **/ 3876 static int 3877 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 3878 { 3879 struct lpfc_dmabuf *buf_ptr; 3880 3881 /* Free the response before processing the command. */ 3882 if (!list_empty(&buf_ptr1->list)) { 3883 list_remove_head(&buf_ptr1->list, buf_ptr, 3884 struct lpfc_dmabuf, 3885 list); 3886 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3887 kfree(buf_ptr); 3888 } 3889 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 3890 kfree(buf_ptr1); 3891 return 0; 3892 } 3893 3894 /** 3895 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 3896 * @phba: pointer to lpfc hba data structure. 3897 * @buf_ptr: pointer to the lpfc dma buffer data structure. 3898 * 3899 * This routine releases the lpfc Direct Memory Access (DMA) buffer 3900 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 3901 * pool. 3902 * 3903 * Return code 3904 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 3905 **/ 3906 static int 3907 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 3908 { 3909 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3910 kfree(buf_ptr); 3911 return 0; 3912 } 3913 3914 /** 3915 * lpfc_els_free_iocb - Free a command iocb and its associated resources 3916 * @phba: pointer to lpfc hba data structure. 3917 * @elsiocb: pointer to lpfc els command iocb data structure. 3918 * 3919 * This routine frees a command IOCB and its associated resources. The 3920 * command IOCB data structure contains the reference to various associated 3921 * resources, these fields must be set to NULL if the associated reference 3922 * not present: 3923 * context1 - reference to ndlp 3924 * context2 - reference to cmd 3925 * context2->next - reference to rsp 3926 * context3 - reference to bpl 3927 * 3928 * It first properly decrements the reference count held on ndlp for the 3929 * IOCB completion callback function. 
If LPFC_DELAY_MEM_FREE flag is not 3930 * set, it invokes the lpfc_els_free_data() routine to release the Direct 3931 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it 3932 * adds the DMA buffer the @phba data structure for the delayed release. 3933 * If reference to the Buffer Pointer List (BPL) is present, the 3934 * lpfc_els_free_bpl() routine is invoked to release the DMA memory 3935 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is 3936 * invoked to release the IOCB data structure back to @phba IOCBQ list. 3937 * 3938 * Return code 3939 * 0 - Success (currently, always return 0) 3940 **/ 3941 int 3942 lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) 3943 { 3944 struct lpfc_dmabuf *buf_ptr, *buf_ptr1; 3945 struct lpfc_nodelist *ndlp; 3946 3947 ndlp = (struct lpfc_nodelist *)elsiocb->context1; 3948 if (ndlp) { 3949 if (ndlp->nlp_flag & NLP_DEFER_RM) { 3950 lpfc_nlp_put(ndlp); 3951 3952 /* If the ndlp is not being used by another discovery 3953 * thread, free it. 3954 */ 3955 if (!lpfc_nlp_not_used(ndlp)) { 3956 /* If ndlp is being used by another discovery 3957 * thread, just clear NLP_DEFER_RM 3958 */ 3959 ndlp->nlp_flag &= ~NLP_DEFER_RM; 3960 } 3961 } 3962 else 3963 lpfc_nlp_put(ndlp); 3964 elsiocb->context1 = NULL; 3965 } 3966 /* context2 = cmd, context2->next = rsp, context3 = bpl */ 3967 if (elsiocb->context2) { 3968 if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) { 3969 /* Firmware could still be in progress of DMAing 3970 * payload, so don't free data buffer till after 3971 * a hbeat. 3972 */ 3973 elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE; 3974 buf_ptr = elsiocb->context2; 3975 elsiocb->context2 = NULL; 3976 if (buf_ptr) { 3977 buf_ptr1 = NULL; 3978 spin_lock_irq(&phba->hbalock); 3979 if (!list_empty(&buf_ptr->list)) { 3980 list_remove_head(&buf_ptr->list, 3981 buf_ptr1, struct lpfc_dmabuf, 3982 list); 3983 INIT_LIST_HEAD(&buf_ptr1->list); 3984 list_add_tail(&buf_ptr1->list, 3985 &phba->elsbuf); 3986 phba->elsbuf_cnt++; 3987 } 3988 INIT_LIST_HEAD(&buf_ptr->list); 3989 list_add_tail(&buf_ptr->list, &phba->elsbuf); 3990 phba->elsbuf_cnt++; 3991 spin_unlock_irq(&phba->hbalock); 3992 } 3993 } else { 3994 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; 3995 lpfc_els_free_data(phba, buf_ptr1); 3996 elsiocb->context2 = NULL; 3997 } 3998 } 3999 4000 if (elsiocb->context3) { 4001 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; 4002 lpfc_els_free_bpl(phba, buf_ptr); 4003 elsiocb->context3 = NULL; 4004 } 4005 lpfc_sli_release_iocbq(phba, elsiocb); 4006 return 0; 4007 } 4008 4009 /** 4010 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response 4011 * @phba: pointer to lpfc hba data structure. 4012 * @cmdiocb: pointer to lpfc command iocb data structure. 4013 * @rspiocb: pointer to lpfc response iocb data structure. 4014 * 4015 * This routine is the completion callback function to the Logout (LOGO) 4016 * Accept (ACC) Response ELS command. This routine is invoked to indicate 4017 * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to 4018 * release the ndlp if it has the last reference remaining (reference count 4019 * is 1). If succeeded (meaning ndlp released), it sets the IOCB context1 4020 * field to NULL to inform the following lpfc_els_free_iocb() routine no 4021 * ndlp reference count needs to be decremented. Otherwise, the ndlp 4022 * reference use-count shall be decremented by the lpfc_els_free_iocb() 4023 * routine. 
Finally, the lpfc_els_free_iocb() is invoked to release the 4024 * IOCB data structure. 4025 **/ 4026 static void 4027 lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 4028 struct lpfc_iocbq *rspiocb) 4029 { 4030 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 4031 struct lpfc_vport *vport = cmdiocb->vport; 4032 IOCB_t *irsp; 4033 4034 irsp = &rspiocb->iocb; 4035 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4036 "ACC LOGO cmpl: status:x%x/x%x did:x%x", 4037 irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID); 4038 /* ACC to LOGO completes to NPort <nlp_DID> */ 4039 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4040 "0109 ACC to LOGO completes to NPort x%x " 4041 "Data: x%x x%x x%x\n", 4042 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4043 ndlp->nlp_rpi); 4044 4045 if (ndlp->nlp_state == NLP_STE_NPR_NODE) { 4046 /* NPort Recovery mode or node is just allocated */ 4047 if (!lpfc_nlp_not_used(ndlp)) { 4048 /* If the ndlp is being used by another discovery 4049 * thread, just unregister the RPI. 4050 */ 4051 lpfc_unreg_rpi(vport, ndlp); 4052 } else { 4053 /* Indicate the node has already released, should 4054 * not reference to it from within lpfc_els_free_iocb. 4055 */ 4056 cmdiocb->context1 = NULL; 4057 } 4058 } 4059 4060 /* 4061 * The driver received a LOGO from the rport and has ACK'd it. 4062 * At this point, the driver is done so release the IOCB 4063 */ 4064 lpfc_els_free_iocb(phba, cmdiocb); 4065 } 4066 4067 /** 4068 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd 4069 * @phba: pointer to lpfc hba data structure. 4070 * @pmb: pointer to the driver internal queue element for mailbox command. 4071 * 4072 * This routine is the completion callback function for unregister default 4073 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases 4074 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and 4075 * decrements the ndlp reference count held for this completion callback 4076 * function. After that, it invokes the lpfc_nlp_not_used() to check 4077 * whether there is only one reference left on the ndlp. If so, it will 4078 * perform one more decrement and trigger the release of the ndlp. 4079 **/ 4080 void 4081 lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4082 { 4083 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 4084 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 4085 4086 pmb->ctx_buf = NULL; 4087 pmb->ctx_ndlp = NULL; 4088 4089 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4090 kfree(mp); 4091 mempool_free(pmb, phba->mbox_mem_pool); 4092 if (ndlp) { 4093 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4094 "0006 rpi%x DID:%x flg:%x %d map:%x %p\n", 4095 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4096 kref_read(&ndlp->kref), 4097 ndlp->nlp_usg_map, ndlp); 4098 if (NLP_CHK_NODE_ACT(ndlp)) { 4099 lpfc_nlp_put(ndlp); 4100 /* This is the end of the default RPI cleanup logic for 4101 * this ndlp. If no other discovery threads are using 4102 * this ndlp, free all resources associated with it. 4103 */ 4104 lpfc_nlp_not_used(ndlp); 4105 } else { 4106 lpfc_drop_node(ndlp->vport, ndlp); 4107 } 4108 } 4109 4110 return; 4111 } 4112 4113 /** 4114 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd 4115 * @phba: pointer to lpfc hba data structure. 4116 * @cmdiocb: pointer to lpfc command iocb data structure. 4117 * @rspiocb: pointer to lpfc response iocb data structure. 
4118 *
4119 * This routine is the completion callback function for ELS Response IOCB
4120 * command. In the normal case, this callback function just properly sets
4121 * the nlp_flag bitmap in the ndlp data structure; if the mbox command
4122 * reference field in the command IOCB is not NULL, the referenced mailbox
4123 * command will be sent out, and then the lpfc_els_free_iocb() routine is
4124 * invoked to release the IOCB. Under error conditions, such as when a
4125 * LS_RJT is returned or a link down event occurred during the discovery,
4126 * the lpfc_nlp_not_used() routine shall be invoked trying to release the
4127 * ndlp if no other threads are currently referring to it.
4128 **/
4129 static void
4130 lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
4131 struct lpfc_iocbq *rspiocb)
4132 {
4133 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
4134 struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
4135 struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
4136 IOCB_t *irsp;
4137 uint8_t *pcmd;
4138 LPFC_MBOXQ_t *mbox = NULL;
4139 struct lpfc_dmabuf *mp = NULL;
4140 uint32_t ls_rjt = 0;
4141
4142 irsp = &rspiocb->iocb;
4143
4144 if (cmdiocb->context_un.mbox)
4145 mbox = cmdiocb->context_un.mbox;
4146
4147 /* First determine if this is a LS_RJT cmpl. Note, this callback
4148 * function can have cmdiocb->context1 (ndlp) field set to NULL.
4149 */
4150 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
4151 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
4152 (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
4153 /* A LS_RJT associated with Default RPI cleanup has its own
4154 * separate code path.
4155 */
4156 if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
4157 ls_rjt = 1;
4158 }
4159
4160 /* Check to see if link went down during discovery */
4161 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
4162 if (mbox) {
4163 mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
4164 if (mp) {
4165 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4166 kfree(mp);
4167 }
4168 mempool_free(mbox, phba->mbox_mem_pool);
4169 }
4170 if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
4171 (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
4172 if (lpfc_nlp_not_used(ndlp)) {
4173 ndlp = NULL;
4174 /* Indicate the node has already been released and
4175 * should not be referenced from within
4176 * the routine lpfc_els_free_iocb.
4177 */ 4178 cmdiocb->context1 = NULL; 4179 } 4180 goto out; 4181 } 4182 4183 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4184 "ELS rsp cmpl: status:x%x/x%x did:x%x", 4185 irsp->ulpStatus, irsp->un.ulpWord[4], 4186 cmdiocb->iocb.un.elsreq64.remoteID); 4187 /* ELS response tag <ulpIoTag> completes */ 4188 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4189 "0110 ELS response tag x%x completes " 4190 "Data: x%x x%x x%x x%x x%x x%x x%x\n", 4191 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, 4192 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, 4193 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4194 ndlp->nlp_rpi); 4195 if (mbox) { 4196 if ((rspiocb->iocb.ulpStatus == 0) 4197 && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { 4198 if (!lpfc_unreg_rpi(vport, ndlp) && 4199 (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 4200 ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE)) { 4201 lpfc_printf_vlog(vport, KERN_INFO, 4202 LOG_DISCOVERY, 4203 "0314 PLOGI recov DID x%x " 4204 "Data: x%x x%x x%x\n", 4205 ndlp->nlp_DID, ndlp->nlp_state, 4206 ndlp->nlp_rpi, ndlp->nlp_flag); 4207 mp = mbox->ctx_buf; 4208 if (mp) { 4209 lpfc_mbuf_free(phba, mp->virt, 4210 mp->phys); 4211 kfree(mp); 4212 } 4213 mempool_free(mbox, phba->mbox_mem_pool); 4214 goto out; 4215 } 4216 4217 /* Increment reference count to ndlp to hold the 4218 * reference to ndlp for the callback function. 4219 */ 4220 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 4221 mbox->vport = vport; 4222 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 4223 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 4224 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 4225 } 4226 else { 4227 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 4228 ndlp->nlp_prev_state = ndlp->nlp_state; 4229 lpfc_nlp_set_state(vport, ndlp, 4230 NLP_STE_REG_LOGIN_ISSUE); 4231 } 4232 4233 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 4234 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 4235 != MBX_NOT_FINISHED) 4236 goto out; 4237 4238 /* Decrement the ndlp reference count we 4239 * set for this failed mailbox command. 4240 */ 4241 lpfc_nlp_put(ndlp); 4242 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4243 4244 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 4245 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 4246 "0138 ELS rsp: Cannot issue reg_login for x%x " 4247 "Data: x%x x%x x%x\n", 4248 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4249 ndlp->nlp_rpi); 4250 4251 if (lpfc_nlp_not_used(ndlp)) { 4252 ndlp = NULL; 4253 /* Indicate node has already been released, 4254 * should not reference to it from within 4255 * the routine lpfc_els_free_iocb. 4256 */ 4257 cmdiocb->context1 = NULL; 4258 } 4259 } else { 4260 /* Do not drop node for lpfc_els_abort'ed ELS cmds */ 4261 if (!lpfc_error_lost_link(irsp) && 4262 ndlp->nlp_flag & NLP_ACC_REGLOGIN) { 4263 if (lpfc_nlp_not_used(ndlp)) { 4264 ndlp = NULL; 4265 /* Indicate node has already been 4266 * released, should not reference 4267 * to it from within the routine 4268 * lpfc_els_free_iocb. 4269 */ 4270 cmdiocb->context1 = NULL; 4271 } 4272 } 4273 } 4274 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 4275 if (mp) { 4276 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4277 kfree(mp); 4278 } 4279 mempool_free(mbox, phba->mbox_mem_pool); 4280 } 4281 out: 4282 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { 4283 spin_lock_irq(shost->host_lock); 4284 ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); 4285 spin_unlock_irq(shost->host_lock); 4286 4287 /* If the node is not being used by another discovery thread, 4288 * and we are sending a reject, we are done with it. 
4289 * Release driver reference count here and free associated 4290 * resources. 4291 */ 4292 if (ls_rjt) 4293 if (lpfc_nlp_not_used(ndlp)) 4294 /* Indicate node has already been released, 4295 * should not reference to it from within 4296 * the routine lpfc_els_free_iocb. 4297 */ 4298 cmdiocb->context1 = NULL; 4299 4300 } 4301 4302 lpfc_els_free_iocb(phba, cmdiocb); 4303 return; 4304 } 4305 4306 /** 4307 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 4308 * @vport: pointer to a host virtual N_Port data structure. 4309 * @flag: the els command code to be accepted. 4310 * @oldiocb: pointer to the original lpfc command iocb data structure. 4311 * @ndlp: pointer to a node-list data structure. 4312 * @mbox: pointer to the driver internal queue element for mailbox command. 4313 * 4314 * This routine prepares and issues an Accept (ACC) response IOCB 4315 * command. It uses the @flag to properly set up the IOCB field for the 4316 * specific ACC response command to be issued and invokes the 4317 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 4318 * @mbox pointer is passed in, it will be put into the context_un.mbox 4319 * field of the IOCB for the completion callback function to issue the 4320 * mailbox command to the HBA later when callback is invoked. 4321 * 4322 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4323 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4324 * will be stored into the context1 field of the IOCB for the completion 4325 * callback function to the corresponding response ELS IOCB command. 4326 * 4327 * Return code 4328 * 0 - Successfully issued acc response 4329 * 1 - Failed to issue acc response 4330 **/ 4331 int 4332 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 4333 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4334 LPFC_MBOXQ_t *mbox) 4335 { 4336 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4337 struct lpfc_hba *phba = vport->phba; 4338 IOCB_t *icmd; 4339 IOCB_t *oldcmd; 4340 struct lpfc_iocbq *elsiocb; 4341 uint8_t *pcmd; 4342 struct serv_parm *sp; 4343 uint16_t cmdsize; 4344 int rc; 4345 ELS_PKT *els_pkt_ptr; 4346 4347 oldcmd = &oldiocb->iocb; 4348 4349 switch (flag) { 4350 case ELS_CMD_ACC: 4351 cmdsize = sizeof(uint32_t); 4352 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4353 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4354 if (!elsiocb) { 4355 spin_lock_irq(shost->host_lock); 4356 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4357 spin_unlock_irq(shost->host_lock); 4358 return 1; 4359 } 4360 4361 icmd = &elsiocb->iocb; 4362 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4363 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4364 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4365 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4366 pcmd += sizeof(uint32_t); 4367 4368 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4369 "Issue ACC: did:x%x flg:x%x", 4370 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4371 break; 4372 case ELS_CMD_FLOGI: 4373 case ELS_CMD_PLOGI: 4374 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 4375 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4376 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4377 if (!elsiocb) 4378 return 1; 4379 4380 icmd = &elsiocb->iocb; 4381 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4382 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4383 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4384 4385 if (mbox) 4386 elsiocb->context_un.mbox = 
mbox;
4387
4388 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4389 pcmd += sizeof(uint32_t);
4390 sp = (struct serv_parm *)pcmd;
4391
4392 if (flag == ELS_CMD_FLOGI) {
4393 /* Copy the received service parameters back */
4394 memcpy(sp, &phba->fc_fabparam,
4395 sizeof(struct serv_parm));
4396
4397 /* Clear the F_Port bit */
4398 sp->cmn.fPort = 0;
4399
4400 /* Mark all class service parameters as invalid */
4401 sp->cls1.classValid = 0;
4402 sp->cls2.classValid = 0;
4403 sp->cls3.classValid = 0;
4404 sp->cls4.classValid = 0;
4405
4406 /* Copy our worldwide names */
4407 memcpy(&sp->portName, &vport->fc_sparam.portName,
4408 sizeof(struct lpfc_name));
4409 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName,
4410 sizeof(struct lpfc_name));
4411 } else {
4412 memcpy(pcmd, &vport->fc_sparam,
4413 sizeof(struct serv_parm));
4414
4415 sp->cmn.valid_vendor_ver_level = 0;
4416 memset(sp->un.vendorVersion, 0,
4417 sizeof(sp->un.vendorVersion));
4418 sp->cmn.bbRcvSizeMsb &= 0xF;
4419
4420 /* If our firmware supports this feature, convey that
4421 * info to the target using the vendor specific field.
4422 */
4423 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
4424 sp->cmn.valid_vendor_ver_level = 1;
4425 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
4426 sp->un.vv.flags =
4427 cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
4428 }
4429 }
4430
4431 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4432 "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
4433 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4434 break;
4435 case ELS_CMD_PRLO:
4436 cmdsize = sizeof(uint32_t) + sizeof(PRLO);
4437 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
4438 ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
4439 if (!elsiocb)
4440 return 1;
4441
4442 icmd = &elsiocb->iocb;
4443 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
4444 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
4445 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4446
4447 memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
4448 sizeof(uint32_t) + sizeof(PRLO));
4449 *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
4450 els_pkt_ptr = (ELS_PKT *) pcmd;
4451 els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
4452
4453 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
4454 "Issue ACC PRLO: did:x%x flg:x%x",
4455 ndlp->nlp_DID, ndlp->nlp_flag, 0);
4456 break;
4457 default:
4458 return 1;
4459 }
4460 if (ndlp->nlp_flag & NLP_LOGO_ACC) {
4461 spin_lock_irq(shost->host_lock);
4462 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
4463 ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
4464 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
4465 spin_unlock_irq(shost->host_lock);
4466 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
4467 } else {
4468 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4469 }
4470
4471 phba->fc_stat.elsXmitACC++;
4472 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4473 if (rc == IOCB_ERROR) {
4474 lpfc_els_free_iocb(phba, elsiocb);
4475 return 1;
4476 }
4477 return 0;
4478 }
4479
4480 /**
4481 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
4482 * @vport: pointer to a virtual N_Port data structure.
4483 * @rejectError: reject status (reason and explanation codes) placed in the LS_RJT payload.
4484 * @oldiocb: pointer to the original lpfc command iocb data structure.
4485 * @ndlp: pointer to a node-list data structure.
4486 * @mbox: pointer to the driver internal queue element for mailbox command.
4487 *
4488 * This routine prepares and issues a Reject (RJT) response IOCB
4489 * command.
If a @mbox pointer is passed in, it will be put into the 4490 * context_un.mbox field of the IOCB for the completion callback function 4491 * to issue to the HBA later. 4492 * 4493 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4494 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4495 * will be stored into the context1 field of the IOCB for the completion 4496 * callback function to the reject response ELS IOCB command. 4497 * 4498 * Return code 4499 * 0 - Successfully issued reject response 4500 * 1 - Failed to issue reject response 4501 **/ 4502 int 4503 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 4504 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4505 LPFC_MBOXQ_t *mbox) 4506 { 4507 struct lpfc_hba *phba = vport->phba; 4508 IOCB_t *icmd; 4509 IOCB_t *oldcmd; 4510 struct lpfc_iocbq *elsiocb; 4511 uint8_t *pcmd; 4512 uint16_t cmdsize; 4513 int rc; 4514 4515 cmdsize = 2 * sizeof(uint32_t); 4516 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4517 ndlp->nlp_DID, ELS_CMD_LS_RJT); 4518 if (!elsiocb) 4519 return 1; 4520 4521 icmd = &elsiocb->iocb; 4522 oldcmd = &oldiocb->iocb; 4523 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4524 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4525 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4526 4527 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 4528 pcmd += sizeof(uint32_t); 4529 *((uint32_t *) (pcmd)) = rejectError; 4530 4531 if (mbox) 4532 elsiocb->context_un.mbox = mbox; 4533 4534 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 4535 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4536 "0129 Xmit ELS RJT x%x response tag x%x " 4537 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 4538 "rpi x%x\n", 4539 rejectError, elsiocb->iotag, 4540 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 4541 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 4542 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4543 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 4544 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 4545 4546 phba->fc_stat.elsXmitLSRJT++; 4547 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4548 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4549 4550 if (rc == IOCB_ERROR) { 4551 lpfc_els_free_iocb(phba, elsiocb); 4552 return 1; 4553 } 4554 return 0; 4555 } 4556 4557 /** 4558 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 4559 * @vport: pointer to a virtual N_Port data structure. 4560 * @oldiocb: pointer to the original lpfc command iocb data structure. 4561 * @ndlp: pointer to a node-list data structure. 4562 * 4563 * This routine prepares and issues an Accept (ACC) response to Address 4564 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 4565 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4566 * 4567 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4568 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4569 * will be stored into the context1 field of the IOCB for the completion 4570 * callback function to the ADISC Accept response ELS IOCB command. 
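 * The ADISC ACC payload returned to the remote N_Port carries this
 * @vport's hard address (AL_PA), port name, node name and N_Port ID.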
4571 * 4572 * Return code 4573 * 0 - Successfully issued acc adisc response 4574 * 1 - Failed to issue adisc acc response 4575 **/ 4576 int 4577 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4578 struct lpfc_nodelist *ndlp) 4579 { 4580 struct lpfc_hba *phba = vport->phba; 4581 ADISC *ap; 4582 IOCB_t *icmd, *oldcmd; 4583 struct lpfc_iocbq *elsiocb; 4584 uint8_t *pcmd; 4585 uint16_t cmdsize; 4586 int rc; 4587 4588 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 4589 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4590 ndlp->nlp_DID, ELS_CMD_ACC); 4591 if (!elsiocb) 4592 return 1; 4593 4594 icmd = &elsiocb->iocb; 4595 oldcmd = &oldiocb->iocb; 4596 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4597 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4598 4599 /* Xmit ADISC ACC response tag <ulpIoTag> */ 4600 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4601 "0130 Xmit ADISC ACC response iotag x%x xri: " 4602 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 4603 elsiocb->iotag, elsiocb->iocb.ulpContext, 4604 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4605 ndlp->nlp_rpi); 4606 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4607 4608 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4609 pcmd += sizeof(uint32_t); 4610 4611 ap = (ADISC *) (pcmd); 4612 ap->hardAL_PA = phba->fc_pref_ALPA; 4613 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4614 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4615 ap->DID = be32_to_cpu(vport->fc_myDID); 4616 4617 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4618 "Issue ACC ADISC: did:x%x flg:x%x", 4619 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4620 4621 phba->fc_stat.elsXmitACC++; 4622 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4623 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4624 if (rc == IOCB_ERROR) { 4625 lpfc_els_free_iocb(phba, elsiocb); 4626 return 1; 4627 } 4628 4629 /* Xmit ELS ACC response tag <ulpIoTag> */ 4630 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4631 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 4632 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 4633 "RPI: x%x, fc_flag x%x\n", 4634 rc, elsiocb->iotag, elsiocb->sli4_xritag, 4635 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4636 ndlp->nlp_rpi, vport->fc_flag); 4637 return 0; 4638 } 4639 4640 /** 4641 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 4642 * @vport: pointer to a virtual N_Port data structure. 4643 * @oldiocb: pointer to the original lpfc command iocb data structure. 4644 * @ndlp: pointer to a node-list data structure. 4645 * 4646 * This routine prepares and issues an Accept (ACC) response to Process 4647 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 4648 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4649 * 4650 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4651 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4652 * will be stored into the context1 field of the IOCB for the completion 4653 * callback function to the PRLI Accept response ELS IOCB command. 
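 * The routine inspects word 1 of the received PRLI payload to determine
 * whether the request is an FCP or an NVME PRLI and builds the matching
 * accept parameter page (PRLI or NVME PRLI) in the response payload.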
4654 * 4655 * Return code 4656 * 0 - Successfully issued acc prli response 4657 * 1 - Failed to issue acc prli response 4658 **/ 4659 int 4660 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4661 struct lpfc_nodelist *ndlp) 4662 { 4663 struct lpfc_hba *phba = vport->phba; 4664 PRLI *npr; 4665 struct lpfc_nvme_prli *npr_nvme; 4666 lpfc_vpd_t *vpd; 4667 IOCB_t *icmd; 4668 IOCB_t *oldcmd; 4669 struct lpfc_iocbq *elsiocb; 4670 uint8_t *pcmd; 4671 uint16_t cmdsize; 4672 uint32_t prli_fc4_req, *req_payload; 4673 struct lpfc_dmabuf *req_buf; 4674 int rc; 4675 u32 elsrspcmd; 4676 4677 /* Need the incoming PRLI payload to determine if the ACC is for an 4678 * FC4 or NVME PRLI type. The PRLI type is at word 1. 4679 */ 4680 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 4681 req_payload = (((uint32_t *)req_buf->virt) + 1); 4682 4683 /* PRLI type payload is at byte 3 for FCP or NVME. */ 4684 prli_fc4_req = be32_to_cpu(*req_payload); 4685 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 4686 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4687 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 4688 prli_fc4_req, *((uint32_t *)req_payload)); 4689 4690 if (prli_fc4_req == PRLI_FCP_TYPE) { 4691 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 4692 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 4693 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 4694 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 4695 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 4696 } else { 4697 return 1; 4698 } 4699 4700 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4701 ndlp->nlp_DID, elsrspcmd); 4702 if (!elsiocb) 4703 return 1; 4704 4705 icmd = &elsiocb->iocb; 4706 oldcmd = &oldiocb->iocb; 4707 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4708 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4709 4710 /* Xmit PRLI ACC response tag <ulpIoTag> */ 4711 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4712 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 4713 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 4714 elsiocb->iotag, elsiocb->iocb.ulpContext, 4715 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4716 ndlp->nlp_rpi); 4717 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4718 memset(pcmd, 0, cmdsize); 4719 4720 *((uint32_t *)(pcmd)) = elsrspcmd; 4721 pcmd += sizeof(uint32_t); 4722 4723 /* For PRLI, remainder of payload is PRLI parameter page */ 4724 vpd = &phba->vpd; 4725 4726 if (prli_fc4_req == PRLI_FCP_TYPE) { 4727 /* 4728 * If the remote port is a target and our firmware version 4729 * is 3.20 or later, set the following bits for FC-TAPE 4730 * support. 
4731 */ 4732 npr = (PRLI *) pcmd; 4733 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 4734 (vpd->rev.feaLevelHigh >= 0x02)) { 4735 npr->ConfmComplAllowed = 1; 4736 npr->Retry = 1; 4737 npr->TaskRetryIdReq = 1; 4738 } 4739 npr->acceptRspCode = PRLI_REQ_EXECUTED; 4740 npr->estabImagePair = 1; 4741 npr->readXferRdyDis = 1; 4742 npr->ConfmComplAllowed = 1; 4743 npr->prliType = PRLI_FCP_TYPE; 4744 npr->initiatorFunc = 1; 4745 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 4746 /* Respond with an NVME PRLI Type */ 4747 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 4748 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 4749 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 4750 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 4751 if (phba->nvmet_support) { 4752 bf_set(prli_tgt, npr_nvme, 1); 4753 bf_set(prli_disc, npr_nvme, 1); 4754 if (phba->cfg_nvme_enable_fb) { 4755 bf_set(prli_fba, npr_nvme, 1); 4756 4757 /* TBD. Target mode needs to post buffers 4758 * that support the configured first burst 4759 * byte size. 4760 */ 4761 bf_set(prli_fb_sz, npr_nvme, 4762 phba->cfg_nvmet_fb_size); 4763 } 4764 } else { 4765 bf_set(prli_init, npr_nvme, 1); 4766 } 4767 4768 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 4769 "6015 NVME issue PRLI ACC word1 x%08x " 4770 "word4 x%08x word5 x%08x flag x%x, " 4771 "fcp_info x%x nlp_type x%x\n", 4772 npr_nvme->word1, npr_nvme->word4, 4773 npr_nvme->word5, ndlp->nlp_flag, 4774 ndlp->nlp_fcp_info, ndlp->nlp_type); 4775 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 4776 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 4777 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 4778 } else 4779 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4780 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 4781 prli_fc4_req, ndlp->nlp_fc4_type, 4782 ndlp->nlp_DID); 4783 4784 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4785 "Issue ACC PRLI: did:x%x flg:x%x", 4786 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4787 4788 phba->fc_stat.elsXmitACC++; 4789 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4790 4791 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4792 if (rc == IOCB_ERROR) { 4793 lpfc_els_free_iocb(phba, elsiocb); 4794 return 1; 4795 } 4796 return 0; 4797 } 4798 4799 /** 4800 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 4801 * @vport: pointer to a virtual N_Port data structure. 4802 * @format: rnid command format. 4803 * @oldiocb: pointer to the original lpfc command iocb data structure. 4804 * @ndlp: pointer to a node-list data structure. 4805 * 4806 * This routine issues a Request Node Identification Data (RNID) Accept 4807 * (ACC) response. It constructs the RNID ACC response command according to 4808 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 4809 * issue the response. Note that this command does not need to hold the ndlp 4810 * reference count for the callback. So, the ndlp reference count taken by 4811 * the lpfc_prep_els_iocb() routine is put back and the context1 field of 4812 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that 4813 * there is no ndlp reference available. 4814 * 4815 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 4816 * will be incremented by 1 for holding the ndlp and the reference to ndlp 4817 * will be stored into the context1 field of the IOCB for the completion 4818 * callback function. However, for the RNID Accept Response ELS command, 4819 * this is undone later by this routine after the IOCB is allocated. 
4820 * 4821 * Return code 4822 * 0 - Successfully issued acc rnid response 4823 * 1 - Failed to issue acc rnid response 4824 **/ 4825 static int 4826 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 4827 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4828 { 4829 struct lpfc_hba *phba = vport->phba; 4830 RNID *rn; 4831 IOCB_t *icmd, *oldcmd; 4832 struct lpfc_iocbq *elsiocb; 4833 uint8_t *pcmd; 4834 uint16_t cmdsize; 4835 int rc; 4836 4837 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 4838 + (2 * sizeof(struct lpfc_name)); 4839 if (format) 4840 cmdsize += sizeof(RNID_TOP_DISC); 4841 4842 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4843 ndlp->nlp_DID, ELS_CMD_ACC); 4844 if (!elsiocb) 4845 return 1; 4846 4847 icmd = &elsiocb->iocb; 4848 oldcmd = &oldiocb->iocb; 4849 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4850 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4851 4852 /* Xmit RNID ACC response tag <ulpIoTag> */ 4853 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4854 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 4855 elsiocb->iotag, elsiocb->iocb.ulpContext); 4856 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4857 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4858 pcmd += sizeof(uint32_t); 4859 4860 memset(pcmd, 0, sizeof(RNID)); 4861 rn = (RNID *) (pcmd); 4862 rn->Format = format; 4863 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 4864 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4865 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4866 switch (format) { 4867 case 0: 4868 rn->SpecificLen = 0; 4869 break; 4870 case RNID_TOPOLOGY_DISC: 4871 rn->SpecificLen = sizeof(RNID_TOP_DISC); 4872 memcpy(&rn->un.topologyDisc.portName, 4873 &vport->fc_portname, sizeof(struct lpfc_name)); 4874 rn->un.topologyDisc.unitType = RNID_HBA; 4875 rn->un.topologyDisc.physPort = 0; 4876 rn->un.topologyDisc.attachedNodes = 0; 4877 break; 4878 default: 4879 rn->CommonLen = 0; 4880 rn->SpecificLen = 0; 4881 break; 4882 } 4883 4884 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4885 "Issue ACC RNID: did:x%x flg:x%x", 4886 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4887 4888 phba->fc_stat.elsXmitACC++; 4889 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4890 4891 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4892 if (rc == IOCB_ERROR) { 4893 lpfc_els_free_iocb(phba, elsiocb); 4894 return 1; 4895 } 4896 return 0; 4897 } 4898 4899 /** 4900 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 4901 * @vport: pointer to a virtual N_Port data structure. 4902 * @iocb: pointer to the lpfc command iocb data structure. 4903 * @ndlp: pointer to a node-list data structure. 
4904 * 4905 * Return 4906 **/ 4907 static void 4908 lpfc_els_clear_rrq(struct lpfc_vport *vport, 4909 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 4910 { 4911 struct lpfc_hba *phba = vport->phba; 4912 uint8_t *pcmd; 4913 struct RRQ *rrq; 4914 uint16_t rxid; 4915 uint16_t xri; 4916 struct lpfc_node_rrq *prrq; 4917 4918 4919 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 4920 pcmd += sizeof(uint32_t); 4921 rrq = (struct RRQ *)pcmd; 4922 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 4923 rxid = bf_get(rrq_rxid, rrq); 4924 4925 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4926 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 4927 " x%x x%x\n", 4928 be32_to_cpu(bf_get(rrq_did, rrq)), 4929 bf_get(rrq_oxid, rrq), 4930 rxid, 4931 iocb->iotag, iocb->iocb.ulpContext); 4932 4933 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4934 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 4935 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 4936 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 4937 xri = bf_get(rrq_oxid, rrq); 4938 else 4939 xri = rxid; 4940 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 4941 if (prrq) 4942 lpfc_clr_rrq_active(phba, xri, prrq); 4943 return; 4944 } 4945 4946 /** 4947 * lpfc_els_rsp_echo_acc - Issue echo acc response 4948 * @vport: pointer to a virtual N_Port data structure. 4949 * @data: pointer to echo data to return in the accept. 4950 * @oldiocb: pointer to the original lpfc command iocb data structure. 4951 * @ndlp: pointer to a node-list data structure. 4952 * 4953 * Return code 4954 * 0 - Successfully issued acc echo response 4955 * 1 - Failed to issue acc echo response 4956 **/ 4957 static int 4958 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 4959 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 4960 { 4961 struct lpfc_hba *phba = vport->phba; 4962 struct lpfc_iocbq *elsiocb; 4963 uint8_t *pcmd; 4964 uint16_t cmdsize; 4965 int rc; 4966 4967 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 4968 4969 /* The accumulated length can exceed the BPL_SIZE. For 4970 * now, use this as the limit 4971 */ 4972 if (cmdsize > LPFC_BPL_SIZE) 4973 cmdsize = LPFC_BPL_SIZE; 4974 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4975 ndlp->nlp_DID, ELS_CMD_ACC); 4976 if (!elsiocb) 4977 return 1; 4978 4979 elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri / rx_id */ 4980 elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id; 4981 4982 /* Xmit ECHO ACC response tag <ulpIoTag> */ 4983 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4984 "2876 Xmit ECHO ACC response tag x%x xri x%x\n", 4985 elsiocb->iotag, elsiocb->iocb.ulpContext); 4986 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4987 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4988 pcmd += sizeof(uint32_t); 4989 memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); 4990 4991 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4992 "Issue ACC ECHO: did:x%x flg:x%x", 4993 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4994 4995 phba->fc_stat.elsXmitACC++; 4996 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4997 4998 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4999 if (rc == IOCB_ERROR) { 5000 lpfc_els_free_iocb(phba, elsiocb); 5001 return 1; 5002 } 5003 return 0; 5004 } 5005 5006 /** 5007 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport 5008 * @vport: pointer to a host virtual N_Port data structure. 
5009 *
5010 * This routine issues Address Discovery (ADISC) ELS commands to those
5011 * N_Ports which are in node port recovery state and for which ADISC has
5012 * not yet been issued on the @vport. Each time an ELS ADISC IOCB is issued
5013 * by invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery
5014 * count (num_disc_nodes) is incremented. If num_disc_nodes reaches the
5015 * pre-configured threshold (cfg_discovery_threads), the FC_NLP_MORE bit is
5016 * set in the @vport fc_flag and issuing of the remaining ADISC IOCBs is
5017 * deferred for a later pass. On the other hand, if the walk through all
5018 * the ndlps on the @vport issues no ADISC IOCB, the FC_NLP_MORE bit is
5019 * cleared from the @vport fc_flag, indicating there are no more ADISCs
5020 * to be sent.
5021 *
5022 * Return code
5023 * The number of N_Ports with adisc issued.
5024 **/
5025 int
5026 lpfc_els_disc_adisc(struct lpfc_vport *vport)
5027 {
5028 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5029 struct lpfc_nodelist *ndlp, *next_ndlp;
5030 int sentadisc = 0;
5031
5032 /* go thru NPR nodes and issue any remaining ELS ADISCs */
5033 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5034 if (!NLP_CHK_NODE_ACT(ndlp))
5035 continue;
5036 if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5037 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5038 (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
5039 spin_lock_irq(shost->host_lock);
5040 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5041 spin_unlock_irq(shost->host_lock);
5042 ndlp->nlp_prev_state = ndlp->nlp_state;
5043 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5044 lpfc_issue_els_adisc(vport, ndlp, 0);
5045 sentadisc++;
5046 vport->num_disc_nodes++;
5047 if (vport->num_disc_nodes >=
5048 vport->cfg_discovery_threads) {
5049 spin_lock_irq(shost->host_lock);
5050 vport->fc_flag |= FC_NLP_MORE;
5051 spin_unlock_irq(shost->host_lock);
5052 break;
5053 }
5054 }
5055 }
5056 if (sentadisc == 0) {
5057 spin_lock_irq(shost->host_lock);
5058 vport->fc_flag &= ~FC_NLP_MORE;
5059 spin_unlock_irq(shost->host_lock);
5060 }
5061 return sentadisc;
5062 }
5063
5064 /**
5065 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
5066 * @vport: pointer to a host virtual N_Port data structure.
5067 *
5068 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
5069 * which are in node port recovery state on a @vport. Each time an ELS
5070 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
5071 * the per-@vport discovery count (num_disc_nodes) is incremented. If
5072 * num_disc_nodes reaches the pre-configured threshold
5073 * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
5074 * fc_flag and issuing of the remaining PLOGI IOCBs is deferred for a
5075 * later pass. On the other hand, if the walk through all the ndlps on
5076 * the @vport issues no PLOGI IOCB, the FC_NLP_MORE bit is cleared from
5077 * the @vport fc_flag, indicating there are no more PLOGIs to be
5078 * sent.
5079 *
5080 * Return code
5081 * The number of N_Ports with plogi issued.
5082 **/ 5083 int 5084 lpfc_els_disc_plogi(struct lpfc_vport *vport) 5085 { 5086 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5087 struct lpfc_nodelist *ndlp, *next_ndlp; 5088 int sentplogi = 0; 5089 5090 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 5091 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5092 if (!NLP_CHK_NODE_ACT(ndlp)) 5093 continue; 5094 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 5095 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 5096 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 5097 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 5098 ndlp->nlp_prev_state = ndlp->nlp_state; 5099 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 5100 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5101 sentplogi++; 5102 vport->num_disc_nodes++; 5103 if (vport->num_disc_nodes >= 5104 vport->cfg_discovery_threads) { 5105 spin_lock_irq(shost->host_lock); 5106 vport->fc_flag |= FC_NLP_MORE; 5107 spin_unlock_irq(shost->host_lock); 5108 break; 5109 } 5110 } 5111 } 5112 if (sentplogi) { 5113 lpfc_set_disctmo(vport); 5114 } 5115 else { 5116 spin_lock_irq(shost->host_lock); 5117 vport->fc_flag &= ~FC_NLP_MORE; 5118 spin_unlock_irq(shost->host_lock); 5119 } 5120 return sentplogi; 5121 } 5122 5123 static uint32_t 5124 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 5125 uint32_t word0) 5126 { 5127 5128 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 5129 desc->payload.els_req = word0; 5130 desc->length = cpu_to_be32(sizeof(desc->payload)); 5131 5132 return sizeof(struct fc_rdp_link_service_desc); 5133 } 5134 5135 static uint32_t 5136 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 5137 uint8_t *page_a0, uint8_t *page_a2) 5138 { 5139 uint16_t wavelength; 5140 uint16_t temperature; 5141 uint16_t rx_power; 5142 uint16_t tx_bias; 5143 uint16_t tx_power; 5144 uint16_t vcc; 5145 uint16_t flag = 0; 5146 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 5147 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 5148 5149 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 5150 5151 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 5152 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 5153 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 5154 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 5155 5156 if ((trasn_code_byte4->fc_sw_laser) || 5157 (trasn_code_byte5->fc_sw_laser_sl) || 5158 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 5159 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 5160 } else if (trasn_code_byte4->fc_lw_laser) { 5161 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 5162 page_a0[SSF_WAVELENGTH_B0]; 5163 if (wavelength == SFP_WAVELENGTH_LC1310) 5164 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 5165 if (wavelength == SFP_WAVELENGTH_LL1550) 5166 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 5167 } 5168 /* check if its SFP+ */ 5169 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 5170 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 5171 << SFP_FLAG_CT_SHIFT; 5172 5173 /* check if its OPTICAL */ 5174 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
5175 SFP_FLAG_IS_OPTICAL_PORT : 0) 5176 << SFP_FLAG_IS_OPTICAL_SHIFT; 5177 5178 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 5179 page_a2[SFF_TEMPERATURE_B0]); 5180 vcc = (page_a2[SFF_VCC_B1] << 8 | 5181 page_a2[SFF_VCC_B0]); 5182 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 5183 page_a2[SFF_TXPOWER_B0]); 5184 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 5185 page_a2[SFF_TX_BIAS_CURRENT_B0]); 5186 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 5187 page_a2[SFF_RXPOWER_B0]); 5188 desc->sfp_info.temperature = cpu_to_be16(temperature); 5189 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 5190 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 5191 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 5192 desc->sfp_info.vcc = cpu_to_be16(vcc); 5193 5194 desc->sfp_info.flags = cpu_to_be16(flag); 5195 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 5196 5197 return sizeof(struct fc_rdp_sfp_desc); 5198 } 5199 5200 static uint32_t 5201 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 5202 READ_LNK_VAR *stat) 5203 { 5204 uint32_t type; 5205 5206 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 5207 5208 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 5209 5210 desc->info.port_type = cpu_to_be32(type); 5211 5212 desc->info.link_status.link_failure_cnt = 5213 cpu_to_be32(stat->linkFailureCnt); 5214 desc->info.link_status.loss_of_synch_cnt = 5215 cpu_to_be32(stat->lossSyncCnt); 5216 desc->info.link_status.loss_of_signal_cnt = 5217 cpu_to_be32(stat->lossSignalCnt); 5218 desc->info.link_status.primitive_seq_proto_err = 5219 cpu_to_be32(stat->primSeqErrCnt); 5220 desc->info.link_status.invalid_trans_word = 5221 cpu_to_be32(stat->invalidXmitWord); 5222 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 5223 5224 desc->length = cpu_to_be32(sizeof(desc->info)); 5225 5226 return sizeof(struct fc_rdp_link_error_status_desc); 5227 } 5228 5229 static uint32_t 5230 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 5231 struct lpfc_vport *vport) 5232 { 5233 uint32_t bbCredit; 5234 5235 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 5236 5237 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 5238 (vport->fc_sparam.cmn.bbCreditMsb << 8); 5239 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 5240 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 5241 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 5242 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 5243 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 5244 } else { 5245 desc->bbc_info.attached_port_bbc = 0; 5246 } 5247 5248 desc->bbc_info.rtt = 0; 5249 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 5250 5251 return sizeof(struct fc_rdp_bbc_desc); 5252 } 5253 5254 static uint32_t 5255 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 5256 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 5257 { 5258 uint32_t flags = 0; 5259 5260 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5261 5262 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 5263 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 5264 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 5265 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 5266 5267 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5268 flags |= RDP_OET_HIGH_ALARM; 5269 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5270 flags |= RDP_OET_LOW_ALARM; 5271 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5272 flags |= RDP_OET_HIGH_WARNING; 5273 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5274 
flags |= RDP_OET_LOW_WARNING; 5275 5276 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 5277 desc->oed_info.function_flags = cpu_to_be32(flags); 5278 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5279 return sizeof(struct fc_rdp_oed_sfp_desc); 5280 } 5281 5282 static uint32_t 5283 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 5284 struct fc_rdp_oed_sfp_desc *desc, 5285 uint8_t *page_a2) 5286 { 5287 uint32_t flags = 0; 5288 5289 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5290 5291 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 5292 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 5293 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 5294 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 5295 5296 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5297 flags |= RDP_OET_HIGH_ALARM; 5298 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5299 flags |= RDP_OET_LOW_ALARM; 5300 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5301 flags |= RDP_OET_HIGH_WARNING; 5302 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5303 flags |= RDP_OET_LOW_WARNING; 5304 5305 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 5306 desc->oed_info.function_flags = cpu_to_be32(flags); 5307 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5308 return sizeof(struct fc_rdp_oed_sfp_desc); 5309 } 5310 5311 static uint32_t 5312 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 5313 struct fc_rdp_oed_sfp_desc *desc, 5314 uint8_t *page_a2) 5315 { 5316 uint32_t flags = 0; 5317 5318 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5319 5320 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 5321 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 5322 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 5323 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 5324 5325 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5326 flags |= RDP_OET_HIGH_ALARM; 5327 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 5328 flags |= RDP_OET_LOW_ALARM; 5329 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5330 flags |= RDP_OET_HIGH_WARNING; 5331 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 5332 flags |= RDP_OET_LOW_WARNING; 5333 5334 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 5335 desc->oed_info.function_flags = cpu_to_be32(flags); 5336 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5337 return sizeof(struct fc_rdp_oed_sfp_desc); 5338 } 5339 5340 static uint32_t 5341 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 5342 struct fc_rdp_oed_sfp_desc *desc, 5343 uint8_t *page_a2) 5344 { 5345 uint32_t flags = 0; 5346 5347 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5348 5349 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 5350 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 5351 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 5352 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 5353 5354 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5355 flags |= RDP_OET_HIGH_ALARM; 5356 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 5357 flags |= RDP_OET_LOW_ALARM; 5358 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5359 flags |= RDP_OET_HIGH_WARNING; 5360 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 5361 flags |= RDP_OET_LOW_WARNING; 5362 5363 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 5364 desc->oed_info.function_flags = cpu_to_be32(flags); 5365 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 5366 return sizeof(struct fc_rdp_oed_sfp_desc); 5367 } 5368 5369 5370 static uint32_t 5371 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 5372 struct fc_rdp_oed_sfp_desc *desc, 5373 uint8_t *page_a2) 5374 { 5375 uint32_t flags = 0; 5376 5377 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5378 5379 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 5380 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 5381 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 5382 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 5383 5384 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5385 flags |= RDP_OET_HIGH_ALARM; 5386 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 5387 flags |= RDP_OET_LOW_ALARM; 5388 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5389 flags |= RDP_OET_HIGH_WARNING; 5390 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 5391 flags |= RDP_OET_LOW_WARNING; 5392 5393 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 5394 desc->oed_info.function_flags = cpu_to_be32(flags); 5395 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5396 return sizeof(struct fc_rdp_oed_sfp_desc); 5397 } 5398 5399 static uint32_t 5400 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 5401 uint8_t *page_a0, struct lpfc_vport *vport) 5402 { 5403 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 5404 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 5405 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 5406 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 5407 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 5408 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 5409 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 5410 return sizeof(struct fc_rdp_opd_sfp_desc); 5411 } 5412 5413 static uint32_t 5414 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 5415 { 5416 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 5417 return 0; 5418 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 5419 5420 desc->info.CorrectedBlocks = 5421 cpu_to_be32(stat->fecCorrBlkCount); 5422 desc->info.UncorrectableBlocks = 5423 cpu_to_be32(stat->fecUncorrBlkCount); 5424 5425 desc->length = cpu_to_be32(sizeof(desc->info)); 5426 5427 return sizeof(struct fc_fec_rdp_desc); 5428 } 5429 5430 static uint32_t 5431 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 5432 { 5433 uint16_t rdp_cap = 0; 5434 uint16_t rdp_speed; 5435 5436 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 5437 5438 switch (phba->fc_linkspeed) { 5439 case LPFC_LINK_SPEED_1GHZ: 5440 rdp_speed = RDP_PS_1GB; 5441 break; 5442 case LPFC_LINK_SPEED_2GHZ: 5443 rdp_speed = RDP_PS_2GB; 5444 break; 5445 case LPFC_LINK_SPEED_4GHZ: 5446 rdp_speed = RDP_PS_4GB; 5447 break; 5448 case LPFC_LINK_SPEED_8GHZ: 5449 rdp_speed = RDP_PS_8GB; 5450 break; 5451 case LPFC_LINK_SPEED_10GHZ: 5452 rdp_speed = RDP_PS_10GB; 5453 break; 5454 case LPFC_LINK_SPEED_16GHZ: 5455 rdp_speed = RDP_PS_16GB; 5456 break; 5457 case LPFC_LINK_SPEED_32GHZ: 5458 rdp_speed = RDP_PS_32GB; 5459 break; 5460 case LPFC_LINK_SPEED_64GHZ: 5461 rdp_speed = RDP_PS_64GB; 5462 break; 5463 default: 5464 rdp_speed = RDP_PS_UNKNOWN; 5465 break; 5466 } 5467 5468 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 5469 5470 if (phba->lmt & LMT_128Gb) 5471 rdp_cap |= RDP_PS_128GB; 5472 if (phba->lmt & LMT_64Gb) 5473 rdp_cap |= RDP_PS_64GB; 5474 if (phba->lmt & LMT_32Gb) 5475 rdp_cap |= RDP_PS_32GB; 5476 if 
(phba->lmt & LMT_16Gb) 5477 rdp_cap |= RDP_PS_16GB; 5478 if (phba->lmt & LMT_10Gb) 5479 rdp_cap |= RDP_PS_10GB; 5480 if (phba->lmt & LMT_8Gb) 5481 rdp_cap |= RDP_PS_8GB; 5482 if (phba->lmt & LMT_4Gb) 5483 rdp_cap |= RDP_PS_4GB; 5484 if (phba->lmt & LMT_2Gb) 5485 rdp_cap |= RDP_PS_2GB; 5486 if (phba->lmt & LMT_1Gb) 5487 rdp_cap |= RDP_PS_1GB; 5488 5489 if (rdp_cap == 0) 5490 rdp_cap = RDP_CAP_UNKNOWN; 5491 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 5492 rdp_cap |= RDP_CAP_USER_CONFIGURED; 5493 5494 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 5495 desc->length = cpu_to_be32(sizeof(desc->info)); 5496 return sizeof(struct fc_rdp_port_speed_desc); 5497 } 5498 5499 static uint32_t 5500 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 5501 struct lpfc_vport *vport) 5502 { 5503 5504 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5505 5506 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 5507 sizeof(desc->port_names.wwnn)); 5508 5509 memcpy(desc->port_names.wwpn, &vport->fc_portname, 5510 sizeof(desc->port_names.wwpn)); 5511 5512 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5513 return sizeof(struct fc_rdp_port_name_desc); 5514 } 5515 5516 static uint32_t 5517 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 5518 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5519 { 5520 5521 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5522 if (vport->fc_flag & FC_FABRIC) { 5523 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 5524 sizeof(desc->port_names.wwnn)); 5525 5526 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 5527 sizeof(desc->port_names.wwpn)); 5528 } else { /* Point to Point */ 5529 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 5530 sizeof(desc->port_names.wwnn)); 5531 5532 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 5533 sizeof(desc->port_names.wwpn)); 5534 } 5535 5536 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5537 return sizeof(struct fc_rdp_port_name_desc); 5538 } 5539 5540 static void 5541 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 5542 int status) 5543 { 5544 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 5545 struct lpfc_vport *vport = ndlp->vport; 5546 struct lpfc_iocbq *elsiocb; 5547 struct ulp_bde64 *bpl; 5548 IOCB_t *icmd; 5549 uint8_t *pcmd; 5550 struct ls_rjt *stat; 5551 struct fc_rdp_res_frame *rdp_res; 5552 uint32_t cmdsize, len; 5553 uint16_t *flag_ptr; 5554 int rc; 5555 5556 if (status != SUCCESS) 5557 goto error; 5558 5559 /* This will change once we know the true size of the RDP payload */ 5560 cmdsize = sizeof(struct fc_rdp_res_frame); 5561 5562 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 5563 lpfc_max_els_tries, rdp_context->ndlp, 5564 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 5565 lpfc_nlp_put(ndlp); 5566 if (!elsiocb) 5567 goto free_rdp_context; 5568 5569 icmd = &elsiocb->iocb; 5570 icmd->ulpContext = rdp_context->rx_id; 5571 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 5572 5573 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5574 "2171 Xmit RDP response tag x%x xri x%x, " 5575 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 5576 elsiocb->iotag, elsiocb->iocb.ulpContext, 5577 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5578 ndlp->nlp_rpi); 5579 rdp_res = (struct fc_rdp_res_frame *) 5580 (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5581 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5582 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 5583 *((uint32_t *) (pcmd)) = ELS_CMD_ACC;
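/* The ACC payload is built in place: word 0 carries the ELS_CMD_ACC command code, word 1 holds the total length of the descriptor list (filled in below as len - 8 once all descriptors have been appended), and the individual RDP descriptors follow starting at byte offset 8. */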
5584 5585 /* Update Alarm and Warning */ 5586 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 5587 phba->sfp_alarm |= *flag_ptr; 5588 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 5589 phba->sfp_warning |= *flag_ptr; 5590 5591 /* For RDP payload */ 5592 len = 8; 5593 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 5594 (len + pcmd), ELS_CMD_RDP); 5595 5596 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 5597 rdp_context->page_a0, rdp_context->page_a2); 5598 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 5599 phba); 5600 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 5601 (len + pcmd), &rdp_context->link_stat); 5602 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 5603 (len + pcmd), vport); 5604 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 5605 (len + pcmd), vport, ndlp); 5606 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 5607 &rdp_context->link_stat); 5608 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 5609 &rdp_context->link_stat, vport); 5610 len += lpfc_rdp_res_oed_temp_desc(phba, 5611 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5612 rdp_context->page_a2); 5613 len += lpfc_rdp_res_oed_voltage_desc(phba, 5614 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5615 rdp_context->page_a2); 5616 len += lpfc_rdp_res_oed_txbias_desc(phba, 5617 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5618 rdp_context->page_a2); 5619 len += lpfc_rdp_res_oed_txpower_desc(phba, 5620 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5621 rdp_context->page_a2); 5622 len += lpfc_rdp_res_oed_rxpower_desc(phba, 5623 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5624 rdp_context->page_a2); 5625 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 5626 rdp_context->page_a0, vport); 5627 5628 rdp_res->length = cpu_to_be32(len - 8); 5629 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5630 5631 /* Now that we know the true size of the payload, update the BPL */ 5632 bpl = (struct ulp_bde64 *) 5633 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 5634 bpl->tus.f.bdeSize = len; 5635 bpl->tus.f.bdeFlags = 0; 5636 bpl->tus.w = le32_to_cpu(bpl->tus.w); 5637 5638 phba->fc_stat.elsXmitACC++; 5639 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5640 if (rc == IOCB_ERROR) 5641 lpfc_els_free_iocb(phba, elsiocb); 5642 5643 kfree(rdp_context); 5644 5645 return; 5646 error: 5647 cmdsize = 2 * sizeof(uint32_t); 5648 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 5649 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 5650 lpfc_nlp_put(ndlp); 5651 if (!elsiocb) 5652 goto free_rdp_context; 5653 5654 icmd = &elsiocb->iocb; 5655 icmd->ulpContext = rdp_context->rx_id; 5656 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 5657 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5658 5659 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 5660 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 5661 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5662 5663 phba->fc_stat.elsXmitLSRJT++; 5664 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5665 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5666 5667 if (rc == IOCB_ERROR) 5668 lpfc_els_free_iocb(phba, elsiocb); 5669 free_rdp_context: 5670 kfree(rdp_context); 5671 } 5672 5673 static int 5674 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 5675 { 5676 LPFC_MBOXQ_t *mbox = NULL; 5677 int rc; 5678 5679 mbox = 
mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5680 if (!mbox) { 5681 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 5682 "7105 failed to allocate mailbox memory"); 5683 return 1; 5684 } 5685 5686 if (lpfc_sli4_dump_page_a0(phba, mbox)) 5687 goto prep_mbox_fail; 5688 mbox->vport = rdp_context->ndlp->vport; 5689 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 5690 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 5691 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5692 if (rc == MBX_NOT_FINISHED) 5693 goto issue_mbox_fail; 5694 5695 return 0; 5696 5697 prep_mbox_fail: 5698 issue_mbox_fail: 5699 mempool_free(mbox, phba->mbox_mem_pool); 5700 return 1; 5701 } 5702 5703 /* 5704 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 5705 * @vport: pointer to a host virtual N_Port data structure. 5706 * @cmdiocb: pointer to lpfc command iocb data structure. 5707 * @ndlp: pointer to a node-list data structure. 5708 * 5709 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 5710 * IOCB. First, the payload of the unsolicited RDP is checked. 5711 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 5712 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 5713 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 5714 * gather all data and send RDP response. 5715 * 5716 * Return code 5717 * 0 - Sent the acc response 5718 * 1 - Sent the reject response. 5719 */ 5720 static int 5721 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5722 struct lpfc_nodelist *ndlp) 5723 { 5724 struct lpfc_hba *phba = vport->phba; 5725 struct lpfc_dmabuf *pcmd; 5726 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 5727 struct fc_rdp_req_frame *rdp_req; 5728 struct lpfc_rdp_context *rdp_context; 5729 IOCB_t *cmd = NULL; 5730 struct ls_rjt stat; 5731 5732 if (phba->sli_rev < LPFC_SLI_REV4 || 5733 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 5734 LPFC_SLI_INTF_IF_TYPE_2) { 5735 rjt_err = LSRJT_UNABLE_TPC; 5736 rjt_expl = LSEXP_REQ_UNSUPPORTED; 5737 goto error; 5738 } 5739 5740 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 5741 rjt_err = LSRJT_UNABLE_TPC; 5742 rjt_expl = LSEXP_REQ_UNSUPPORTED; 5743 goto error; 5744 } 5745 5746 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5747 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 5748 5749 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5750 "2422 ELS RDP Request " 5751 "dec len %d tag x%x port_id %d len %d\n", 5752 be32_to_cpu(rdp_req->rdp_des_length), 5753 be32_to_cpu(rdp_req->nport_id_desc.tag), 5754 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 5755 be32_to_cpu(rdp_req->nport_id_desc.length)); 5756 5757 if (sizeof(struct fc_rdp_nport_desc) != 5758 be32_to_cpu(rdp_req->rdp_des_length)) 5759 goto rjt_logerr; 5760 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 5761 goto rjt_logerr; 5762 if (RDP_NPORT_ID_SIZE != 5763 be32_to_cpu(rdp_req->nport_id_desc.length)) 5764 goto rjt_logerr; 5765 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 5766 if (!rdp_context) { 5767 rjt_err = LSRJT_UNABLE_TPC; 5768 goto error; 5769 } 5770 5771 cmd = &cmdiocb->iocb; 5772 rdp_context->ndlp = lpfc_nlp_get(ndlp); 5773 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 5774 rdp_context->rx_id = cmd->ulpContext; 5775 rdp_context->cmpl = lpfc_els_rdp_cmpl; 5776 if (lpfc_get_rdp_info(phba, rdp_context)) { 5777 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 5778 "2423 Unable to send mailbox"); 5779 kfree(rdp_context); 
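/* Mailbox submission failed: the rdp context was freed above, so drop the node reference taken by lpfc_nlp_get() and fall through to send an LS_RJT. */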
5780 rjt_err = LSRJT_UNABLE_TPC; 5781 lpfc_nlp_put(ndlp); 5782 goto error; 5783 } 5784 5785 return 0; 5786 5787 rjt_logerr: 5788 rjt_err = LSRJT_LOGICAL_ERR; 5789 5790 error: 5791 memset(&stat, 0, sizeof(stat)); 5792 stat.un.b.lsRjtRsnCode = rjt_err; 5793 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 5794 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 5795 return 1; 5796 } 5797 5798 5799 static void 5800 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5801 { 5802 MAILBOX_t *mb; 5803 IOCB_t *icmd; 5804 uint8_t *pcmd; 5805 struct lpfc_iocbq *elsiocb; 5806 struct lpfc_nodelist *ndlp; 5807 struct ls_rjt *stat; 5808 union lpfc_sli4_cfg_shdr *shdr; 5809 struct lpfc_lcb_context *lcb_context; 5810 struct fc_lcb_res_frame *lcb_res; 5811 uint32_t cmdsize, shdr_status, shdr_add_status; 5812 int rc; 5813 5814 mb = &pmb->u.mb; 5815 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 5816 ndlp = lcb_context->ndlp; 5817 pmb->ctx_ndlp = NULL; 5818 pmb->ctx_buf = NULL; 5819 5820 shdr = (union lpfc_sli4_cfg_shdr *) 5821 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 5822 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5823 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5824 5825 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 5826 "0194 SET_BEACON_CONFIG mailbox " 5827 "completed with status x%x add_status x%x," 5828 " mbx status x%x\n", 5829 shdr_status, shdr_add_status, mb->mbxStatus); 5830 5831 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 5832 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 5833 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 5834 mempool_free(pmb, phba->mbox_mem_pool); 5835 goto error; 5836 } 5837 5838 mempool_free(pmb, phba->mbox_mem_pool); 5839 cmdsize = sizeof(struct fc_lcb_res_frame); 5840 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5841 lpfc_max_els_tries, ndlp, 5842 ndlp->nlp_DID, ELS_CMD_ACC); 5843 5844 /* Decrement the ndlp reference count from previous mbox command */ 5845 lpfc_nlp_put(ndlp); 5846 5847 if (!elsiocb) 5848 goto free_lcb_context; 5849 5850 lcb_res = (struct fc_lcb_res_frame *) 5851 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5852 5853 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 5854 icmd = &elsiocb->iocb; 5855 icmd->ulpContext = lcb_context->rx_id; 5856 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 5857 5858 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5859 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 5860 lcb_res->lcb_sub_command = lcb_context->sub_command; 5861 lcb_res->lcb_type = lcb_context->type; 5862 lcb_res->capability = lcb_context->capability; 5863 lcb_res->lcb_frequency = lcb_context->frequency; 5864 lcb_res->lcb_duration = lcb_context->duration; 5865 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5866 phba->fc_stat.elsXmitACC++; 5867 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5868 if (rc == IOCB_ERROR) 5869 lpfc_els_free_iocb(phba, elsiocb); 5870 5871 kfree(lcb_context); 5872 return; 5873 5874 error: 5875 cmdsize = sizeof(struct fc_lcb_res_frame); 5876 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 5877 lpfc_max_els_tries, ndlp, 5878 ndlp->nlp_DID, ELS_CMD_LS_RJT); 5879 lpfc_nlp_put(ndlp); 5880 if (!elsiocb) 5881 goto free_lcb_context; 5882 5883 icmd = &elsiocb->iocb; 5884 icmd->ulpContext = lcb_context->rx_id; 5885 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 5886 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 5887 5888 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 5889 stat = (struct ls_rjt 
*)(pcmd + sizeof(uint32_t)); 5890 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 5891 5892 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 5893 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 5894 5895 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5896 phba->fc_stat.elsXmitLSRJT++; 5897 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5898 if (rc == IOCB_ERROR) 5899 lpfc_els_free_iocb(phba, elsiocb); 5900 free_lcb_context: 5901 kfree(lcb_context); 5902 } 5903 5904 static int 5905 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 5906 struct lpfc_lcb_context *lcb_context, 5907 uint32_t beacon_state) 5908 { 5909 struct lpfc_hba *phba = vport->phba; 5910 union lpfc_sli4_cfg_shdr *cfg_shdr; 5911 LPFC_MBOXQ_t *mbox = NULL; 5912 uint32_t len; 5913 int rc; 5914 5915 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5916 if (!mbox) 5917 return 1; 5918 5919 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 5920 len = sizeof(struct lpfc_mbx_set_beacon_config) - 5921 sizeof(struct lpfc_sli4_cfg_mhdr); 5922 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5923 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 5924 LPFC_SLI4_MBX_EMBED); 5925 mbox->ctx_ndlp = (void *)lcb_context; 5926 mbox->vport = phba->pport; 5927 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 5928 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 5929 phba->sli4_hba.physical_port); 5930 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 5931 beacon_state); 5932 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 5933 5934 /* 5935 * Check bv1s bit before issuing the mailbox 5936 * if bv1s == 1, LCB V1 supported 5937 * else, LCB V0 supported 5938 */ 5939 5940 if (phba->sli4_hba.pc_sli4_params.bv1s) { 5941 /* COMMON_SET_BEACON_CONFIG_V1 */ 5942 cfg_shdr->request.word9 = BEACON_VERSION_V1; 5943 lcb_context->capability |= LCB_CAPABILITY_DURATION; 5944 bf_set(lpfc_mbx_set_beacon_port_type, 5945 &mbox->u.mqe.un.beacon_config, 0); 5946 bf_set(lpfc_mbx_set_beacon_duration_v1, 5947 &mbox->u.mqe.un.beacon_config, 5948 be16_to_cpu(lcb_context->duration)); 5949 } else { 5950 /* COMMON_SET_BEACON_CONFIG_V0 */ 5951 if (be16_to_cpu(lcb_context->duration) != 0) { 5952 mempool_free(mbox, phba->mbox_mem_pool); 5953 return 1; 5954 } 5955 cfg_shdr->request.word9 = BEACON_VERSION_V0; 5956 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 5957 bf_set(lpfc_mbx_set_beacon_state, 5958 &mbox->u.mqe.un.beacon_config, beacon_state); 5959 bf_set(lpfc_mbx_set_beacon_port_type, 5960 &mbox->u.mqe.un.beacon_config, 1); 5961 bf_set(lpfc_mbx_set_beacon_duration, 5962 &mbox->u.mqe.un.beacon_config, 5963 be16_to_cpu(lcb_context->duration)); 5964 } 5965 5966 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5967 if (rc == MBX_NOT_FINISHED) { 5968 mempool_free(mbox, phba->mbox_mem_pool); 5969 return 1; 5970 } 5971 5972 return 0; 5973 } 5974 5975 5976 /** 5977 * lpfc_els_rcv_lcb - Process an unsolicited LCB 5978 * @vport: pointer to a host virtual N_Port data structure. 5979 * @cmdiocb: pointer to lpfc command iocb data structure. 5980 * @ndlp: pointer to a node-list data structure. 5981 * 5982 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 5983 * First, the payload of the unsolicited LCB is checked. 5984 * Then based on Subcommand beacon will either turn on or off. 5985 * 5986 * Return code 5987 * 0 - Sent the acc response 5988 * 1 - Sent the reject response. 
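 * Note that LCB is only supported on SLI-4 FC ports with interface type 2 or later; on older interfaces or in FCoE mode the request is rejected with LSRJT_CMD_UNSUPPORTED.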
5989 **/ 5990 static int 5991 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 5992 struct lpfc_nodelist *ndlp) 5993 { 5994 struct lpfc_hba *phba = vport->phba; 5995 struct lpfc_dmabuf *pcmd; 5996 uint8_t *lp; 5997 struct fc_lcb_request_frame *beacon; 5998 struct lpfc_lcb_context *lcb_context; 5999 uint8_t state, rjt_err; 6000 struct ls_rjt stat; 6001 6002 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 6003 lp = (uint8_t *)pcmd->virt; 6004 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 6005 6006 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6007 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 6008 "type x%x frequency %x duration x%x\n", 6009 lp[0], lp[1], lp[2], 6010 beacon->lcb_command, 6011 beacon->lcb_sub_command, 6012 beacon->lcb_type, 6013 beacon->lcb_frequency, 6014 be16_to_cpu(beacon->lcb_duration)); 6015 6016 if (beacon->lcb_sub_command != LPFC_LCB_ON && 6017 beacon->lcb_sub_command != LPFC_LCB_OFF) { 6018 rjt_err = LSRJT_CMD_UNSUPPORTED; 6019 goto rjt; 6020 } 6021 6022 if (phba->sli_rev < LPFC_SLI_REV4 || 6023 phba->hba_flag & HBA_FCOE_MODE || 6024 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6025 LPFC_SLI_INTF_IF_TYPE_2)) { 6026 rjt_err = LSRJT_CMD_UNSUPPORTED; 6027 goto rjt; 6028 } 6029 6030 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 6031 if (!lcb_context) { 6032 rjt_err = LSRJT_UNABLE_TPC; 6033 goto rjt; 6034 } 6035 6036 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 6037 lcb_context->sub_command = beacon->lcb_sub_command; 6038 lcb_context->capability = 0; 6039 lcb_context->type = beacon->lcb_type; 6040 lcb_context->frequency = beacon->lcb_frequency; 6041 lcb_context->duration = beacon->lcb_duration; 6042 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 6043 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 6044 lcb_context->ndlp = lpfc_nlp_get(ndlp); 6045 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 6046 lpfc_printf_vlog(ndlp->vport, KERN_ERR, 6047 LOG_ELS, "0193 failed to send mail box"); 6048 kfree(lcb_context); 6049 lpfc_nlp_put(ndlp); 6050 rjt_err = LSRJT_UNABLE_TPC; 6051 goto rjt; 6052 } 6053 return 0; 6054 rjt: 6055 memset(&stat, 0, sizeof(stat)); 6056 stat.un.b.lsRjtRsnCode = rjt_err; 6057 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6058 return 1; 6059 } 6060 6061 6062 /** 6063 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 6064 * @vport: pointer to a host virtual N_Port data structure. 6065 * 6066 * This routine cleans up any Registration State Change Notification 6067 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 6068 * @vport together with the host_lock is used to prevent multiple thread 6069 * trying to access the RSCN array on a same @vport at the same time. 
6070 **/ 6071 void 6072 lpfc_els_flush_rscn(struct lpfc_vport *vport) 6073 { 6074 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6075 struct lpfc_hba *phba = vport->phba; 6076 int i; 6077 6078 spin_lock_irq(shost->host_lock); 6079 if (vport->fc_rscn_flush) { 6080 /* Another thread is walking fc_rscn_id_list on this vport */ 6081 spin_unlock_irq(shost->host_lock); 6082 return; 6083 } 6084 /* Indicate we are walking lpfc_els_flush_rscn on this vport */ 6085 vport->fc_rscn_flush = 1; 6086 spin_unlock_irq(shost->host_lock); 6087 6088 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 6089 lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]); 6090 vport->fc_rscn_id_list[i] = NULL; 6091 } 6092 spin_lock_irq(shost->host_lock); 6093 vport->fc_rscn_id_cnt = 0; 6094 vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY); 6095 spin_unlock_irq(shost->host_lock); 6096 lpfc_can_disctmo(vport); 6097 /* Indicate we are done walking this fc_rscn_id_list */ 6098 vport->fc_rscn_flush = 0; 6099 } 6100 6101 /** 6102 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did 6103 * @vport: pointer to a host virtual N_Port data structure. 6104 * @did: remote destination port identifier. 6105 * 6106 * This routine checks whether there is any pending Registration State 6107 * Configuration Notification (RSCN) to a @did on @vport. 6108 * 6109 * Return code 6110 * None zero - The @did matched with a pending rscn 6111 * 0 - not able to match @did with a pending rscn 6112 **/ 6113 int 6114 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) 6115 { 6116 D_ID ns_did; 6117 D_ID rscn_did; 6118 uint32_t *lp; 6119 uint32_t payload_len, i; 6120 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6121 6122 ns_did.un.word = did; 6123 6124 /* Never match fabric nodes for RSCNs */ 6125 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 6126 return 0; 6127 6128 /* If we are doing a FULL RSCN rediscovery, match everything */ 6129 if (vport->fc_flag & FC_RSCN_DISCOVERY) 6130 return did; 6131 6132 spin_lock_irq(shost->host_lock); 6133 if (vport->fc_rscn_flush) { 6134 /* Another thread is walking fc_rscn_id_list on this vport */ 6135 spin_unlock_irq(shost->host_lock); 6136 return 0; 6137 } 6138 /* Indicate we are walking fc_rscn_id_list on this vport */ 6139 vport->fc_rscn_flush = 1; 6140 spin_unlock_irq(shost->host_lock); 6141 for (i = 0; i < vport->fc_rscn_id_cnt; i++) { 6142 lp = vport->fc_rscn_id_list[i]->virt; 6143 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6144 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6145 while (payload_len) { 6146 rscn_did.un.word = be32_to_cpu(*lp++); 6147 payload_len -= sizeof(uint32_t); 6148 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { 6149 case RSCN_ADDRESS_FORMAT_PORT: 6150 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 6151 && (ns_did.un.b.area == rscn_did.un.b.area) 6152 && (ns_did.un.b.id == rscn_did.un.b.id)) 6153 goto return_did_out; 6154 break; 6155 case RSCN_ADDRESS_FORMAT_AREA: 6156 if ((ns_did.un.b.domain == rscn_did.un.b.domain) 6157 && (ns_did.un.b.area == rscn_did.un.b.area)) 6158 goto return_did_out; 6159 break; 6160 case RSCN_ADDRESS_FORMAT_DOMAIN: 6161 if (ns_did.un.b.domain == rscn_did.un.b.domain) 6162 goto return_did_out; 6163 break; 6164 case RSCN_ADDRESS_FORMAT_FABRIC: 6165 goto return_did_out; 6166 } 6167 } 6168 } 6169 /* Indicate we are done with walking fc_rscn_id_list on this vport */ 6170 vport->fc_rscn_flush = 0; 6171 return 0; 6172 return_did_out: 6173 /* Indicate we are done with walking fc_rscn_id_list on this 
vport */ 6174 vport->fc_rscn_flush = 0; 6175 return did; 6176 } 6177 6178 /** 6179 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 6180 * @vport: pointer to a host virtual N_Port data structure. 6181 * 6182 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 6183 * state machine for a @vport's nodes that are with pending RSCN (Registration 6184 * State Change Notification). 6185 * 6186 * Return code 6187 * 0 - Successful (currently alway return 0) 6188 **/ 6189 static int 6190 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 6191 { 6192 struct lpfc_nodelist *ndlp = NULL; 6193 6194 /* Move all affected nodes by pending RSCNs to NPR state. */ 6195 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6196 if (!NLP_CHK_NODE_ACT(ndlp) || 6197 (ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 6198 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 6199 continue; 6200 6201 /* NVME Target mode does not do RSCN Recovery. */ 6202 if (vport->phba->nvmet_support) 6203 continue; 6204 6205 /* If we are in the process of doing discovery on this 6206 * NPort, let it continue on its own. 6207 */ 6208 switch (ndlp->nlp_state) { 6209 case NLP_STE_PLOGI_ISSUE: 6210 case NLP_STE_ADISC_ISSUE: 6211 case NLP_STE_REG_LOGIN_ISSUE: 6212 case NLP_STE_PRLI_ISSUE: 6213 case NLP_STE_LOGO_ISSUE: 6214 continue; 6215 } 6216 6217 6218 lpfc_disc_state_machine(vport, ndlp, NULL, 6219 NLP_EVT_DEVICE_RECOVERY); 6220 lpfc_cancel_retry_delay_tmo(vport, ndlp); 6221 } 6222 return 0; 6223 } 6224 6225 /** 6226 * lpfc_send_rscn_event - Send an RSCN event to management application 6227 * @vport: pointer to a host virtual N_Port data structure. 6228 * @cmdiocb: pointer to lpfc command iocb data structure. 6229 * 6230 * lpfc_send_rscn_event sends an RSCN netlink event to management 6231 * applications. 6232 */ 6233 static void 6234 lpfc_send_rscn_event(struct lpfc_vport *vport, 6235 struct lpfc_iocbq *cmdiocb) 6236 { 6237 struct lpfc_dmabuf *pcmd; 6238 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6239 uint32_t *payload_ptr; 6240 uint32_t payload_len; 6241 struct lpfc_rscn_event_header *rscn_event_data; 6242 6243 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6244 payload_ptr = (uint32_t *) pcmd->virt; 6245 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 6246 6247 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 6248 payload_len, GFP_KERNEL); 6249 if (!rscn_event_data) { 6250 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6251 "0147 Failed to allocate memory for RSCN event\n"); 6252 return; 6253 } 6254 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 6255 rscn_event_data->payload_length = payload_len; 6256 memcpy(rscn_event_data->rscn_payload, payload_ptr, 6257 payload_len); 6258 6259 fc_host_post_vendor_event(shost, 6260 fc_get_event_number(), 6261 sizeof(struct lpfc_rscn_event_header) + payload_len, 6262 (char *)rscn_event_data, 6263 LPFC_NL_VENDOR_ID); 6264 6265 kfree(rscn_event_data); 6266 } 6267 6268 /** 6269 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 6270 * @vport: pointer to a host virtual N_Port data structure. 6271 * @cmdiocb: pointer to lpfc command iocb data structure. 6272 * @ndlp: pointer to a node-list data structure. 6273 * 6274 * This routine processes an unsolicited RSCN (Registration State Change 6275 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked 6276 * to invoke fc_host_post_event() routine to the FC transport layer. 
If the 6277 * discover state machine is about to begin discovery, it just accepts the 6278 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 6279 * contains N_Port IDs for other vports on this HBA, it just accepts the 6280 * RSCN and ignore processing it. If the state machine is in the recovery 6281 * state, the fc_rscn_id_list of this @vport is walked and the 6282 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 6283 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 6284 * routine is invoked to handle the RSCN event. 6285 * 6286 * Return code 6287 * 0 - Just sent the acc response 6288 * 1 - Sent the acc response and waited for name server completion 6289 **/ 6290 static int 6291 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6292 struct lpfc_nodelist *ndlp) 6293 { 6294 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6295 struct lpfc_hba *phba = vport->phba; 6296 struct lpfc_dmabuf *pcmd; 6297 uint32_t *lp, *datap; 6298 uint32_t payload_len, length, nportid, *cmd; 6299 int rscn_cnt; 6300 int rscn_id = 0, hba_id = 0; 6301 int i; 6302 6303 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6304 lp = (uint32_t *) pcmd->virt; 6305 6306 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6307 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6308 /* RSCN received */ 6309 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6310 "0214 RSCN received Data: x%x x%x x%x x%x\n", 6311 vport->fc_flag, payload_len, *lp, 6312 vport->fc_rscn_id_cnt); 6313 6314 /* Send an RSCN event to the management application */ 6315 lpfc_send_rscn_event(vport, cmdiocb); 6316 6317 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 6318 fc_host_post_event(shost, fc_get_event_number(), 6319 FCH_EVT_RSCN, lp[i]); 6320 6321 /* If we are about to begin discovery, just ACC the RSCN. 6322 * Discovery processing will satisfy it. 6323 */ 6324 if (vport->port_state <= LPFC_NS_QRY) { 6325 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6326 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 6327 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6328 6329 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6330 return 0; 6331 } 6332 6333 /* If this RSCN just contains NPortIDs for other vports on this HBA, 6334 * just ACC and ignore it. 
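 * Each N_Port ID in the payload is looked up with lpfc_find_vport_by_did(); the RSCN is ignored only when every ID resolves to a local vport.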
6335 */ 6336 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6337 !(vport->cfg_peer_port_login)) { 6338 i = payload_len; 6339 datap = lp; 6340 while (i > 0) { 6341 nportid = *datap++; 6342 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 6343 i -= sizeof(uint32_t); 6344 rscn_id++; 6345 if (lpfc_find_vport_by_did(phba, nportid)) 6346 hba_id++; 6347 } 6348 if (rscn_id == hba_id) { 6349 /* ALL NPortIDs in RSCN are on HBA */ 6350 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6351 "0219 Ignore RSCN " 6352 "Data: x%x x%x x%x x%x\n", 6353 vport->fc_flag, payload_len, 6354 *lp, vport->fc_rscn_id_cnt); 6355 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6356 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 6357 ndlp->nlp_DID, vport->port_state, 6358 ndlp->nlp_flag); 6359 6360 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 6361 ndlp, NULL); 6362 return 0; 6363 } 6364 } 6365 6366 spin_lock_irq(shost->host_lock); 6367 if (vport->fc_rscn_flush) { 6368 /* Another thread is walking fc_rscn_id_list on this vport */ 6369 vport->fc_flag |= FC_RSCN_DISCOVERY; 6370 spin_unlock_irq(shost->host_lock); 6371 /* Send back ACC */ 6372 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6373 return 0; 6374 } 6375 /* Indicate we are walking fc_rscn_id_list on this vport */ 6376 vport->fc_rscn_flush = 1; 6377 spin_unlock_irq(shost->host_lock); 6378 /* Get the array count after successfully have the token */ 6379 rscn_cnt = vport->fc_rscn_id_cnt; 6380 /* If we are already processing an RSCN, save the received 6381 * RSCN payload buffer, cmdiocb->context2 to process later. 6382 */ 6383 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 6384 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6385 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 6386 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6387 6388 spin_lock_irq(shost->host_lock); 6389 vport->fc_flag |= FC_RSCN_DEFERRED; 6390 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 6391 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 6392 vport->fc_flag |= FC_RSCN_MODE; 6393 spin_unlock_irq(shost->host_lock); 6394 if (rscn_cnt) { 6395 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 6396 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 6397 } 6398 if ((rscn_cnt) && 6399 (payload_len + length <= LPFC_BPL_SIZE)) { 6400 *cmd &= ELS_CMD_MASK; 6401 *cmd |= cpu_to_be32(payload_len + length); 6402 memcpy(((uint8_t *)cmd) + length, lp, 6403 payload_len); 6404 } else { 6405 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 6406 vport->fc_rscn_id_cnt++; 6407 /* If we zero, cmdiocb->context2, the calling 6408 * routine will not try to free it. 
6409 */ 6410 cmdiocb->context2 = NULL; 6411 } 6412 /* Deferred RSCN */ 6413 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6414 "0235 Deferred RSCN " 6415 "Data: x%x x%x x%x\n", 6416 vport->fc_rscn_id_cnt, vport->fc_flag, 6417 vport->port_state); 6418 } else { 6419 vport->fc_flag |= FC_RSCN_DISCOVERY; 6420 spin_unlock_irq(shost->host_lock); 6421 /* ReDiscovery RSCN */ 6422 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6423 "0234 ReDiscovery RSCN " 6424 "Data: x%x x%x x%x\n", 6425 vport->fc_rscn_id_cnt, vport->fc_flag, 6426 vport->port_state); 6427 } 6428 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6429 vport->fc_rscn_flush = 0; 6430 /* Send back ACC */ 6431 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6432 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6433 lpfc_rscn_recovery_check(vport); 6434 return 0; 6435 } 6436 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6437 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 6438 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6439 6440 spin_lock_irq(shost->host_lock); 6441 vport->fc_flag |= FC_RSCN_MODE; 6442 spin_unlock_irq(shost->host_lock); 6443 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 6444 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6445 vport->fc_rscn_flush = 0; 6446 /* 6447 * If we zero, cmdiocb->context2, the calling routine will 6448 * not try to free it. 6449 */ 6450 cmdiocb->context2 = NULL; 6451 lpfc_set_disctmo(vport); 6452 /* Send back ACC */ 6453 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6454 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6455 lpfc_rscn_recovery_check(vport); 6456 return lpfc_els_handle_rscn(vport); 6457 } 6458 6459 /** 6460 * lpfc_els_handle_rscn - Handle rscn for a vport 6461 * @vport: pointer to a host virtual N_Port data structure. 6462 * 6463 * This routine handles the Registration State Configuration Notification 6464 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 6465 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 6466 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 6467 * NameServer shall be issued. If CT command to the NameServer fails to be 6468 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 6469 * RSCN activities with the @vport. 6470 * 6471 * Return code 6472 * 0 - Cleaned up rscn on the @vport 6473 * 1 - Wait for plogi to name server before proceed 6474 **/ 6475 int 6476 lpfc_els_handle_rscn(struct lpfc_vport *vport) 6477 { 6478 struct lpfc_nodelist *ndlp; 6479 struct lpfc_hba *phba = vport->phba; 6480 6481 /* Ignore RSCN if the port is being torn down. */ 6482 if (vport->load_flag & FC_UNLOADING) { 6483 lpfc_els_flush_rscn(vport); 6484 return 0; 6485 } 6486 6487 /* Start timer for RSCN processing */ 6488 lpfc_set_disctmo(vport); 6489 6490 /* RSCN processed */ 6491 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6492 "0215 RSCN processed Data: x%x x%x x%x x%x\n", 6493 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 6494 vport->port_state); 6495 6496 /* To process RSCN, first compare RSCN data with NameServer */ 6497 vport->fc_ns_retry = 0; 6498 vport->num_disc_nodes = 0; 6499 6500 ndlp = lpfc_findnode_did(vport, NameServer_DID); 6501 if (ndlp && NLP_CHK_NODE_ACT(ndlp) 6502 && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 6503 /* Good ndlp, issue CT Request to NameServer. Need to 6504 * know how many gidfts were issued. If none, then just 6505 * flush the RSCN. 
Otherwise, the outstanding requests 6506 * need to complete. 6507 */ 6508 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 6509 if (lpfc_issue_gidft(vport) > 0) 6510 return 1; 6511 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 6512 if (lpfc_issue_gidpt(vport) > 0) 6513 return 1; 6514 } else { 6515 return 1; 6516 } 6517 } else { 6518 /* Nameserver login in question. Revalidate. */ 6519 if (ndlp) { 6520 ndlp = lpfc_enable_node(vport, ndlp, 6521 NLP_STE_PLOGI_ISSUE); 6522 if (!ndlp) { 6523 lpfc_els_flush_rscn(vport); 6524 return 0; 6525 } 6526 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 6527 } else { 6528 ndlp = lpfc_nlp_init(vport, NameServer_DID); 6529 if (!ndlp) { 6530 lpfc_els_flush_rscn(vport); 6531 return 0; 6532 } 6533 ndlp->nlp_prev_state = ndlp->nlp_state; 6534 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6535 } 6536 ndlp->nlp_type |= NLP_FABRIC; 6537 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 6538 /* Wait for NameServer login cmpl before we can 6539 * continue 6540 */ 6541 return 1; 6542 } 6543 6544 lpfc_els_flush_rscn(vport); 6545 return 0; 6546 } 6547 6548 /** 6549 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 6550 * @vport: pointer to a host virtual N_Port data structure. 6551 * @cmdiocb: pointer to lpfc command iocb data structure. 6552 * @ndlp: pointer to a node-list data structure. 6553 * 6554 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 6555 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 6556 * point topology. As an unsolicited FLOGI should not be received in a loop 6557 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 6558 * lpfc_check_sparm() routine is invoked to check the parameters in the 6559 * unsolicited FLOGI. If parameters validation failed, the routine 6560 * lpfc_els_rsp_reject() shall be called with reject reason code set to 6561 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 6562 * FLOGI shall be compared with the Port WWN of the @vport to determine who 6563 * will initiate PLOGI. The higher lexicographical value party shall has 6564 * higher priority (as the winning port) and will initiate PLOGI and 6565 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 6566 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 6567 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 
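 * Note that if this port has not yet issued its own FLOGI (HBA_FLOGI_ISSUED is not set in hba_flag), the ACC is not sent immediately: the exchange's rx_id and ox_id are saved in defer_flogi_acc_rx_id/ox_id and the response is deferred until after the driver issues its FLOGI.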
6568 * 6569 * Return code 6570 * 0 - Successfully processed the unsolicited flogi 6571 * 1 - Failed to process the unsolicited flogi 6572 **/ 6573 static int 6574 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6575 struct lpfc_nodelist *ndlp) 6576 { 6577 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6578 struct lpfc_hba *phba = vport->phba; 6579 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6580 uint32_t *lp = (uint32_t *) pcmd->virt; 6581 IOCB_t *icmd = &cmdiocb->iocb; 6582 struct serv_parm *sp; 6583 LPFC_MBOXQ_t *mbox; 6584 uint32_t cmd, did; 6585 int rc; 6586 uint32_t fc_flag = 0; 6587 uint32_t port_state = 0; 6588 6589 cmd = *lp++; 6590 sp = (struct serv_parm *) lp; 6591 6592 /* FLOGI received */ 6593 6594 lpfc_set_disctmo(vport); 6595 6596 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 6597 /* We should never receive a FLOGI in loop mode, ignore it */ 6598 did = icmd->un.elsreq64.remoteID; 6599 6600 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 6601 Loop Mode */ 6602 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 6603 "0113 An FLOGI ELS command x%x was " 6604 "received from DID x%x in Loop Mode\n", 6605 cmd, did); 6606 return 1; 6607 } 6608 6609 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 6610 6611 /* 6612 * If our portname is greater than the remote portname, 6613 * then we initiate Nport login. 6614 */ 6615 6616 rc = memcmp(&vport->fc_portname, &sp->portName, 6617 sizeof(struct lpfc_name)); 6618 6619 if (!rc) { 6620 if (phba->sli_rev < LPFC_SLI_REV4) { 6621 mbox = mempool_alloc(phba->mbox_mem_pool, 6622 GFP_KERNEL); 6623 if (!mbox) 6624 return 1; 6625 lpfc_linkdown(phba); 6626 lpfc_init_link(phba, mbox, 6627 phba->cfg_topology, 6628 phba->cfg_link_speed); 6629 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 6630 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 6631 mbox->vport = vport; 6632 rc = lpfc_sli_issue_mbox(phba, mbox, 6633 MBX_NOWAIT); 6634 lpfc_set_loopback_flag(phba); 6635 if (rc == MBX_NOT_FINISHED) 6636 mempool_free(mbox, phba->mbox_mem_pool); 6637 return 1; 6638 } 6639 6640 /* abort the flogi coming back to ourselves 6641 * due to external loopback on the port. 6642 */ 6643 lpfc_els_abort_flogi(phba); 6644 return 0; 6645 6646 } else if (rc > 0) { /* greater than */ 6647 spin_lock_irq(shost->host_lock); 6648 vport->fc_flag |= FC_PT2PT_PLOGI; 6649 spin_unlock_irq(shost->host_lock); 6650 6651 /* If we have the high WWPN we can assign our own 6652 * myDID; otherwise, we have to WAIT for a PLOGI 6653 * from the remote NPort to find out what it 6654 * will be. 6655 */ 6656 vport->fc_myDID = PT2PT_LocalID; 6657 } else { 6658 vport->fc_myDID = PT2PT_RemoteID; 6659 } 6660 6661 /* 6662 * The vport state should go to LPFC_FLOGI only 6663 * AFTER we issue a FLOGI, not receive one. 6664 */ 6665 spin_lock_irq(shost->host_lock); 6666 fc_flag = vport->fc_flag; 6667 port_state = vport->port_state; 6668 vport->fc_flag |= FC_PT2PT; 6669 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 6670 6671 /* Acking an unsol FLOGI. Count 1 for link bounce 6672 * work-around. 6673 */ 6674 vport->rcv_flogi_cnt++; 6675 spin_unlock_irq(shost->host_lock); 6676 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6677 "3311 Rcv Flogi PS x%x new PS x%x " 6678 "fc_flag x%x new fc_flag x%x\n", 6679 port_state, vport->port_state, 6680 fc_flag, vport->fc_flag); 6681 6682 /* 6683 * We temporarily set fc_myDID to make it look like we are 6684 * a Fabric. This is done just so we end up with the right 6685 * did / sid on the FLOGI ACC rsp. 
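 * The saved fc_myDID is restored once the ACC has been queued (or once the deferred-ACC bookkeeping is done).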
6686 */ 6687 did = vport->fc_myDID; 6688 vport->fc_myDID = Fabric_DID; 6689 6690 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 6691 6692 /* Defer ACC response until AFTER we issue a FLOGI */ 6693 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 6694 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext; 6695 phba->defer_flogi_acc_ox_id = 6696 cmdiocb->iocb.unsli3.rcvsli3.ox_id; 6697 6698 vport->fc_myDID = did; 6699 6700 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6701 "3344 Deferring FLOGI ACC: rx_id: x%x," 6702 " ox_id: x%x, hba_flag x%x\n", 6703 phba->defer_flogi_acc_rx_id, 6704 phba->defer_flogi_acc_ox_id, phba->hba_flag); 6705 6706 phba->defer_flogi_acc_flag = true; 6707 6708 return 0; 6709 } 6710 6711 /* Send back ACC */ 6712 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 6713 6714 /* Now lets put fc_myDID back to what its supposed to be */ 6715 vport->fc_myDID = did; 6716 6717 return 0; 6718 } 6719 6720 /** 6721 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 6722 * @vport: pointer to a host virtual N_Port data structure. 6723 * @cmdiocb: pointer to lpfc command iocb data structure. 6724 * @ndlp: pointer to a node-list data structure. 6725 * 6726 * This routine processes Request Node Identification Data (RNID) IOCB 6727 * received as an ELS unsolicited event. Only when the RNID specified format 6728 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 6729 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 6730 * Accept (ACC) the RNID ELS command. All the other RNID formats are 6731 * rejected by invoking the lpfc_els_rsp_reject() routine. 6732 * 6733 * Return code 6734 * 0 - Successfully processed rnid iocb (currently always return 0) 6735 **/ 6736 static int 6737 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6738 struct lpfc_nodelist *ndlp) 6739 { 6740 struct lpfc_dmabuf *pcmd; 6741 uint32_t *lp; 6742 RNID *rn; 6743 struct ls_rjt stat; 6744 6745 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6746 lp = (uint32_t *) pcmd->virt; 6747 6748 lp++; 6749 rn = (RNID *) lp; 6750 6751 /* RNID received */ 6752 6753 switch (rn->Format) { 6754 case 0: 6755 case RNID_TOPOLOGY_DISC: 6756 /* Send back ACC */ 6757 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 6758 break; 6759 default: 6760 /* Reject this request because format not supported */ 6761 stat.un.b.lsRjtRsvd0 = 0; 6762 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6763 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 6764 stat.un.b.vendorUnique = 0; 6765 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 6766 NULL); 6767 } 6768 return 0; 6769 } 6770 6771 /** 6772 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 6773 * @vport: pointer to a host virtual N_Port data structure. 6774 * @cmdiocb: pointer to lpfc command iocb data structure. 6775 * @ndlp: pointer to a node-list data structure. 
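 *
 * This routine processes an Echo (ECHO) ELS command received as an unsolicited event: it skips over the first word of the payload (the ECHO command code) and invokes lpfc_els_rsp_echo_acc() to return the remaining echo data to the sender in an Accept (ACC) response.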
6776 * 6777 * Return code 6778 * 0 - Successfully processed echo iocb (currently always return 0) 6779 **/ 6780 static int 6781 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6782 struct lpfc_nodelist *ndlp) 6783 { 6784 uint8_t *pcmd; 6785 6786 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 6787 6788 /* skip over first word of echo command to find echo data */ 6789 pcmd += sizeof(uint32_t); 6790 6791 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 6792 return 0; 6793 } 6794 6795 /** 6796 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 6797 * @vport: pointer to a host virtual N_Port data structure. 6798 * @cmdiocb: pointer to lpfc command iocb data structure. 6799 * @ndlp: pointer to a node-list data structure. 6800 * 6801 * This routine processes a Link Incident Report Registration(LIRR) IOCB 6802 * received as an ELS unsolicited event. Currently, this function just invokes 6803 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 6804 * 6805 * Return code 6806 * 0 - Successfully processed lirr iocb (currently always return 0) 6807 **/ 6808 static int 6809 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6810 struct lpfc_nodelist *ndlp) 6811 { 6812 struct ls_rjt stat; 6813 6814 /* For now, unconditionally reject this command */ 6815 stat.un.b.lsRjtRsvd0 = 0; 6816 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6817 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 6818 stat.un.b.vendorUnique = 0; 6819 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6820 return 0; 6821 } 6822 6823 /** 6824 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 6825 * @vport: pointer to a host virtual N_Port data structure. 6826 * @cmdiocb: pointer to lpfc command iocb data structure. 6827 * @ndlp: pointer to a node-list data structure. 6828 * 6829 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 6830 * received as an ELS unsolicited event. A request to RRQ shall only 6831 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 6832 * Nx_Port N_Port_ID of the target Exchange is the same as the 6833 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 6834 * not accepted, an LS_RJT with reason code "Unable to perform 6835 * command request" and reason code explanation "Invalid Originator 6836 * S_ID" shall be returned. For now, we just unconditionally accept 6837 * RRQ from the target. 6838 **/ 6839 static void 6840 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6841 struct lpfc_nodelist *ndlp) 6842 { 6843 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6844 if (vport->phba->sli_rev == LPFC_SLI_REV4) 6845 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 6846 } 6847 6848 /** 6849 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 6850 * @phba: pointer to lpfc hba data structure. 6851 * @pmb: pointer to the driver internal queue element for mailbox command. 6852 * 6853 * This routine is the completion callback function for the MBX_READ_LNK_STAT 6854 * mailbox command. This callback function is to actually send the Accept 6855 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 6856 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 6857 * mailbox command, constructs the RPS response with the link statistics 6858 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 6859 * response to the RPS. 
6860 * 6861 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 6862 * will be incremented by 1 for holding the ndlp and the reference to ndlp 6863 * will be stored into the context1 field of the IOCB for the completion 6864 * callback function to the RPS Accept Response ELS IOCB command. 6865 * 6866 **/ 6867 static void 6868 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6869 { 6870 MAILBOX_t *mb; 6871 IOCB_t *icmd; 6872 struct RLS_RSP *rls_rsp; 6873 uint8_t *pcmd; 6874 struct lpfc_iocbq *elsiocb; 6875 struct lpfc_nodelist *ndlp; 6876 uint16_t oxid; 6877 uint16_t rxid; 6878 uint32_t cmdsize; 6879 6880 mb = &pmb->u.mb; 6881 6882 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 6883 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 6884 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 6885 pmb->ctx_buf = NULL; 6886 pmb->ctx_ndlp = NULL; 6887 6888 if (mb->mbxStatus) { 6889 mempool_free(pmb, phba->mbox_mem_pool); 6890 return; 6891 } 6892 6893 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 6894 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6895 lpfc_max_els_tries, ndlp, 6896 ndlp->nlp_DID, ELS_CMD_ACC); 6897 6898 /* Decrement the ndlp reference count from previous mbox command */ 6899 lpfc_nlp_put(ndlp); 6900 6901 if (!elsiocb) { 6902 mempool_free(pmb, phba->mbox_mem_pool); 6903 return; 6904 } 6905 6906 icmd = &elsiocb->iocb; 6907 icmd->ulpContext = rxid; 6908 icmd->unsli3.rcvsli3.ox_id = oxid; 6909 6910 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6911 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6912 pcmd += sizeof(uint32_t); /* Skip past command */ 6913 rls_rsp = (struct RLS_RSP *)pcmd; 6914 6915 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 6916 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 6917 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 6918 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 6919 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 6920 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 6921 mempool_free(pmb, phba->mbox_mem_pool); 6922 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 6923 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 6924 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 6925 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 6926 elsiocb->iotag, elsiocb->iocb.ulpContext, 6927 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 6928 ndlp->nlp_rpi); 6929 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6930 phba->fc_stat.elsXmitACC++; 6931 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 6932 lpfc_els_free_iocb(phba, elsiocb); 6933 } 6934 6935 /** 6936 * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 6937 * @phba: pointer to lpfc hba data structure. 6938 * @pmb: pointer to the driver internal queue element for mailbox command. 6939 * 6940 * This routine is the completion callback function for the MBX_READ_LNK_STAT 6941 * mailbox command. This callback function is to actually send the Accept 6942 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It 6943 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 6944 * mailbox command, constructs the RPS response with the link statistics 6945 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 6946 * response to the RPS. 
6947 * 6948 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 6949 * will be incremented by 1 for holding the ndlp and the reference to ndlp 6950 * will be stored into the context1 field of the IOCB for the completion 6951 * callback function to the RPS Accept Response ELS IOCB command. 6952 * 6953 **/ 6954 static void 6955 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6956 { 6957 MAILBOX_t *mb; 6958 IOCB_t *icmd; 6959 RPS_RSP *rps_rsp; 6960 uint8_t *pcmd; 6961 struct lpfc_iocbq *elsiocb; 6962 struct lpfc_nodelist *ndlp; 6963 uint16_t status; 6964 uint16_t oxid; 6965 uint16_t rxid; 6966 uint32_t cmdsize; 6967 6968 mb = &pmb->u.mb; 6969 6970 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 6971 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 6972 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 6973 pmb->ctx_ndlp = NULL; 6974 pmb->ctx_buf = NULL; 6975 6976 if (mb->mbxStatus) { 6977 mempool_free(pmb, phba->mbox_mem_pool); 6978 return; 6979 } 6980 6981 cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t); 6982 mempool_free(pmb, phba->mbox_mem_pool); 6983 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6984 lpfc_max_els_tries, ndlp, 6985 ndlp->nlp_DID, ELS_CMD_ACC); 6986 6987 /* Decrement the ndlp reference count from previous mbox command */ 6988 lpfc_nlp_put(ndlp); 6989 6990 if (!elsiocb) 6991 return; 6992 6993 icmd = &elsiocb->iocb; 6994 icmd->ulpContext = rxid; 6995 icmd->unsli3.rcvsli3.ox_id = oxid; 6996 6997 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 6998 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 6999 pcmd += sizeof(uint32_t); /* Skip past command */ 7000 rps_rsp = (RPS_RSP *)pcmd; 7001 7002 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) 7003 status = 0x10; 7004 else 7005 status = 0x8; 7006 if (phba->pport->fc_flag & FC_FABRIC) 7007 status |= 0x4; 7008 7009 rps_rsp->rsvd1 = 0; 7010 rps_rsp->portStatus = cpu_to_be16(status); 7011 rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 7012 rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 7013 rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 7014 rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 7015 rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 7016 rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 7017 /* Xmit ELS RPS ACC response tag <ulpIoTag> */ 7018 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7019 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, " 7020 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 7021 elsiocb->iotag, elsiocb->iocb.ulpContext, 7022 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7023 ndlp->nlp_rpi); 7024 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7025 phba->fc_stat.elsXmitACC++; 7026 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 7027 lpfc_els_free_iocb(phba, elsiocb); 7028 return; 7029 } 7030 7031 /** 7032 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 7033 * @vport: pointer to a host virtual N_Port data structure. 7034 * @cmdiocb: pointer to lpfc command iocb data structure. 7035 * @ndlp: pointer to a node-list data structure. 7036 * 7037 * This routine processes Read Port Status (RPL) IOCB received as an 7038 * ELS unsolicited event. It first checks the remote port state. If the 7039 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7040 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 7041 * response. 
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
7042 * for reading the HBA link statistics. The callback function,
7043 * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
7044 * actually sends out the RLS Accept (ACC) response.
7045 *
7046 * Return codes
7047 * 0 - Successfully processed rls iocb (currently always return 0)
7048 **/
7049 static int
7050 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7051 struct lpfc_nodelist *ndlp)
7052 {
7053 struct lpfc_hba *phba = vport->phba;
7054 LPFC_MBOXQ_t *mbox;
7055 struct ls_rjt stat;
7056
7057 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7058 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7059 /* reject the unsolicited RLS request and done with it */
7060 goto reject_out;
7061
7062 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
7063 if (mbox) {
7064 lpfc_read_lnk_stat(phba, mbox);
7065 mbox->ctx_buf = (void *)((unsigned long)
7066 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
7067 cmdiocb->iocb.ulpContext)); /* rx_id */
7068 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
7069 mbox->vport = vport;
7070 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
7071 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7072 != MBX_NOT_FINISHED)
7073 /* Mbox completion will send ELS Response */
7074 return 0;
7075 /* Decrement reference count used for the failed mbox
7076 * command.
7077 */
7078 lpfc_nlp_put(ndlp);
7079 mempool_free(mbox, phba->mbox_mem_pool);
7080 }
7081 reject_out:
7082 /* issue rejection response */
7083 stat.un.b.lsRjtRsvd0 = 0;
7084 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7085 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7086 stat.un.b.vendorUnique = 0;
7087 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7088 return 0;
7089 }
7090
7091 /**
7092 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
7093 * @vport: pointer to a host virtual N_Port data structure.
7094 * @cmdiocb: pointer to lpfc command iocb data structure.
7095 * @ndlp: pointer to a node-list data structure.
7096 *
7097 * This routine processes Read Timeout Value (RTV) IOCB received as an
7098 * ELS unsolicited event. It first checks the remote port state. If the
7099 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
7100 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
7101 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout
7102 * Value (RTV) unsolicited IOCB event.
7103 *
7104 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
7105 * will be incremented by 1 for holding the ndlp and the reference to ndlp
7106 * will be stored into the context1 field of the IOCB for the completion
7107 * callback function to the RTV Accept Response ELS IOCB command.
7108 * 7109 * Return codes 7110 * 0 - Successfully processed rtv iocb (currently always return 0) 7111 **/ 7112 static int 7113 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7114 struct lpfc_nodelist *ndlp) 7115 { 7116 struct lpfc_hba *phba = vport->phba; 7117 struct ls_rjt stat; 7118 struct RTV_RSP *rtv_rsp; 7119 uint8_t *pcmd; 7120 struct lpfc_iocbq *elsiocb; 7121 uint32_t cmdsize; 7122 7123 7124 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7125 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 7126 /* reject the unsolicited RPS request and done with it */ 7127 goto reject_out; 7128 7129 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 7130 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7131 lpfc_max_els_tries, ndlp, 7132 ndlp->nlp_DID, ELS_CMD_ACC); 7133 7134 if (!elsiocb) 7135 return 1; 7136 7137 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7138 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7139 pcmd += sizeof(uint32_t); /* Skip past command */ 7140 7141 /* use the command's xri in the response */ 7142 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 7143 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7144 7145 rtv_rsp = (struct RTV_RSP *)pcmd; 7146 7147 /* populate RTV payload */ 7148 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 7149 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 7150 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 7151 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 7152 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 7153 7154 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 7155 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7156 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 7157 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 7158 "Data: x%x x%x x%x\n", 7159 elsiocb->iotag, elsiocb->iocb.ulpContext, 7160 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7161 ndlp->nlp_rpi, 7162 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 7163 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7164 phba->fc_stat.elsXmitACC++; 7165 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) 7166 lpfc_els_free_iocb(phba, elsiocb); 7167 return 0; 7168 7169 reject_out: 7170 /* issue rejection response */ 7171 stat.un.b.lsRjtRsvd0 = 0; 7172 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7173 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7174 stat.un.b.vendorUnique = 0; 7175 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7176 return 0; 7177 } 7178 7179 /* lpfc_els_rcv_rps - Process an unsolicited rps iocb 7180 * @vport: pointer to a host virtual N_Port data structure. 7181 * @cmdiocb: pointer to lpfc command iocb data structure. 7182 * @ndlp: pointer to a node-list data structure. 7183 * 7184 * This routine processes Read Port Status (RPS) IOCB received as an 7185 * ELS unsolicited event. It first checks the remote port state. If the 7186 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7187 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 7188 * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command 7189 * for reading the HBA link statistics. It is for the callback function, 7190 * lpfc_els_rsp_rps_acc(), set to the MBX_READ_LNK_STAT mailbox command 7191 * to actually sending out RPS Accept (ACC) response. 
7192 *
7193 * Return codes
7194 * 0 - Successfully processed rps iocb (currently always return 0)
7195 **/
7196 static int
7197 lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7198 struct lpfc_nodelist *ndlp)
7199 {
7200 struct lpfc_hba *phba = vport->phba;
7201 uint32_t *lp;
7202 uint8_t flag;
7203 LPFC_MBOXQ_t *mbox;
7204 struct lpfc_dmabuf *pcmd;
7205 RPS *rps;
7206 struct ls_rjt stat;
7207
7208 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
7209 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
7210 /* reject the unsolicited RPS request and done with it */
7211 goto reject_out;
7212
7213 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
7214 lp = (uint32_t *) pcmd->virt;
7215 flag = (be32_to_cpu(*lp++) & 0xf);
7216 rps = (RPS *) lp;
7217
7218 if ((flag == 0) ||
7219 ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
7220 ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
7221 sizeof(struct lpfc_name)) == 0))) {
7222
7223 printk("Fix me....\n");
7224 dump_stack();
7225 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
7226 if (mbox) {
7227 lpfc_read_lnk_stat(phba, mbox);
7228 mbox->ctx_buf = (void *)((unsigned long)
7229 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) |
7230 cmdiocb->iocb.ulpContext)); /* rx_id */
7231 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
7232 mbox->vport = vport;
7233 mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
7234 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
7235 != MBX_NOT_FINISHED)
7236 /* Mbox completion will send ELS Response */
7237 return 0;
7238 /* Decrement reference count used for the failed mbox
7239 * command.
7240 */
7241 lpfc_nlp_put(ndlp);
7242 mempool_free(mbox, phba->mbox_mem_pool);
7243 }
7244 }
7245
7246 reject_out:
7247 /* issue rejection response */
7248 stat.un.b.lsRjtRsvd0 = 0;
7249 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
7250 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
7251 stat.un.b.vendorUnique = 0;
7252 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
7253 return 0;
7254 }
7255
7256 /* lpfc_issue_els_rrq - Issue an els rrq command iocb to a remote node
7257 * @vport: pointer to a host virtual N_Port data structure.
7258 * @ndlp: pointer to a node-list data structure.
7259 * @did: DID of the target.
7260 * @rrq: Pointer to the rrq struct.
7261 *
7262 * Build an ELS RRQ command and send it to the target. If the issue_iocb is
7263 * successful, the completion handler will clear the RRQ.
7264 *
7265 * Return codes
7266 * 0 - Successfully sent rrq els iocb.
7267 * 1 - Failed to send rrq els iocb.
7268 **/ 7269 static int 7270 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7271 uint32_t did, struct lpfc_node_rrq *rrq) 7272 { 7273 struct lpfc_hba *phba = vport->phba; 7274 struct RRQ *els_rrq; 7275 struct lpfc_iocbq *elsiocb; 7276 uint8_t *pcmd; 7277 uint16_t cmdsize; 7278 int ret; 7279 7280 7281 if (ndlp != rrq->ndlp) 7282 ndlp = rrq->ndlp; 7283 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 7284 return 1; 7285 7286 /* If ndlp is not NULL, we will bump the reference count on it */ 7287 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 7288 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 7289 ELS_CMD_RRQ); 7290 if (!elsiocb) 7291 return 1; 7292 7293 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7294 7295 /* For RRQ request, remainder of payload is Exchange IDs */ 7296 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 7297 pcmd += sizeof(uint32_t); 7298 els_rrq = (struct RRQ *) pcmd; 7299 7300 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 7301 bf_set(rrq_rxid, els_rrq, rrq->rxid); 7302 bf_set(rrq_did, els_rrq, vport->fc_myDID); 7303 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 7304 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 7305 7306 7307 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7308 "Issue RRQ: did:x%x", 7309 did, rrq->xritag, rrq->rxid); 7310 elsiocb->context_un.rrq = rrq; 7311 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 7312 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7313 7314 if (ret == IOCB_ERROR) { 7315 lpfc_els_free_iocb(phba, elsiocb); 7316 return 1; 7317 } 7318 return 0; 7319 } 7320 7321 /** 7322 * lpfc_send_rrq - Sends ELS RRQ if needed. 7323 * @phba: pointer to lpfc hba data structure. 7324 * @rrq: pointer to the active rrq. 7325 * 7326 * This routine will call the lpfc_issue_els_rrq if the rrq is 7327 * still active for the xri. If this function returns a failure then 7328 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 7329 * 7330 * Returns 0 Success. 7331 * 1 Failure. 7332 **/ 7333 int 7334 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 7335 { 7336 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 7337 rrq->nlp_DID); 7338 if (!ndlp) 7339 return 1; 7340 7341 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 7342 return lpfc_issue_els_rrq(rrq->vport, ndlp, 7343 rrq->nlp_DID, rrq); 7344 else 7345 return 1; 7346 } 7347 7348 /** 7349 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 7350 * @vport: pointer to a host virtual N_Port data structure. 7351 * @cmdsize: size of the ELS command. 7352 * @oldiocb: pointer to the original lpfc command iocb data structure. 7353 * @ndlp: pointer to a node-list data structure. 7354 * 7355 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 7356 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 7357 * 7358 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 7359 * will be incremented by 1 for holding the ndlp and the reference to ndlp 7360 * will be stored into the context1 field of the IOCB for the completion 7361 * callback function to the RPL Accept Response ELS command. 
7362 * 7363 * Return code 7364 * 0 - Successfully issued ACC RPL ELS command 7365 * 1 - Failed to issue ACC RPL ELS command 7366 **/ 7367 static int 7368 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 7369 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 7370 { 7371 struct lpfc_hba *phba = vport->phba; 7372 IOCB_t *icmd, *oldcmd; 7373 RPL_RSP rpl_rsp; 7374 struct lpfc_iocbq *elsiocb; 7375 uint8_t *pcmd; 7376 7377 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 7378 ndlp->nlp_DID, ELS_CMD_ACC); 7379 7380 if (!elsiocb) 7381 return 1; 7382 7383 icmd = &elsiocb->iocb; 7384 oldcmd = &oldiocb->iocb; 7385 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 7386 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 7387 7388 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7389 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7390 pcmd += sizeof(uint16_t); 7391 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 7392 pcmd += sizeof(uint16_t); 7393 7394 /* Setup the RPL ACC payload */ 7395 rpl_rsp.listLen = be32_to_cpu(1); 7396 rpl_rsp.index = 0; 7397 rpl_rsp.port_num_blk.portNum = 0; 7398 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 7399 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 7400 sizeof(struct lpfc_name)); 7401 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 7402 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 7403 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7404 "0120 Xmit ELS RPL ACC response tag x%x " 7405 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 7406 "rpi x%x\n", 7407 elsiocb->iotag, elsiocb->iocb.ulpContext, 7408 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7409 ndlp->nlp_rpi); 7410 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7411 phba->fc_stat.elsXmitACC++; 7412 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 7413 IOCB_ERROR) { 7414 lpfc_els_free_iocb(phba, elsiocb); 7415 return 1; 7416 } 7417 return 0; 7418 } 7419 7420 /** 7421 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 7422 * @vport: pointer to a host virtual N_Port data structure. 7423 * @cmdiocb: pointer to lpfc command iocb data structure. 7424 * @ndlp: pointer to a node-list data structure. 7425 * 7426 * This routine processes Read Port List (RPL) IOCB received as an ELS 7427 * unsolicited event. It first checks the remote port state. If the remote 7428 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 7429 * invokes the lpfc_els_rsp_reject() routine to send reject response. 7430 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 7431 * to accept the RPL. 
7432 * 7433 * Return code 7434 * 0 - Successfully processed rpl iocb (currently always return 0) 7435 **/ 7436 static int 7437 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7438 struct lpfc_nodelist *ndlp) 7439 { 7440 struct lpfc_dmabuf *pcmd; 7441 uint32_t *lp; 7442 uint32_t maxsize; 7443 uint16_t cmdsize; 7444 RPL *rpl; 7445 struct ls_rjt stat; 7446 7447 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7448 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 7449 /* issue rejection response */ 7450 stat.un.b.lsRjtRsvd0 = 0; 7451 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7452 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7453 stat.un.b.vendorUnique = 0; 7454 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7455 NULL); 7456 /* rejected the unsolicited RPL request and done with it */ 7457 return 0; 7458 } 7459 7460 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7461 lp = (uint32_t *) pcmd->virt; 7462 rpl = (RPL *) (lp + 1); 7463 maxsize = be32_to_cpu(rpl->maxsize); 7464 7465 /* We support only one port */ 7466 if ((rpl->index == 0) && 7467 ((maxsize == 0) || 7468 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 7469 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 7470 } else { 7471 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 7472 } 7473 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 7474 7475 return 0; 7476 } 7477 7478 /** 7479 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 7480 * @vport: pointer to a virtual N_Port data structure. 7481 * @cmdiocb: pointer to lpfc command iocb data structure. 7482 * @ndlp: pointer to a node-list data structure. 7483 * 7484 * This routine processes Fibre Channel Address Resolution Protocol 7485 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 7486 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 7487 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 7488 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 7489 * remote PortName is compared against the FC PortName stored in the @vport 7490 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 7491 * compared against the FC NodeName stored in the @vport data structure. 7492 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 7493 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 7494 * invoked to send out FARP Response to the remote node. Before sending the 7495 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 7496 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 7497 * routine is invoked to log into the remote port first. 
7498 * 7499 * Return code 7500 * 0 - Either the FARP Match Mode not supported or successfully processed 7501 **/ 7502 static int 7503 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7504 struct lpfc_nodelist *ndlp) 7505 { 7506 struct lpfc_dmabuf *pcmd; 7507 uint32_t *lp; 7508 IOCB_t *icmd; 7509 FARP *fp; 7510 uint32_t cnt, did; 7511 7512 icmd = &cmdiocb->iocb; 7513 did = icmd->un.elsreq64.remoteID; 7514 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7515 lp = (uint32_t *) pcmd->virt; 7516 7517 lp++; 7518 fp = (FARP *) lp; 7519 /* FARP-REQ received from DID <did> */ 7520 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7521 "0601 FARP-REQ received from DID x%x\n", did); 7522 /* We will only support match on WWPN or WWNN */ 7523 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 7524 return 0; 7525 } 7526 7527 cnt = 0; 7528 /* If this FARP command is searching for my portname */ 7529 if (fp->Mflags & FARP_MATCH_PORT) { 7530 if (memcmp(&fp->RportName, &vport->fc_portname, 7531 sizeof(struct lpfc_name)) == 0) 7532 cnt = 1; 7533 } 7534 7535 /* If this FARP command is searching for my nodename */ 7536 if (fp->Mflags & FARP_MATCH_NODE) { 7537 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 7538 sizeof(struct lpfc_name)) == 0) 7539 cnt = 1; 7540 } 7541 7542 if (cnt) { 7543 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 7544 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 7545 /* Log back into the node before sending the FARP. */ 7546 if (fp->Rflags & FARP_REQUEST_PLOGI) { 7547 ndlp->nlp_prev_state = ndlp->nlp_state; 7548 lpfc_nlp_set_state(vport, ndlp, 7549 NLP_STE_PLOGI_ISSUE); 7550 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 7551 } 7552 7553 /* Send a FARP response to that node */ 7554 if (fp->Rflags & FARP_REQUEST_FARPR) 7555 lpfc_issue_els_farpr(vport, did, 0); 7556 } 7557 } 7558 return 0; 7559 } 7560 7561 /** 7562 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 7563 * @vport: pointer to a host virtual N_Port data structure. 7564 * @cmdiocb: pointer to lpfc command iocb data structure. 7565 * @ndlp: pointer to a node-list data structure. 7566 * 7567 * This routine processes Fibre Channel Address Resolution Protocol 7568 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 7569 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 7570 * the FARP response request. 7571 * 7572 * Return code 7573 * 0 - Successfully processed FARPR IOCB (currently always return 0) 7574 **/ 7575 static int 7576 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7577 struct lpfc_nodelist *ndlp) 7578 { 7579 struct lpfc_dmabuf *pcmd; 7580 uint32_t *lp; 7581 IOCB_t *icmd; 7582 uint32_t did; 7583 7584 icmd = &cmdiocb->iocb; 7585 did = icmd->un.elsreq64.remoteID; 7586 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7587 lp = (uint32_t *) pcmd->virt; 7588 7589 lp++; 7590 /* FARP-RSP received from DID <did> */ 7591 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7592 "0600 FARP-RSP received from DID x%x\n", did); 7593 /* ACCEPT the Farp resp request */ 7594 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7595 7596 return 0; 7597 } 7598 7599 /** 7600 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 7601 * @vport: pointer to a host virtual N_Port data structure. 7602 * @cmdiocb: pointer to lpfc command iocb data structure. 7603 * @fan_ndlp: pointer to a node-list data structure. 
7604 *
7605 * This routine processes a Fabric Address Notification (FAN) IOCB
7606 * command received as an ELS unsolicited event. The FAN ELS command will
7607 * only be processed on a physical port (i.e., the @vport represents the
7608 * physical port). The fabric NodeName and PortName from the FAN IOCB are
7609 * compared against those in the phba data structure. If any of those is
7610 * different, the lpfc_initial_flogi() routine is invoked to initialize
7611 * Fabric Login (FLOGI) to the fabric to start the discovery over. Otherwise,
7612 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
7613 * is invoked to register login to the fabric.
7614 *
7615 * Return code
7616 * 0 - Successfully processed fan iocb (currently always return 0).
7617 **/
7618 static int
7619 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
7620 struct lpfc_nodelist *fan_ndlp)
7621 {
7622 struct lpfc_hba *phba = vport->phba;
7623 uint32_t *lp;
7624 FAN *fp;
7625
7626 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
7627 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
7628 fp = (FAN *) ++lp;
7629 /* FAN received; Fan does not have a reply sequence */
7630 if ((vport == phba->pport) &&
7631 (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
7632 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
7633 sizeof(struct lpfc_name))) ||
7634 (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
7635 sizeof(struct lpfc_name)))) {
7636 /* This port has switched fabrics. FLOGI is required */
7637 lpfc_issue_init_vfi(vport);
7638 } else {
7639 /* FAN verified - skip FLOGI */
7640 vport->fc_myDID = vport->fc_prevDID;
7641 if (phba->sli_rev < LPFC_SLI_REV4)
7642 lpfc_issue_fabric_reglogin(vport);
7643 else {
7644 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
7645 "3138 Need register VFI: (x%x/%x)\n",
7646 vport->fc_prevDID, vport->fc_myDID);
7647 lpfc_issue_reg_vfi(vport);
7648 }
7649 }
7650 }
7651 return 0;
7652 }
7653
7654 /**
7655 * lpfc_els_timeout - Handler function for the els timer
7656 * @t: pointer to the vport's els_tmofunc timer.
7657 *
7658 * This routine is invoked by the ELS timer after timeout. It posts the ELS
7659 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
7660 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
7661 * up the worker thread. It is for the worker thread to invoke the routine
7662 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
7663 **/
7664 void
7665 lpfc_els_timeout(struct timer_list *t)
7666 {
7667 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc);
7668 struct lpfc_hba *phba = vport->phba;
7669 uint32_t tmo_posted;
7670 unsigned long iflag;
7671
7672 spin_lock_irqsave(&vport->work_port_lock, iflag);
7673 tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
7674 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
7675 vport->work_port_events |= WORKER_ELS_TMO;
7676 spin_unlock_irqrestore(&vport->work_port_lock, iflag);
7677
7678 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))
7679 lpfc_worker_wake_up(phba);
7680 return;
7681 }
7682
7683
7684 /**
7685 * lpfc_els_timeout_handler - Process an els timeout event
7686 * @vport: pointer to a virtual N_Port data structure.
7687 *
7688 * This routine is the actual handler function that processes an ELS timeout
7689 * event.
It walks the ELS ring to get and abort all the IOCBs (except the 7690 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 7691 * invoking the lpfc_sli_issue_abort_iotag() routine. 7692 **/ 7693 void 7694 lpfc_els_timeout_handler(struct lpfc_vport *vport) 7695 { 7696 struct lpfc_hba *phba = vport->phba; 7697 struct lpfc_sli_ring *pring; 7698 struct lpfc_iocbq *tmp_iocb, *piocb; 7699 IOCB_t *cmd = NULL; 7700 struct lpfc_dmabuf *pcmd; 7701 uint32_t els_command = 0; 7702 uint32_t timeout; 7703 uint32_t remote_ID = 0xffffffff; 7704 LIST_HEAD(abort_list); 7705 7706 7707 timeout = (uint32_t)(phba->fc_ratov << 1); 7708 7709 pring = lpfc_phba_elsring(phba); 7710 if (unlikely(!pring)) 7711 return; 7712 7713 if ((phba->pport->load_flag & FC_UNLOADING)) 7714 return; 7715 spin_lock_irq(&phba->hbalock); 7716 if (phba->sli_rev == LPFC_SLI_REV4) 7717 spin_lock(&pring->ring_lock); 7718 7719 if ((phba->pport->load_flag & FC_UNLOADING)) { 7720 if (phba->sli_rev == LPFC_SLI_REV4) 7721 spin_unlock(&pring->ring_lock); 7722 spin_unlock_irq(&phba->hbalock); 7723 return; 7724 } 7725 7726 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 7727 cmd = &piocb->iocb; 7728 7729 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 7730 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 7731 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 7732 continue; 7733 7734 if (piocb->vport != vport) 7735 continue; 7736 7737 pcmd = (struct lpfc_dmabuf *) piocb->context2; 7738 if (pcmd) 7739 els_command = *(uint32_t *) (pcmd->virt); 7740 7741 if (els_command == ELS_CMD_FARP || 7742 els_command == ELS_CMD_FARPR || 7743 els_command == ELS_CMD_FDISC) 7744 continue; 7745 7746 if (piocb->drvrTimeout > 0) { 7747 if (piocb->drvrTimeout >= timeout) 7748 piocb->drvrTimeout -= timeout; 7749 else 7750 piocb->drvrTimeout = 0; 7751 continue; 7752 } 7753 7754 remote_ID = 0xffffffff; 7755 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 7756 remote_ID = cmd->un.elsreq64.remoteID; 7757 else { 7758 struct lpfc_nodelist *ndlp; 7759 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 7760 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 7761 remote_ID = ndlp->nlp_DID; 7762 } 7763 list_add_tail(&piocb->dlist, &abort_list); 7764 } 7765 if (phba->sli_rev == LPFC_SLI_REV4) 7766 spin_unlock(&pring->ring_lock); 7767 spin_unlock_irq(&phba->hbalock); 7768 7769 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 7770 cmd = &piocb->iocb; 7771 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7772 "0127 ELS timeout Data: x%x x%x x%x " 7773 "x%x\n", els_command, 7774 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 7775 spin_lock_irq(&phba->hbalock); 7776 list_del_init(&piocb->dlist); 7777 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 7778 spin_unlock_irq(&phba->hbalock); 7779 } 7780 7781 if (!list_empty(&pring->txcmplq)) 7782 if (!(phba->pport->load_flag & FC_UNLOADING)) 7783 mod_timer(&vport->els_tmofunc, 7784 jiffies + msecs_to_jiffies(1000 * timeout)); 7785 } 7786 7787 /** 7788 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 7789 * @vport: pointer to a host virtual N_Port data structure. 7790 * 7791 * This routine is used to clean up all the outstanding ELS commands on a 7792 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 7793 * routine. After that, it walks the ELS transmit queue to remove all the 7794 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. 
For
7795 * the IOCBs with a non-NULL completion callback function, the callback
7796 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
7797 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
7798 * callback function, the IOCB will simply be released. Finally, it walks
7799 * the ELS transmit completion queue to issue an abort IOCB to any transmit
7800 * completion queue IOCB that is associated with the @vport and is not
7801 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
7802 * part of the discovery state machine) out to the HBA by invoking the
7803 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the
7804 * abort IOCB to any transmit completion queued IOCB; it does not guarantee
7805 * the IOCBs are aborted when this function returns.
7806 **/
7807 void
7808 lpfc_els_flush_cmd(struct lpfc_vport *vport)
7809 {
7810 LIST_HEAD(abort_list);
7811 struct lpfc_hba *phba = vport->phba;
7812 struct lpfc_sli_ring *pring;
7813 struct lpfc_iocbq *tmp_iocb, *piocb;
7814 IOCB_t *cmd = NULL;
7815
7816 lpfc_fabric_abort_vport(vport);
7817 /*
7818 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate
7819 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag
7820 * ultimately grabs the ring_lock, the driver must splice the list into
7821 * a working list and release the locks before calling the abort.
7822 */
7823 spin_lock_irq(&phba->hbalock);
7824 pring = lpfc_phba_elsring(phba);
7825
7826 /* Bail out if we've no ELS wq, like in PCI error recovery case. */
7827 if (unlikely(!pring)) {
7828 spin_unlock_irq(&phba->hbalock);
7829 return;
7830 }
7831
7832 if (phba->sli_rev == LPFC_SLI_REV4)
7833 spin_lock(&pring->ring_lock);
7834
7835 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
7836 if (piocb->iocb_flag & LPFC_IO_LIBDFC)
7837 continue;
7838
7839 if (piocb->vport != vport)
7840 continue;
7841 list_add_tail(&piocb->dlist, &abort_list);
7842 }
7843 if (phba->sli_rev == LPFC_SLI_REV4)
7844 spin_unlock(&pring->ring_lock);
7845 spin_unlock_irq(&phba->hbalock);
7846 /* Abort each iocb on the aborted list and remove the dlist links.
*/ 7847 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 7848 spin_lock_irq(&phba->hbalock); 7849 list_del_init(&piocb->dlist); 7850 lpfc_sli_issue_abort_iotag(phba, pring, piocb); 7851 spin_unlock_irq(&phba->hbalock); 7852 } 7853 if (!list_empty(&abort_list)) 7854 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7855 "3387 abort list for txq not empty\n"); 7856 INIT_LIST_HEAD(&abort_list); 7857 7858 spin_lock_irq(&phba->hbalock); 7859 if (phba->sli_rev == LPFC_SLI_REV4) 7860 spin_lock(&pring->ring_lock); 7861 7862 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 7863 cmd = &piocb->iocb; 7864 7865 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 7866 continue; 7867 } 7868 7869 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 7870 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 7871 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 7872 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 7873 cmd->ulpCommand == CMD_ABORT_XRI_CN) 7874 continue; 7875 7876 if (piocb->vport != vport) 7877 continue; 7878 7879 list_del_init(&piocb->list); 7880 list_add_tail(&piocb->list, &abort_list); 7881 } 7882 if (phba->sli_rev == LPFC_SLI_REV4) 7883 spin_unlock(&pring->ring_lock); 7884 spin_unlock_irq(&phba->hbalock); 7885 7886 /* Cancell all the IOCBs from the completions list */ 7887 lpfc_sli_cancel_iocbs(phba, &abort_list, 7888 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 7889 7890 return; 7891 } 7892 7893 /** 7894 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 7895 * @phba: pointer to lpfc hba data structure. 7896 * 7897 * This routine is used to clean up all the outstanding ELS commands on a 7898 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 7899 * routine. After that, it walks the ELS transmit queue to remove all the 7900 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 7901 * the IOCBs with the completion callback function associated, the callback 7902 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 7903 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 7904 * callback function associated, the IOCB will simply be released. Finally, 7905 * it walks the ELS transmit completion queue to issue an abort IOCB to any 7906 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 7907 * management plane IOCBs that are not part of the discovery state machine) 7908 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 7909 **/ 7910 void 7911 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 7912 { 7913 struct lpfc_vport *vport; 7914 7915 spin_lock_irq(&phba->port_list_lock); 7916 list_for_each_entry(vport, &phba->port_list, listentry) 7917 lpfc_els_flush_cmd(vport); 7918 spin_unlock_irq(&phba->port_list_lock); 7919 7920 return; 7921 } 7922 7923 /** 7924 * lpfc_send_els_failure_event - Posts an ELS command failure event 7925 * @phba: Pointer to hba context object. 7926 * @cmdiocbp: Pointer to command iocb which reported error. 7927 * @rspiocbp: Pointer to response iocb which reported error. 7928 * 7929 * This function sends an event when there is an ELS command 7930 * failure. 
7931 **/ 7932 void 7933 lpfc_send_els_failure_event(struct lpfc_hba *phba, 7934 struct lpfc_iocbq *cmdiocbp, 7935 struct lpfc_iocbq *rspiocbp) 7936 { 7937 struct lpfc_vport *vport = cmdiocbp->vport; 7938 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 7939 struct lpfc_lsrjt_event lsrjt_event; 7940 struct lpfc_fabric_event_header fabric_event; 7941 struct ls_rjt stat; 7942 struct lpfc_nodelist *ndlp; 7943 uint32_t *pcmd; 7944 7945 ndlp = cmdiocbp->context1; 7946 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 7947 return; 7948 7949 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 7950 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 7951 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 7952 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 7953 sizeof(struct lpfc_name)); 7954 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 7955 sizeof(struct lpfc_name)); 7956 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 7957 cmdiocbp->context2)->virt); 7958 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 7959 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 7960 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 7961 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 7962 fc_host_post_vendor_event(shost, 7963 fc_get_event_number(), 7964 sizeof(lsrjt_event), 7965 (char *)&lsrjt_event, 7966 LPFC_NL_VENDOR_ID); 7967 return; 7968 } 7969 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 7970 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 7971 fabric_event.event_type = FC_REG_FABRIC_EVENT; 7972 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 7973 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 7974 else 7975 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 7976 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 7977 sizeof(struct lpfc_name)); 7978 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 7979 sizeof(struct lpfc_name)); 7980 fc_host_post_vendor_event(shost, 7981 fc_get_event_number(), 7982 sizeof(fabric_event), 7983 (char *)&fabric_event, 7984 LPFC_NL_VENDOR_ID); 7985 return; 7986 } 7987 7988 } 7989 7990 /** 7991 * lpfc_send_els_event - Posts unsolicited els event 7992 * @vport: Pointer to vport object. 7993 * @ndlp: Pointer FC node object. 7994 * @cmd: ELS command code. 7995 * 7996 * This function posts an event when there is an incoming 7997 * unsolicited ELS command. 
7998 **/ 7999 static void 8000 lpfc_send_els_event(struct lpfc_vport *vport, 8001 struct lpfc_nodelist *ndlp, 8002 uint32_t *payload) 8003 { 8004 struct lpfc_els_event_header *els_data = NULL; 8005 struct lpfc_logo_event *logo_data = NULL; 8006 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8007 8008 if (*payload == ELS_CMD_LOGO) { 8009 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 8010 if (!logo_data) { 8011 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8012 "0148 Failed to allocate memory " 8013 "for LOGO event\n"); 8014 return; 8015 } 8016 els_data = &logo_data->header; 8017 } else { 8018 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 8019 GFP_KERNEL); 8020 if (!els_data) { 8021 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8022 "0149 Failed to allocate memory " 8023 "for ELS event\n"); 8024 return; 8025 } 8026 } 8027 els_data->event_type = FC_REG_ELS_EVENT; 8028 switch (*payload) { 8029 case ELS_CMD_PLOGI: 8030 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 8031 break; 8032 case ELS_CMD_PRLO: 8033 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 8034 break; 8035 case ELS_CMD_ADISC: 8036 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 8037 break; 8038 case ELS_CMD_LOGO: 8039 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 8040 /* Copy the WWPN in the LOGO payload */ 8041 memcpy(logo_data->logo_wwpn, &payload[2], 8042 sizeof(struct lpfc_name)); 8043 break; 8044 default: 8045 kfree(els_data); 8046 return; 8047 } 8048 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 8049 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 8050 if (*payload == ELS_CMD_LOGO) { 8051 fc_host_post_vendor_event(shost, 8052 fc_get_event_number(), 8053 sizeof(struct lpfc_logo_event), 8054 (char *)logo_data, 8055 LPFC_NL_VENDOR_ID); 8056 kfree(logo_data); 8057 } else { 8058 fc_host_post_vendor_event(shost, 8059 fc_get_event_number(), 8060 sizeof(struct lpfc_els_event_header), 8061 (char *)els_data, 8062 LPFC_NL_VENDOR_ID); 8063 kfree(els_data); 8064 } 8065 8066 return; 8067 } 8068 8069 8070 /** 8071 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 8072 * @phba: pointer to lpfc hba data structure. 8073 * @pring: pointer to a SLI ring. 8074 * @vport: pointer to a host virtual N_Port data structure. 8075 * @elsiocb: pointer to lpfc els command iocb data structure. 8076 * 8077 * This routine is used for processing the IOCB associated with a unsolicited 8078 * event. It first determines whether there is an existing ndlp that matches 8079 * the DID from the unsolicited IOCB. If not, it will create a new one with 8080 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 8081 * IOCB is then used to invoke the proper routine and to set up proper state 8082 * of the discovery state machine. 
8083 **/ 8084 static void 8085 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8086 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 8087 { 8088 struct Scsi_Host *shost; 8089 struct lpfc_nodelist *ndlp; 8090 struct ls_rjt stat; 8091 uint32_t *payload; 8092 uint32_t cmd, did, newnode; 8093 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 8094 IOCB_t *icmd = &elsiocb->iocb; 8095 LPFC_MBOXQ_t *mbox; 8096 8097 if (!vport || !(elsiocb->context2)) 8098 goto dropit; 8099 8100 newnode = 0; 8101 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 8102 cmd = *payload; 8103 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 8104 lpfc_post_buffer(phba, pring, 1); 8105 8106 did = icmd->un.rcvels.remoteID; 8107 if (icmd->ulpStatus) { 8108 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8109 "RCV Unsol ELS: status:x%x/x%x did:x%x", 8110 icmd->ulpStatus, icmd->un.ulpWord[4], did); 8111 goto dropit; 8112 } 8113 8114 /* Check to see if link went down during discovery */ 8115 if (lpfc_els_chk_latt(vport)) 8116 goto dropit; 8117 8118 /* Ignore traffic received during vport shutdown. */ 8119 if (vport->load_flag & FC_UNLOADING) 8120 goto dropit; 8121 8122 /* If NPort discovery is delayed drop incoming ELS */ 8123 if ((vport->fc_flag & FC_DISC_DELAYED) && 8124 (cmd != ELS_CMD_PLOGI)) 8125 goto dropit; 8126 8127 ndlp = lpfc_findnode_did(vport, did); 8128 if (!ndlp) { 8129 /* Cannot find existing Fabric ndlp, so allocate a new one */ 8130 ndlp = lpfc_nlp_init(vport, did); 8131 if (!ndlp) 8132 goto dropit; 8133 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8134 newnode = 1; 8135 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 8136 ndlp->nlp_type |= NLP_FABRIC; 8137 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 8138 ndlp = lpfc_enable_node(vport, ndlp, 8139 NLP_STE_UNUSED_NODE); 8140 if (!ndlp) 8141 goto dropit; 8142 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8143 newnode = 1; 8144 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 8145 ndlp->nlp_type |= NLP_FABRIC; 8146 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 8147 /* This is similar to the new node path */ 8148 ndlp = lpfc_nlp_get(ndlp); 8149 if (!ndlp) 8150 goto dropit; 8151 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8152 newnode = 1; 8153 } 8154 8155 phba->fc_stat.elsRcvFrame++; 8156 8157 /* 8158 * Do not process any unsolicited ELS commands 8159 * if the ndlp is in DEV_LOSS 8160 */ 8161 shost = lpfc_shost_from_vport(vport); 8162 spin_lock_irq(shost->host_lock); 8163 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 8164 spin_unlock_irq(shost->host_lock); 8165 goto dropit; 8166 } 8167 spin_unlock_irq(shost->host_lock); 8168 8169 elsiocb->context1 = lpfc_nlp_get(ndlp); 8170 elsiocb->vport = vport; 8171 8172 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 8173 cmd &= ELS_CMD_MASK; 8174 } 8175 /* ELS command <elsCmd> received from NPORT <did> */ 8176 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8177 "0112 ELS command x%x received from NPORT x%x " 8178 "Data: x%x x%x x%x x%x\n", 8179 cmd, did, vport->port_state, vport->fc_flag, 8180 vport->fc_myDID, vport->fc_prevDID); 8181 8182 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 8183 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 8184 (cmd != ELS_CMD_FLOGI) && 8185 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 8186 rjt_err = LSRJT_LOGICAL_BSY; 8187 rjt_exp = LSEXP_NOTHING_MORE; 8188 goto lsrjt; 8189 } 8190 8191 switch (cmd) { 8192 case ELS_CMD_PLOGI: 8193 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8194 "RCV PLOGI: 
did:x%x/ste:x%x flg:x%x", 8195 did, vport->port_state, ndlp->nlp_flag); 8196 8197 phba->fc_stat.elsRcvPLOGI++; 8198 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 8199 if (phba->sli_rev == LPFC_SLI_REV4 && 8200 (phba->pport->fc_flag & FC_PT2PT)) { 8201 vport->fc_prevDID = vport->fc_myDID; 8202 /* Our DID needs to be updated before registering 8203 * the vfi. This is done in lpfc_rcv_plogi but 8204 * that is called after the reg_vfi. 8205 */ 8206 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; 8207 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8208 "3312 Remote port assigned DID x%x " 8209 "%x\n", vport->fc_myDID, 8210 vport->fc_prevDID); 8211 } 8212 8213 lpfc_send_els_event(vport, ndlp, payload); 8214 8215 /* If Nport discovery is delayed, reject PLOGIs */ 8216 if (vport->fc_flag & FC_DISC_DELAYED) { 8217 rjt_err = LSRJT_UNABLE_TPC; 8218 rjt_exp = LSEXP_NOTHING_MORE; 8219 break; 8220 } 8221 8222 if (vport->port_state < LPFC_DISC_AUTH) { 8223 if (!(phba->pport->fc_flag & FC_PT2PT) || 8224 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { 8225 rjt_err = LSRJT_UNABLE_TPC; 8226 rjt_exp = LSEXP_NOTHING_MORE; 8227 break; 8228 } 8229 } 8230 8231 spin_lock_irq(shost->host_lock); 8232 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; 8233 spin_unlock_irq(shost->host_lock); 8234 8235 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8236 NLP_EVT_RCV_PLOGI); 8237 8238 break; 8239 case ELS_CMD_FLOGI: 8240 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8241 "RCV FLOGI: did:x%x/ste:x%x flg:x%x", 8242 did, vport->port_state, ndlp->nlp_flag); 8243 8244 phba->fc_stat.elsRcvFLOGI++; 8245 8246 /* If the driver believes fabric discovery is done and is ready, 8247 * bounce the link. There is some descrepancy. 8248 */ 8249 if (vport->port_state >= LPFC_LOCAL_CFG_LINK && 8250 vport->fc_flag & FC_PT2PT && 8251 vport->rcv_flogi_cnt >= 1) { 8252 rjt_err = LSRJT_LOGICAL_BSY; 8253 rjt_exp = LSEXP_NOTHING_MORE; 8254 init_link++; 8255 goto lsrjt; 8256 } 8257 8258 lpfc_els_rcv_flogi(vport, elsiocb, ndlp); 8259 if (newnode) 8260 lpfc_nlp_put(ndlp); 8261 break; 8262 case ELS_CMD_LOGO: 8263 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8264 "RCV LOGO: did:x%x/ste:x%x flg:x%x", 8265 did, vport->port_state, ndlp->nlp_flag); 8266 8267 phba->fc_stat.elsRcvLOGO++; 8268 lpfc_send_els_event(vport, ndlp, payload); 8269 if (vport->port_state < LPFC_DISC_AUTH) { 8270 rjt_err = LSRJT_UNABLE_TPC; 8271 rjt_exp = LSEXP_NOTHING_MORE; 8272 break; 8273 } 8274 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); 8275 break; 8276 case ELS_CMD_PRLO: 8277 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8278 "RCV PRLO: did:x%x/ste:x%x flg:x%x", 8279 did, vport->port_state, ndlp->nlp_flag); 8280 8281 phba->fc_stat.elsRcvPRLO++; 8282 lpfc_send_els_event(vport, ndlp, payload); 8283 if (vport->port_state < LPFC_DISC_AUTH) { 8284 rjt_err = LSRJT_UNABLE_TPC; 8285 rjt_exp = LSEXP_NOTHING_MORE; 8286 break; 8287 } 8288 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO); 8289 break; 8290 case ELS_CMD_LCB: 8291 phba->fc_stat.elsRcvLCB++; 8292 lpfc_els_rcv_lcb(vport, elsiocb, ndlp); 8293 break; 8294 case ELS_CMD_RDP: 8295 phba->fc_stat.elsRcvRDP++; 8296 lpfc_els_rcv_rdp(vport, elsiocb, ndlp); 8297 break; 8298 case ELS_CMD_RSCN: 8299 phba->fc_stat.elsRcvRSCN++; 8300 lpfc_els_rcv_rscn(vport, elsiocb, ndlp); 8301 if (newnode) 8302 lpfc_nlp_put(ndlp); 8303 break; 8304 case ELS_CMD_ADISC: 8305 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8306 "RCV ADISC: did:x%x/ste:x%x flg:x%x", 8307 did, vport->port_state, 
ndlp->nlp_flag); 8308 8309 lpfc_send_els_event(vport, ndlp, payload); 8310 phba->fc_stat.elsRcvADISC++; 8311 if (vport->port_state < LPFC_DISC_AUTH) { 8312 rjt_err = LSRJT_UNABLE_TPC; 8313 rjt_exp = LSEXP_NOTHING_MORE; 8314 break; 8315 } 8316 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8317 NLP_EVT_RCV_ADISC); 8318 break; 8319 case ELS_CMD_PDISC: 8320 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8321 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 8322 did, vport->port_state, ndlp->nlp_flag); 8323 8324 phba->fc_stat.elsRcvPDISC++; 8325 if (vport->port_state < LPFC_DISC_AUTH) { 8326 rjt_err = LSRJT_UNABLE_TPC; 8327 rjt_exp = LSEXP_NOTHING_MORE; 8328 break; 8329 } 8330 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8331 NLP_EVT_RCV_PDISC); 8332 break; 8333 case ELS_CMD_FARPR: 8334 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8335 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 8336 did, vport->port_state, ndlp->nlp_flag); 8337 8338 phba->fc_stat.elsRcvFARPR++; 8339 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 8340 break; 8341 case ELS_CMD_FARP: 8342 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8343 "RCV FARP: did:x%x/ste:x%x flg:x%x", 8344 did, vport->port_state, ndlp->nlp_flag); 8345 8346 phba->fc_stat.elsRcvFARP++; 8347 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 8348 break; 8349 case ELS_CMD_FAN: 8350 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8351 "RCV FAN: did:x%x/ste:x%x flg:x%x", 8352 did, vport->port_state, ndlp->nlp_flag); 8353 8354 phba->fc_stat.elsRcvFAN++; 8355 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 8356 break; 8357 case ELS_CMD_PRLI: 8358 case ELS_CMD_NVMEPRLI: 8359 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8360 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 8361 did, vport->port_state, ndlp->nlp_flag); 8362 8363 phba->fc_stat.elsRcvPRLI++; 8364 if ((vport->port_state < LPFC_DISC_AUTH) && 8365 (vport->fc_flag & FC_FABRIC)) { 8366 rjt_err = LSRJT_UNABLE_TPC; 8367 rjt_exp = LSEXP_NOTHING_MORE; 8368 break; 8369 } 8370 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 8371 break; 8372 case ELS_CMD_LIRR: 8373 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8374 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 8375 did, vport->port_state, ndlp->nlp_flag); 8376 8377 phba->fc_stat.elsRcvLIRR++; 8378 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 8379 if (newnode) 8380 lpfc_nlp_put(ndlp); 8381 break; 8382 case ELS_CMD_RLS: 8383 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8384 "RCV RLS: did:x%x/ste:x%x flg:x%x", 8385 did, vport->port_state, ndlp->nlp_flag); 8386 8387 phba->fc_stat.elsRcvRLS++; 8388 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 8389 if (newnode) 8390 lpfc_nlp_put(ndlp); 8391 break; 8392 case ELS_CMD_RPS: 8393 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8394 "RCV RPS: did:x%x/ste:x%x flg:x%x", 8395 did, vport->port_state, ndlp->nlp_flag); 8396 8397 phba->fc_stat.elsRcvRPS++; 8398 lpfc_els_rcv_rps(vport, elsiocb, ndlp); 8399 if (newnode) 8400 lpfc_nlp_put(ndlp); 8401 break; 8402 case ELS_CMD_RPL: 8403 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8404 "RCV RPL: did:x%x/ste:x%x flg:x%x", 8405 did, vport->port_state, ndlp->nlp_flag); 8406 8407 phba->fc_stat.elsRcvRPL++; 8408 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 8409 if (newnode) 8410 lpfc_nlp_put(ndlp); 8411 break; 8412 case ELS_CMD_RNID: 8413 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8414 "RCV RNID: did:x%x/ste:x%x flg:x%x", 8415 did, vport->port_state, ndlp->nlp_flag); 8416 8417 phba->fc_stat.elsRcvRNID++; 8418 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 8419 if (newnode) 
8420 lpfc_nlp_put(ndlp); 8421 break; 8422 case ELS_CMD_RTV: 8423 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8424 "RCV RTV: did:x%x/ste:x%x flg:x%x", 8425 did, vport->port_state, ndlp->nlp_flag); 8426 phba->fc_stat.elsRcvRTV++; 8427 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 8428 if (newnode) 8429 lpfc_nlp_put(ndlp); 8430 break; 8431 case ELS_CMD_RRQ: 8432 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8433 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 8434 did, vport->port_state, ndlp->nlp_flag); 8435 8436 phba->fc_stat.elsRcvRRQ++; 8437 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 8438 if (newnode) 8439 lpfc_nlp_put(ndlp); 8440 break; 8441 case ELS_CMD_ECHO: 8442 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8443 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 8444 did, vport->port_state, ndlp->nlp_flag); 8445 8446 phba->fc_stat.elsRcvECHO++; 8447 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 8448 if (newnode) 8449 lpfc_nlp_put(ndlp); 8450 break; 8451 case ELS_CMD_REC: 8452 /* receive this due to exchange closed */ 8453 rjt_err = LSRJT_UNABLE_TPC; 8454 rjt_exp = LSEXP_INVALID_OX_RX; 8455 break; 8456 case ELS_CMD_FPIN: 8457 /* 8458 * Received FPIN from fabric - pass it to the 8459 * transport FPIN handler. 8460 */ 8461 fc_host_fpin_rcv(shost, elsiocb->iocb.unsli3.rcvsli3.acc_len, 8462 (char *)payload); 8463 break; 8464 default: 8465 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8466 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 8467 cmd, did, vport->port_state); 8468 8469 /* Unsupported ELS command, reject */ 8470 rjt_err = LSRJT_CMD_UNSUPPORTED; 8471 rjt_exp = LSEXP_NOTHING_MORE; 8472 8473 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 8474 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8475 "0115 Unknown ELS command x%x " 8476 "received from NPORT x%x\n", cmd, did); 8477 if (newnode) 8478 lpfc_nlp_put(ndlp); 8479 break; 8480 } 8481 8482 lsrjt: 8483 /* check if need to LS_RJT received ELS cmd */ 8484 if (rjt_err) { 8485 memset(&stat, 0, sizeof(stat)); 8486 stat.un.b.lsRjtRsnCode = rjt_err; 8487 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 8488 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 8489 NULL); 8490 } 8491 8492 lpfc_nlp_put(elsiocb->context1); 8493 elsiocb->context1 = NULL; 8494 8495 /* Special case. Driver received an unsolicited command that 8496 * unsupportable given the driver's current state. Reset the 8497 * link and start over. 8498 */ 8499 if (init_link) { 8500 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8501 if (!mbox) 8502 return; 8503 lpfc_linkdown(phba); 8504 lpfc_init_link(phba, mbox, 8505 phba->cfg_topology, 8506 phba->cfg_link_speed); 8507 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8508 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8509 mbox->vport = vport; 8510 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 8511 MBX_NOT_FINISHED) 8512 mempool_free(mbox, phba->mbox_mem_pool); 8513 } 8514 8515 return; 8516 8517 dropit: 8518 if (vport && !(vport->load_flag & FC_UNLOADING)) 8519 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8520 "0111 Dropping received ELS cmd " 8521 "Data: x%x x%x x%x\n", 8522 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 8523 phba->fc_stat.elsRcvDrop++; 8524 } 8525 8526 /** 8527 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 8528 * @phba: pointer to lpfc hba data structure. 8529 * @pring: pointer to a SLI ring. 8530 * @elsiocb: pointer to lpfc els iocb data structure. 8531 * 8532 * This routine is used to process an unsolicited event received from a SLI 8533 * (Service Level Interface) ring. 
The actual processing of the data buffer 8534 * associated with the unsolicited event is done by invoking the routine 8535 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 8536 * SLI ring on which the unsolicited event was received. 8537 **/ 8538 void 8539 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8540 struct lpfc_iocbq *elsiocb) 8541 { 8542 struct lpfc_vport *vport = phba->pport; 8543 IOCB_t *icmd = &elsiocb->iocb; 8544 dma_addr_t paddr; 8545 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 8546 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 8547 8548 elsiocb->context1 = NULL; 8549 elsiocb->context2 = NULL; 8550 elsiocb->context3 = NULL; 8551 8552 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 8553 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 8554 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 8555 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 8556 IOERR_RCV_BUFFER_WAITING) { 8557 phba->fc_stat.NoRcvBuf++; 8558 /* Not enough posted buffers; Try posting more buffers */ 8559 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 8560 lpfc_post_buffer(phba, pring, 0); 8561 return; 8562 } 8563 8564 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8565 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 8566 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 8567 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 8568 vport = phba->pport; 8569 else 8570 vport = lpfc_find_vport_by_vpid(phba, 8571 icmd->unsli3.rcvsli3.vpi); 8572 } 8573 8574 /* If there are no BDEs associated 8575 * with this IOCB, there is nothing to do. 8576 */ 8577 if (icmd->ulpBdeCount == 0) 8578 return; 8579 8580 /* type of ELS cmd is first 32bit word 8581 * in packet 8582 */ 8583 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 8584 elsiocb->context2 = bdeBuf1; 8585 } else { 8586 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 8587 icmd->un.cont64[0].addrLow); 8588 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 8589 paddr); 8590 } 8591 8592 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 8593 /* 8594 * The different unsolicited event handlers would tell us 8595 * if they are done with "mp" by setting context2 to NULL. 8596 */ 8597 if (elsiocb->context2) { 8598 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 8599 elsiocb->context2 = NULL; 8600 } 8601 8602 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 8603 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 8604 icmd->ulpBdeCount == 2) { 8605 elsiocb->context2 = bdeBuf2; 8606 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 8607 /* free mp if we are done with it */ 8608 if (elsiocb->context2) { 8609 lpfc_in_buf_free(phba, elsiocb->context2); 8610 elsiocb->context2 = NULL; 8611 } 8612 } 8613 } 8614 8615 static void 8616 lpfc_start_fdmi(struct lpfc_vport *vport) 8617 { 8618 struct lpfc_nodelist *ndlp; 8619 8620 /* If this is the first time, allocate an ndlp and initialize 8621 * it. Otherwise, make sure the node is enabled and then do the 8622 * login. 
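 * The PLOGI issued below targets the FDMI management server address
 * (FDMI_DID); FDMI registration itself is expected to be driven later,
 * once that login completes.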
8623 */ 8624 ndlp = lpfc_findnode_did(vport, FDMI_DID); 8625 if (!ndlp) { 8626 ndlp = lpfc_nlp_init(vport, FDMI_DID); 8627 if (ndlp) { 8628 ndlp->nlp_type |= NLP_FABRIC; 8629 } else { 8630 return; 8631 } 8632 } 8633 if (!NLP_CHK_NODE_ACT(ndlp)) 8634 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); 8635 8636 if (ndlp) { 8637 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8638 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 8639 } 8640 } 8641 8642 /** 8643 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr 8644 * @phba: pointer to lpfc hba data structure. 8645 * @vport: pointer to a virtual N_Port data structure. 8646 * 8647 * This routine issues a Port Login (PLOGI) to the Name Server with 8648 * State Change Request (SCR) for a @vport. This routine will create an 8649 * ndlp for the Name Server associated with the @vport if such a node does 8650 * not already exist. The PLOGI to the Name Server is issued by invoking the 8651 * lpfc_issue_els_plogi() routine. If the Fabric-Device Management Interface 8652 * (FDMI) is configured for the @vport, an FDMI node will be created and 8653 * the PLOGI to FDMI is issued by invoking the lpfc_issue_els_plogi() routine. 8654 **/ 8655 void 8656 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) 8657 { 8658 struct lpfc_nodelist *ndlp; 8659 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8660 8661 /* 8662 * If the lpfc_delay_discovery parameter is set, the clean address 8663 * bit is cleared and the FC fabric parameters have changed, delay FC NPort 8664 * discovery. 8665 */ 8666 spin_lock_irq(shost->host_lock); 8667 if (vport->fc_flag & FC_DISC_DELAYED) { 8668 spin_unlock_irq(shost->host_lock); 8669 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 8670 "3334 Delay fc port discovery for %d seconds\n", 8671 phba->fc_ratov); 8672 mod_timer(&vport->delayed_disc_tmo, 8673 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); 8674 return; 8675 } 8676 spin_unlock_irq(shost->host_lock); 8677 8678 ndlp = lpfc_findnode_did(vport, NameServer_DID); 8679 if (!ndlp) { 8680 ndlp = lpfc_nlp_init(vport, NameServer_DID); 8681 if (!ndlp) { 8682 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8683 lpfc_disc_start(vport); 8684 return; 8685 } 8686 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8687 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8688 "0251 NameServer login: no memory\n"); 8689 return; 8690 } 8691 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 8692 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 8693 if (!ndlp) { 8694 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8695 lpfc_disc_start(vport); 8696 return; 8697 } 8698 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8699 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8700 "0348 NameServer login: node freed\n"); 8701 return; 8702 } 8703 } 8704 ndlp->nlp_type |= NLP_FABRIC; 8705 8706 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 8707 8708 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) { 8709 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8710 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 8711 "0252 Cannot issue NameServer login\n"); 8712 return; 8713 } 8714 8715 if ((phba->cfg_enable_SmartSAN || 8716 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) && 8717 (vport->load_flag & FC_ALLOW_FDMI)) 8718 lpfc_start_fdmi(vport); 8719 } 8720 8721 /** 8722 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport 8723 * @phba: pointer to lpfc hba data structure. 8724 * @pmb: pointer to the driver internal queue element for mailbox command.
8725 * 8726 * This routine is the completion callback function to register new vport 8727 * mailbox command. If the new vport mailbox command completes successfully, 8728 * the fabric registration login shall be performed on physical port (the 8729 * new vport created is actually a physical port, with VPI 0) or the port 8730 * login to Name Server for State Change Request (SCR) will be performed 8731 * on virtual port (real virtual port, with VPI greater than 0). 8732 **/ 8733 static void 8734 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 8735 { 8736 struct lpfc_vport *vport = pmb->vport; 8737 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8738 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 8739 MAILBOX_t *mb = &pmb->u.mb; 8740 int rc; 8741 8742 spin_lock_irq(shost->host_lock); 8743 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 8744 spin_unlock_irq(shost->host_lock); 8745 8746 if (mb->mbxStatus) { 8747 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 8748 "0915 Register VPI failed : Status: x%x" 8749 " upd bit: x%x \n", mb->mbxStatus, 8750 mb->un.varRegVpi.upd); 8751 if (phba->sli_rev == LPFC_SLI_REV4 && 8752 mb->un.varRegVpi.upd) 8753 goto mbox_err_exit ; 8754 8755 switch (mb->mbxStatus) { 8756 case 0x11: /* unsupported feature */ 8757 case 0x9603: /* max_vpi exceeded */ 8758 case 0x9602: /* Link event since CLEAR_LA */ 8759 /* giving up on vport registration */ 8760 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8761 spin_lock_irq(shost->host_lock); 8762 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 8763 spin_unlock_irq(shost->host_lock); 8764 lpfc_can_disctmo(vport); 8765 break; 8766 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 8767 case 0x20: 8768 spin_lock_irq(shost->host_lock); 8769 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 8770 spin_unlock_irq(shost->host_lock); 8771 lpfc_init_vpi(phba, pmb, vport->vpi); 8772 pmb->vport = vport; 8773 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 8774 rc = lpfc_sli_issue_mbox(phba, pmb, 8775 MBX_NOWAIT); 8776 if (rc == MBX_NOT_FINISHED) { 8777 lpfc_printf_vlog(vport, 8778 KERN_ERR, LOG_MBOX, 8779 "2732 Failed to issue INIT_VPI" 8780 " mailbox command\n"); 8781 } else { 8782 lpfc_nlp_put(ndlp); 8783 return; 8784 } 8785 /* fall through */ 8786 default: 8787 /* Try to recover from this error */ 8788 if (phba->sli_rev == LPFC_SLI_REV4) 8789 lpfc_sli4_unreg_all_rpis(vport); 8790 lpfc_mbx_unreg_vpi(vport); 8791 spin_lock_irq(shost->host_lock); 8792 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 8793 spin_unlock_irq(shost->host_lock); 8794 if (mb->mbxStatus == MBX_NOT_FINISHED) 8795 break; 8796 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 8797 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 8798 if (phba->sli_rev == LPFC_SLI_REV4) 8799 lpfc_issue_init_vfi(vport); 8800 else 8801 lpfc_initial_flogi(vport); 8802 } else { 8803 lpfc_initial_fdisc(vport); 8804 } 8805 break; 8806 } 8807 } else { 8808 spin_lock_irq(shost->host_lock); 8809 vport->vpi_state |= LPFC_VPI_REGISTERED; 8810 spin_unlock_irq(shost->host_lock); 8811 if (vport == phba->pport) { 8812 if (phba->sli_rev < LPFC_SLI_REV4) 8813 lpfc_issue_fabric_reglogin(vport); 8814 else { 8815 /* 8816 * If the physical port is instantiated using 8817 * FDISC, do not start vport discovery. 
8818 */ 8819 if (vport->port_state != LPFC_FDISC) 8820 lpfc_start_fdiscs(phba); 8821 lpfc_do_scr_ns_plogi(phba, vport); 8822 } 8823 } else 8824 lpfc_do_scr_ns_plogi(phba, vport); 8825 } 8826 mbox_err_exit: 8827 /* Now, we decrement the ndlp reference count held for this 8828 * callback function 8829 */ 8830 lpfc_nlp_put(ndlp); 8831 8832 mempool_free(pmb, phba->mbox_mem_pool); 8833 return; 8834 } 8835 8836 /** 8837 * lpfc_register_new_vport - Register a new vport with a HBA 8838 * @phba: pointer to lpfc hba data structure. 8839 * @vport: pointer to a host virtual N_Port data structure. 8840 * @ndlp: pointer to a node-list data structure. 8841 * 8842 * This routine registers the @vport as a new virtual port with a HBA. 8843 * It is done through a registering vpi mailbox command. 8844 **/ 8845 void 8846 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, 8847 struct lpfc_nodelist *ndlp) 8848 { 8849 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8850 LPFC_MBOXQ_t *mbox; 8851 8852 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8853 if (mbox) { 8854 lpfc_reg_vpi(vport, mbox); 8855 mbox->vport = vport; 8856 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 8857 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; 8858 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 8859 == MBX_NOT_FINISHED) { 8860 /* mailbox command not success, decrement ndlp 8861 * reference count for this command 8862 */ 8863 lpfc_nlp_put(ndlp); 8864 mempool_free(mbox, phba->mbox_mem_pool); 8865 8866 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 8867 "0253 Register VPI: Can't send mbox\n"); 8868 goto mbox_err_exit; 8869 } 8870 } else { 8871 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 8872 "0254 Register VPI: no memory\n"); 8873 goto mbox_err_exit; 8874 } 8875 return; 8876 8877 mbox_err_exit: 8878 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 8879 spin_lock_irq(shost->host_lock); 8880 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 8881 spin_unlock_irq(shost->host_lock); 8882 return; 8883 } 8884 8885 /** 8886 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer 8887 * @phba: pointer to lpfc hba data structure. 8888 * 8889 * This routine cancels the retry delay timers to all the vports. 8890 **/ 8891 void 8892 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) 8893 { 8894 struct lpfc_vport **vports; 8895 struct lpfc_nodelist *ndlp; 8896 uint32_t link_state; 8897 int i; 8898 8899 /* Treat this failure as linkdown for all vports */ 8900 link_state = phba->link_state; 8901 lpfc_linkdown(phba); 8902 phba->link_state = link_state; 8903 8904 vports = lpfc_create_vport_work_array(phba); 8905 8906 if (vports) { 8907 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 8908 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 8909 if (ndlp) 8910 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 8911 lpfc_els_flush_cmd(vports[i]); 8912 } 8913 lpfc_destroy_vport_work_array(phba, vports); 8914 } 8915 } 8916 8917 /** 8918 * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 8919 * @phba: pointer to lpfc hba data structure. 8920 * 8921 * This routine abort all pending discovery commands and 8922 * start a timer to retry FLOGI for the physical port 8923 * discovery. 
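 * The retry is driven through the Fabric_DID node's delayed-function timer:
 * the timer is armed for one second, NLP_DELAY_TMO is set on the node, and
 * nlp_last_elscmd is set to ELS_CMD_FLOGI so the delayed retry path
 * re-issues the FLOGI when the timer fires.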
8924 **/ 8925 void 8926 lpfc_retry_pport_discovery(struct lpfc_hba *phba) 8927 { 8928 struct lpfc_nodelist *ndlp; 8929 struct Scsi_Host *shost; 8930 8931 /* Cancel the retry delay timers for all vports */ 8932 lpfc_cancel_all_vport_retry_delay_timer(phba); 8933 8934 /* If the fabric requires FLOGI, re-instantiate the physical login */ 8935 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 8936 if (!ndlp) 8937 return; 8938 8939 shost = lpfc_shost_from_vport(phba->pport); 8940 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); 8941 spin_lock_irq(shost->host_lock); 8942 ndlp->nlp_flag |= NLP_DELAY_TMO; 8943 spin_unlock_irq(shost->host_lock); 8944 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; 8945 phba->pport->port_state = LPFC_FLOGI; 8946 return; 8947 } 8948 8949 /** 8950 * lpfc_fabric_login_reqd - Check if FLOGI required. 8951 * @phba: pointer to lpfc hba data structure. 8952 * @cmdiocb: pointer to FDISC command iocb. 8953 * @rspiocb: pointer to FDISC response iocb. 8954 * 8955 * This routine checks if a FLOGI is required for FDISC 8956 * to succeed. 8957 **/ 8958 static int 8959 lpfc_fabric_login_reqd(struct lpfc_hba *phba, 8960 struct lpfc_iocbq *cmdiocb, 8961 struct lpfc_iocbq *rspiocb) 8962 { 8963 8964 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) || 8965 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED)) 8966 return 0; 8967 else 8968 return 1; 8969 } 8970 8971 /** 8972 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command 8973 * @phba: pointer to lpfc hba data structure. 8974 * @cmdiocb: pointer to lpfc command iocb data structure. 8975 * @rspiocb: pointer to lpfc response iocb data structure. 8976 * 8977 * This routine is the completion callback function to a Fabric Discover 8978 * (FDISC) ELS command. Since all the FDISC ELS commands are issued 8979 * single threaded, each FDISC completion callback function will reset 8980 * the discovery timer for all vports such that the timers will not 8981 * time out unnecessarily. The function checks the FDISC IOCB status. If an error 8982 * is detected, the vport will be set to the FC_VPORT_FAILED state. Otherwise, the 8983 * vport will be set to the FC_VPORT_ACTIVE state. It then checks whether the DID 8984 * assigned to the vport has been changed with the completion of the FDISC 8985 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index) 8986 * are unregistered from the HBA, and then the lpfc_register_new_vport() 8987 * routine is invoked to register the new vport with the HBA. Otherwise, the 8988 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name 8989 * Server for State Change Request (SCR). 8990 **/ 8991 static void 8992 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 8993 struct lpfc_iocbq *rspiocb) 8994 { 8995 struct lpfc_vport *vport = cmdiocb->vport; 8996 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8997 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 8998 struct lpfc_nodelist *np; 8999 struct lpfc_nodelist *next_np; 9000 IOCB_t *irsp = &rspiocb->iocb; 9001 struct lpfc_iocbq *piocb; 9002 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; 9003 struct serv_parm *sp; 9004 uint8_t fabric_param_changed; 9005 9006 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9007 "0123 FDISC completes. x%x/x%x prevDID: x%x\n", 9008 irsp->ulpStatus, irsp->un.ulpWord[4], 9009 vport->fc_prevDID); 9010 /* Since all FDISCs are being single threaded, we 9011 * must reset the discovery timer for ALL vports 9012 * waiting to send FDISC when one completes.
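 * The loop below walks phba->fabric_iocb_list (the FDISCs still queued
 * by lpfc_issue_fabric_iocb()) and restarts the discovery timer for each
 * queued vport via lpfc_set_disctmo().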
9013 */ 9014 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) { 9015 lpfc_set_disctmo(piocb->vport); 9016 } 9017 9018 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9019 "FDISC cmpl: status:x%x/x%x prevdid:x%x", 9020 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID); 9021 9022 if (irsp->ulpStatus) { 9023 9024 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) { 9025 lpfc_retry_pport_discovery(phba); 9026 goto out; 9027 } 9028 9029 /* Check for retry */ 9030 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) 9031 goto out; 9032 /* FDISC failed */ 9033 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9034 "0126 FDISC failed. (x%x/x%x)\n", 9035 irsp->ulpStatus, irsp->un.ulpWord[4]); 9036 goto fdisc_failed; 9037 } 9038 spin_lock_irq(shost->host_lock); 9039 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 9040 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 9041 vport->fc_flag |= FC_FABRIC; 9042 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 9043 vport->fc_flag |= FC_PUBLIC_LOOP; 9044 spin_unlock_irq(shost->host_lock); 9045 9046 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 9047 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 9048 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 9049 if (!prsp) 9050 goto out; 9051 sp = prsp->virt + sizeof(uint32_t); 9052 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 9053 memcpy(&vport->fabric_portname, &sp->portName, 9054 sizeof(struct lpfc_name)); 9055 memcpy(&vport->fabric_nodename, &sp->nodeName, 9056 sizeof(struct lpfc_name)); 9057 if (fabric_param_changed && 9058 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9059 /* If our NportID changed, we need to ensure all 9060 * remaining NPORTs get unreg_login'ed so we can 9061 * issue unreg_vpi. 9062 */ 9063 list_for_each_entry_safe(np, next_np, 9064 &vport->fc_nodes, nlp_listp) { 9065 if (!NLP_CHK_NODE_ACT(ndlp) || 9066 (np->nlp_state != NLP_STE_NPR_NODE) || 9067 !(np->nlp_flag & NLP_NPR_ADISC)) 9068 continue; 9069 spin_lock_irq(shost->host_lock); 9070 np->nlp_flag &= ~NLP_NPR_ADISC; 9071 spin_unlock_irq(shost->host_lock); 9072 lpfc_unreg_rpi(vport, np); 9073 } 9074 lpfc_cleanup_pending_mbox(vport); 9075 9076 if (phba->sli_rev == LPFC_SLI_REV4) 9077 lpfc_sli4_unreg_all_rpis(vport); 9078 9079 lpfc_mbx_unreg_vpi(vport); 9080 spin_lock_irq(shost->host_lock); 9081 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9082 if (phba->sli_rev == LPFC_SLI_REV4) 9083 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 9084 else 9085 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 9086 spin_unlock_irq(shost->host_lock); 9087 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 9088 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9089 /* 9090 * Driver needs to re-reg VPI in order for f/w 9091 * to update the MAC address. 9092 */ 9093 lpfc_register_new_vport(phba, vport, ndlp); 9094 goto out; 9095 } 9096 9097 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 9098 lpfc_issue_init_vpi(vport); 9099 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 9100 lpfc_register_new_vport(phba, vport, ndlp); 9101 else 9102 lpfc_do_scr_ns_plogi(phba, vport); 9103 goto out; 9104 fdisc_failed: 9105 if (vport->fc_vport && 9106 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 9107 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9108 /* Cancel discovery timer */ 9109 lpfc_can_disctmo(vport); 9110 lpfc_nlp_put(ndlp); 9111 out: 9112 lpfc_els_free_iocb(phba, cmdiocb); 9113 } 9114 9115 /** 9116 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 9117 * @vport: pointer to a virtual N_Port data structure. 9118 * @ndlp: pointer to a node-list data structure. 
9119 * @retry: number of retries to the command IOCB. 9120 * 9121 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 9122 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 9123 * routine to issue the IOCB, which makes sure only one outstanding fabric 9124 * IOCB will be sent off HBA at any given time. 9125 * 9126 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 9127 * will be incremented by 1 for holding the ndlp and the reference to ndlp 9128 * will be stored into the context1 field of the IOCB for the completion 9129 * callback function to the FDISC ELS command. 9130 * 9131 * Return code 9132 * 0 - Successfully issued fdisc iocb command 9133 * 1 - Failed to issue fdisc iocb command 9134 **/ 9135 static int 9136 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 9137 uint8_t retry) 9138 { 9139 struct lpfc_hba *phba = vport->phba; 9140 IOCB_t *icmd; 9141 struct lpfc_iocbq *elsiocb; 9142 struct serv_parm *sp; 9143 uint8_t *pcmd; 9144 uint16_t cmdsize; 9145 int did = ndlp->nlp_DID; 9146 int rc; 9147 9148 vport->port_state = LPFC_FDISC; 9149 vport->fc_myDID = 0; 9150 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 9151 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 9152 ELS_CMD_FDISC); 9153 if (!elsiocb) { 9154 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9155 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9156 "0255 Issue FDISC: no IOCB\n"); 9157 return 1; 9158 } 9159 9160 icmd = &elsiocb->iocb; 9161 icmd->un.elsreq64.myID = 0; 9162 icmd->un.elsreq64.fl = 1; 9163 9164 /* 9165 * SLI3 ports require a different context type value than SLI4. 9166 * Catch SLI3 ports here and override the prep. 9167 */ 9168 if (phba->sli_rev == LPFC_SLI_REV3) { 9169 icmd->ulpCt_h = 1; 9170 icmd->ulpCt_l = 0; 9171 } 9172 9173 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9174 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 9175 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 9176 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 9177 sp = (struct serv_parm *) pcmd; 9178 /* Setup CSPs accordingly for Fabric */ 9179 sp->cmn.e_d_tov = 0; 9180 sp->cmn.w2.r_a_tov = 0; 9181 sp->cmn.virtual_fabric_support = 0; 9182 sp->cls1.classValid = 0; 9183 sp->cls2.seqDelivery = 1; 9184 sp->cls3.seqDelivery = 1; 9185 9186 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 9187 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 9188 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 9189 pcmd += sizeof(uint32_t); /* Port Name */ 9190 memcpy(pcmd, &vport->fc_portname, 8); 9191 pcmd += sizeof(uint32_t); /* Node Name */ 9192 pcmd += sizeof(uint32_t); /* Node Name */ 9193 memcpy(pcmd, &vport->fc_nodename, 8); 9194 sp->cmn.valid_vendor_ver_level = 0; 9195 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 9196 lpfc_set_disctmo(vport); 9197 9198 phba->fc_stat.elsXmitFDISC++; 9199 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 9200 9201 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9202 "Issue FDISC: did:x%x", 9203 did, 0, 0); 9204 9205 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 9206 if (rc == IOCB_ERROR) { 9207 lpfc_els_free_iocb(phba, elsiocb); 9208 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9209 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 9210 "0256 Issue FDISC: Cannot send IOCB\n"); 9211 return 1; 9212 } 9213 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 9214 return 0; 9215 } 9216 9217 /** 9218 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 9219 * @phba: pointer to lpfc hba 
data structure. 9220 * @cmdiocb: pointer to lpfc command iocb data structure. 9221 * @rspiocb: pointer to lpfc response iocb data structure. 9222 * 9223 * This routine is the completion callback function to the issuing of a LOGO 9224 * ELS command off a vport. It frees the command IOCB and then decrements the 9225 * reference count held on the ndlp for this completion function, indicating that 9226 * the reference to the ndlp is no longer needed. Note that the 9227 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this 9228 * callback function and an additional explicit ndlp reference decrement 9229 * will trigger the actual release of the ndlp. 9230 **/ 9231 static void 9232 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9233 struct lpfc_iocbq *rspiocb) 9234 { 9235 struct lpfc_vport *vport = cmdiocb->vport; 9236 IOCB_t *irsp; 9237 struct lpfc_nodelist *ndlp; 9238 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9239 9240 ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 9241 irsp = &rspiocb->iocb; 9242 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9243 "LOGO npiv cmpl: status:x%x/x%x did:x%x", 9244 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID); 9245 9246 lpfc_els_free_iocb(phba, cmdiocb); 9247 vport->unreg_vpi_cmpl = VPORT_ERROR; 9248 9249 /* Trigger the release of the ndlp after logo */ 9250 lpfc_nlp_put(ndlp); 9251 9252 /* NPIV LOGO completes to NPort <nlp_DID> */ 9253 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 9254 "2928 NPIV LOGO completes to NPort x%x " 9255 "Data: x%x x%x x%x x%x\n", 9256 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 9257 irsp->ulpTimeout, vport->num_disc_nodes); 9258 9259 if (irsp->ulpStatus == IOSTAT_SUCCESS) { 9260 spin_lock_irq(shost->host_lock); 9261 vport->fc_flag &= ~FC_NDISC_ACTIVE; 9262 vport->fc_flag &= ~FC_FABRIC; 9263 spin_unlock_irq(shost->host_lock); 9264 lpfc_can_disctmo(vport); 9265 } 9266 } 9267 9268 /** 9269 * lpfc_issue_els_npiv_logo - Issue a logo off a vport 9270 * @vport: pointer to a virtual N_Port data structure. 9271 * @ndlp: pointer to a node-list data structure. 9272 * 9273 * This routine issues a LOGO ELS command to an @ndlp off a @vport. 9274 * 9275 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp 9276 * will be incremented by 1 for holding the ndlp and the reference to ndlp 9277 * will be stored into the context1 field of the IOCB for the completion 9278 * callback function to the LOGO ELS command.
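 *
 * A minimal caller sketch (illustrative only; looking the fabric node up
 * first is an assumption about the caller, not something this routine
 * requires):
 *
 *	struct lpfc_nodelist *ndlp;
 *
 *	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 *	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
 *		(void) lpfc_issue_els_npiv_logo(vport, ndlp);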
9279 * 9280 * Return codes 9281 * 0 - Successfully issued logo off the @vport 9282 * 1 - Failed to issue logo off the @vport 9283 **/ 9284 int 9285 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 9286 { 9287 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9288 struct lpfc_hba *phba = vport->phba; 9289 struct lpfc_iocbq *elsiocb; 9290 uint8_t *pcmd; 9291 uint16_t cmdsize; 9292 9293 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 9294 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 9295 ELS_CMD_LOGO); 9296 if (!elsiocb) 9297 return 1; 9298 9299 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9300 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 9301 pcmd += sizeof(uint32_t); 9302 9303 /* Fill in LOGO payload */ 9304 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 9305 pcmd += sizeof(uint32_t); 9306 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 9307 9308 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9309 "Issue LOGO npiv did:x%x flg:x%x", 9310 ndlp->nlp_DID, ndlp->nlp_flag, 0); 9311 9312 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 9313 spin_lock_irq(shost->host_lock); 9314 ndlp->nlp_flag |= NLP_LOGO_SND; 9315 spin_unlock_irq(shost->host_lock); 9316 if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == 9317 IOCB_ERROR) { 9318 spin_lock_irq(shost->host_lock); 9319 ndlp->nlp_flag &= ~NLP_LOGO_SND; 9320 spin_unlock_irq(shost->host_lock); 9321 lpfc_els_free_iocb(phba, elsiocb); 9322 return 1; 9323 } 9324 return 0; 9325 } 9326 9327 /** 9328 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 9329 * @ptr: holder for the timer function associated data. 9330 * 9331 * This routine is invoked by the fabric iocb block timer after 9332 * timeout. It posts the fabric iocb block timeout event by setting the 9333 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 9334 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 9335 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 9336 * posted event WORKER_FABRIC_BLOCK_TMO. 9337 **/ 9338 void 9339 lpfc_fabric_block_timeout(struct timer_list *t) 9340 { 9341 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 9342 unsigned long iflags; 9343 uint32_t tmo_posted; 9344 9345 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 9346 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 9347 if (!tmo_posted) 9348 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 9349 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 9350 9351 if (!tmo_posted) 9352 lpfc_worker_wake_up(phba); 9353 return; 9354 } 9355 9356 /** 9357 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 9358 * @phba: pointer to lpfc hba data structure. 9359 * 9360 * This routine issues one fabric iocb from the driver internal list to 9361 * the HBA. It first checks whether it's ready to issue one fabric iocb to 9362 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 9363 * remove one pending fabric iocb from the driver internal list and invokes 9364 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
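 * If the issue attempt fails with IOCB_ERROR, the iocb is completed
 * locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and the routine
 * loops back to try the next pending fabric iocb on the list.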
9365 **/ 9366 static void 9367 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 9368 { 9369 struct lpfc_iocbq *iocb; 9370 unsigned long iflags; 9371 int ret; 9372 IOCB_t *cmd; 9373 9374 repeat: 9375 iocb = NULL; 9376 spin_lock_irqsave(&phba->hbalock, iflags); 9377 /* Post any pending iocb to the SLI layer */ 9378 if (atomic_read(&phba->fabric_iocb_count) == 0) { 9379 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 9380 list); 9381 if (iocb) 9382 /* Increment fabric iocb count to hold the position */ 9383 atomic_inc(&phba->fabric_iocb_count); 9384 } 9385 spin_unlock_irqrestore(&phba->hbalock, iflags); 9386 if (iocb) { 9387 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 9388 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 9389 iocb->iocb_flag |= LPFC_IO_FABRIC; 9390 9391 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 9392 "Fabric sched1: ste:x%x", 9393 iocb->vport->port_state, 0, 0); 9394 9395 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 9396 9397 if (ret == IOCB_ERROR) { 9398 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 9399 iocb->fabric_iocb_cmpl = NULL; 9400 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 9401 cmd = &iocb->iocb; 9402 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 9403 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 9404 iocb->iocb_cmpl(phba, iocb, iocb); 9405 9406 atomic_dec(&phba->fabric_iocb_count); 9407 goto repeat; 9408 } 9409 } 9410 9411 return; 9412 } 9413 9414 /** 9415 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 9416 * @phba: pointer to lpfc hba data structure. 9417 * 9418 * This routine unblocks the issuing fabric iocb command. The function 9419 * will clear the fabric iocb block bit and then invoke the routine 9420 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 9421 * from the driver internal fabric iocb list. 9422 **/ 9423 void 9424 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 9425 { 9426 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9427 9428 lpfc_resume_fabric_iocbs(phba); 9429 return; 9430 } 9431 9432 /** 9433 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 9434 * @phba: pointer to lpfc hba data structure. 9435 * 9436 * This routine blocks the issuing fabric iocb for a specified amount of 9437 * time (currently 100 ms). This is done by set the fabric iocb block bit 9438 * and set up a timeout timer for 100ms. When the block bit is set, no more 9439 * fabric iocb will be issued out of the HBA. 9440 **/ 9441 static void 9442 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 9443 { 9444 int blocked; 9445 9446 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9447 /* Start a timer to unblock fabric iocbs after 100ms */ 9448 if (!blocked) 9449 mod_timer(&phba->fabric_block_timer, 9450 jiffies + msecs_to_jiffies(100)); 9451 9452 return; 9453 } 9454 9455 /** 9456 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 9457 * @phba: pointer to lpfc hba data structure. 9458 * @cmdiocb: pointer to lpfc command iocb data structure. 9459 * @rspiocb: pointer to lpfc response iocb data structure. 9460 * 9461 * This routine is the callback function that is put to the fabric iocb's 9462 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 9463 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback 9464 * function first restores and invokes the original iocb's callback function 9465 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 9466 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 
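 * Completions that indicate congestion (IOSTAT_NPORT_BSY, IOSTAT_FABRIC_BSY,
 * a temporarily-unavailable reject, or an LS_RJT with an "unable to perform"
 * or "logical busy" reason code) additionally invoke lpfc_block_fabric_iocbs()
 * to pause fabric traffic for a short interval.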
9467 **/ 9468 static void 9469 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9470 struct lpfc_iocbq *rspiocb) 9471 { 9472 struct ls_rjt stat; 9473 9474 BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC); 9475 9476 switch (rspiocb->iocb.ulpStatus) { 9477 case IOSTAT_NPORT_RJT: 9478 case IOSTAT_FABRIC_RJT: 9479 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) { 9480 lpfc_block_fabric_iocbs(phba); 9481 } 9482 break; 9483 9484 case IOSTAT_NPORT_BSY: 9485 case IOSTAT_FABRIC_BSY: 9486 lpfc_block_fabric_iocbs(phba); 9487 break; 9488 9489 case IOSTAT_LS_RJT: 9490 stat.un.lsRjtError = 9491 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]); 9492 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) || 9493 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY)) 9494 lpfc_block_fabric_iocbs(phba); 9495 break; 9496 } 9497 9498 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0); 9499 9500 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl; 9501 cmdiocb->fabric_iocb_cmpl = NULL; 9502 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; 9503 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb); 9504 9505 atomic_dec(&phba->fabric_iocb_count); 9506 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) { 9507 /* Post any pending iocbs to HBA */ 9508 lpfc_resume_fabric_iocbs(phba); 9509 } 9510 } 9511 9512 /** 9513 * lpfc_issue_fabric_iocb - Issue a fabric iocb command 9514 * @phba: pointer to lpfc hba data structure. 9515 * @iocb: pointer to lpfc command iocb data structure. 9516 * 9517 * This routine is used as the top-level API for issuing a fabric iocb command 9518 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver 9519 * function makes sure that only one fabric bound iocb will be outstanding at 9520 * any given time. As such, this function will first check to see whether there 9521 * is already an outstanding fabric iocb on the wire. If so, it will put the 9522 * newly issued iocb onto the driver internal fabric iocb list, waiting to be 9523 * issued later. Otherwise, it will issue the iocb on the wire and update the 9524 * fabric iocb count to indicate that there is one fabric iocb on the wire. 9525 * 9526 * Note, this implementation can potentially send fabric IOCBs out of 9527 * order: the construction of the "ready" boolean does 9528 * not include the condition that the internal fabric IOCB list is empty. As 9529 * such, it is possible for a fabric IOCB issued by this routine to "jump" 9530 * ahead of the fabric IOCBs already on the internal list.
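 *
 * A stricter readiness test that would avoid the reordering (a sketch only,
 * not what this routine currently does) would also require the internal
 * list to be empty:
 *
 *	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
 *		list_empty(&phba->fabric_iocb_list) &&
 *		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);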
9531 * 9532 * Return code 9533 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 9534 * IOCB_ERROR - failed to issue fabric iocb 9535 **/ 9536 static int 9537 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 9538 { 9539 unsigned long iflags; 9540 int ready; 9541 int ret; 9542 9543 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 9544 9545 spin_lock_irqsave(&phba->hbalock, iflags); 9546 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 9547 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9548 9549 if (ready) 9550 /* Increment fabric iocb count to hold the position */ 9551 atomic_inc(&phba->fabric_iocb_count); 9552 spin_unlock_irqrestore(&phba->hbalock, iflags); 9553 if (ready) { 9554 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 9555 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 9556 iocb->iocb_flag |= LPFC_IO_FABRIC; 9557 9558 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 9559 "Fabric sched2: ste:x%x", 9560 iocb->vport->port_state, 0, 0); 9561 9562 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 9563 9564 if (ret == IOCB_ERROR) { 9565 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 9566 iocb->fabric_iocb_cmpl = NULL; 9567 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 9568 atomic_dec(&phba->fabric_iocb_count); 9569 } 9570 } else { 9571 spin_lock_irqsave(&phba->hbalock, iflags); 9572 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 9573 spin_unlock_irqrestore(&phba->hbalock, iflags); 9574 ret = IOCB_SUCCESS; 9575 } 9576 return ret; 9577 } 9578 9579 /** 9580 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 9581 * @vport: pointer to a virtual N_Port data structure. 9582 * 9583 * This routine aborts all the IOCBs associated with a @vport from the 9584 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 9585 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 9586 * list, removes each IOCB associated with the @vport off the list, set the 9587 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function 9588 * associated with the IOCB. 9589 **/ 9590 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 9591 { 9592 LIST_HEAD(completions); 9593 struct lpfc_hba *phba = vport->phba; 9594 struct lpfc_iocbq *tmp_iocb, *piocb; 9595 9596 spin_lock_irq(&phba->hbalock); 9597 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 9598 list) { 9599 9600 if (piocb->vport != vport) 9601 continue; 9602 9603 list_move_tail(&piocb->list, &completions); 9604 } 9605 spin_unlock_irq(&phba->hbalock); 9606 9607 /* Cancel all the IOCBs from the completions list */ 9608 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9609 IOERR_SLI_ABORTED); 9610 } 9611 9612 /** 9613 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 9614 * @ndlp: pointer to a node-list data structure. 9615 * 9616 * This routine aborts all the IOCBs associated with an @ndlp from the 9617 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 9618 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 9619 * list, removes each IOCB associated with the @ndlp off the list, set the 9620 * status feild to IOSTAT_LOCAL_REJECT, and invokes the callback function 9621 * associated with the IOCB. 
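 * Matching of queued iocbs against the @ndlp is done with
 * lpfc_check_sli_ndlp() on the ELS ring, and the matched iocbs are then
 * completed through lpfc_sli_cancel_iocbs().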
9622 **/ 9623 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 9624 { 9625 LIST_HEAD(completions); 9626 struct lpfc_hba *phba = ndlp->phba; 9627 struct lpfc_iocbq *tmp_iocb, *piocb; 9628 struct lpfc_sli_ring *pring; 9629 9630 pring = lpfc_phba_elsring(phba); 9631 9632 if (unlikely(!pring)) 9633 return; 9634 9635 spin_lock_irq(&phba->hbalock); 9636 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 9637 list) { 9638 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 9639 9640 list_move_tail(&piocb->list, &completions); 9641 } 9642 } 9643 spin_unlock_irq(&phba->hbalock); 9644 9645 /* Cancel all the IOCBs from the completions list */ 9646 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9647 IOERR_SLI_ABORTED); 9648 } 9649 9650 /** 9651 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 9652 * @phba: pointer to lpfc hba data structure. 9653 * 9654 * This routine aborts all the IOCBs currently on the driver internal 9655 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 9656 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 9657 * list, removes IOCBs off the list, set the status feild to 9658 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 9659 * the IOCB. 9660 **/ 9661 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 9662 { 9663 LIST_HEAD(completions); 9664 9665 spin_lock_irq(&phba->hbalock); 9666 list_splice_init(&phba->fabric_iocb_list, &completions); 9667 spin_unlock_irq(&phba->hbalock); 9668 9669 /* Cancel all the IOCBs from the completions list */ 9670 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9671 IOERR_SLI_ABORTED); 9672 } 9673 9674 /** 9675 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 9676 * @vport: pointer to lpfc vport data structure. 9677 * 9678 * This routine is invoked by the vport cleanup for deletions and the cleanup 9679 * for an ndlp on removal. 9680 **/ 9681 void 9682 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 9683 { 9684 struct lpfc_hba *phba = vport->phba; 9685 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 9686 unsigned long iflag = 0; 9687 9688 spin_lock_irqsave(&phba->hbalock, iflag); 9689 spin_lock(&phba->sli4_hba.sgl_list_lock); 9690 list_for_each_entry_safe(sglq_entry, sglq_next, 9691 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 9692 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) 9693 sglq_entry->ndlp = NULL; 9694 } 9695 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9696 spin_unlock_irqrestore(&phba->hbalock, iflag); 9697 return; 9698 } 9699 9700 /** 9701 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 9702 * @phba: pointer to lpfc hba data structure. 9703 * @axri: pointer to the els xri abort wcqe structure. 9704 * 9705 * This routine is invoked by the worker thread to process a SLI4 slow-path 9706 * ELS aborted xri. 
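 * If the aborted XRI matches an sglq on the aborted-ELS sgl list, that sglq
 * is returned to the free ELS sgl list, marked SGL_FREED, and
 * lpfc_set_rrq_active() is called for the associated node; otherwise, if the
 * XRI maps to an active sglq, that sglq is marked SGL_XRI_ABORTED.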
9707 **/ 9708 void 9709 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 9710 struct sli4_wcqe_xri_aborted *axri) 9711 { 9712 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 9713 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 9714 uint16_t lxri = 0; 9715 9716 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 9717 unsigned long iflag = 0; 9718 struct lpfc_nodelist *ndlp; 9719 struct lpfc_sli_ring *pring; 9720 9721 pring = lpfc_phba_elsring(phba); 9722 9723 spin_lock_irqsave(&phba->hbalock, iflag); 9724 spin_lock(&phba->sli4_hba.sgl_list_lock); 9725 list_for_each_entry_safe(sglq_entry, sglq_next, 9726 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 9727 if (sglq_entry->sli4_xritag == xri) { 9728 list_del(&sglq_entry->list); 9729 ndlp = sglq_entry->ndlp; 9730 sglq_entry->ndlp = NULL; 9731 list_add_tail(&sglq_entry->list, 9732 &phba->sli4_hba.lpfc_els_sgl_list); 9733 sglq_entry->state = SGL_FREED; 9734 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9735 spin_unlock_irqrestore(&phba->hbalock, iflag); 9736 lpfc_set_rrq_active(phba, ndlp, 9737 sglq_entry->sli4_lxritag, 9738 rxid, 1); 9739 9740 /* Check if TXQ queue needs to be serviced */ 9741 if (pring && !list_empty(&pring->txq)) 9742 lpfc_worker_wake_up(phba); 9743 return; 9744 } 9745 } 9746 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9747 lxri = lpfc_sli4_xri_inrange(phba, xri); 9748 if (lxri == NO_XRI) { 9749 spin_unlock_irqrestore(&phba->hbalock, iflag); 9750 return; 9751 } 9752 spin_lock(&phba->sli4_hba.sgl_list_lock); 9753 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 9754 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 9755 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9756 spin_unlock_irqrestore(&phba->hbalock, iflag); 9757 return; 9758 } 9759 sglq_entry->state = SGL_XRI_ABORTED; 9760 spin_unlock(&phba->sli4_hba.sgl_list_lock); 9761 spin_unlock_irqrestore(&phba->hbalock, iflag); 9762 return; 9763 } 9764 9765 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 9766 * @vport: pointer to virtual port object. 9767 * @ndlp: nodelist pointer for the impacted node. 9768 * 9769 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 9770 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 9771 * the driver is required to send a LOGO to the remote node before it 9772 * attempts to recover its login to the remote node. 9773 */ 9774 void 9775 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 9776 struct lpfc_nodelist *ndlp) 9777 { 9778 struct Scsi_Host *shost; 9779 struct lpfc_hba *phba; 9780 unsigned long flags = 0; 9781 9782 shost = lpfc_shost_from_vport(vport); 9783 phba = vport->phba; 9784 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 9785 lpfc_printf_log(phba, KERN_INFO, 9786 LOG_SLI, "3093 No rport recovery needed. " 9787 "rport in state 0x%x\n", ndlp->nlp_state); 9788 return; 9789 } 9790 lpfc_printf_log(phba, KERN_ERR, 9791 LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR, 9792 "3094 Start rport recovery on shost id 0x%x " 9793 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 9794 "flags 0x%x\n", 9795 shost->host_no, ndlp->nlp_DID, 9796 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 9797 ndlp->nlp_flag); 9798 /* 9799 * The rport is not responding. Remove the FCP-2 flag to prevent 9800 * an ADISC in the follow-up recovery code. 9801 */ 9802 spin_lock_irqsave(shost->host_lock, flags); 9803 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 9804 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 9805 spin_unlock_irqrestore(shost->host_lock, flags); 9806 lpfc_unreg_rpi(vport, ndlp); 9807 } 9808 9809