/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
/* See Fibre Channel protocol T11 FC-LS for details */
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);
static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
				  struct lpfc_iocbq *);
static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
				struct lpfc_nodelist *ndlp, uint8_t retry);
static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *iocb);

static int lpfc_max_els_tries = 3;

/**
 * lpfc_els_chk_latt - Check host link attention event for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there is an outstanding host link
 * attention event during the discovery process with the @vport. It is done
 * by reading the HBA's Host Attention (HA) register. If there is any host
 * link attention events during this @vport's discovery process, the @vport
 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
 * be issued if the link state is not already in host link cleared state,
 * and a return code shall indicate whether the host link attention event
 * had happened.
 *
 * Note that, if either the host link is in state LPFC_LINK_DOWN or the
 * @vport state is LPFC_VPORT_READY, the request for checking host link
 * attention event will be ignored and a return code shall indicate no host
 * link attention event had happened.
 *
 * Return codes
 *   0 - no host link attention event happened
 *   1 - host link attention event happened
 **/
int
lpfc_els_chk_latt(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t ha_copy;

	if (vport->port_state >= LPFC_VPORT_READY ||
	    phba->link_state == LPFC_LINK_DOWN ||
	    phba->sli_rev > LPFC_SLI_REV3)
		return 0;

	/* Read the HBA Host Attention Register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0237 Pending Link Event during "
			 "Discovery: State x%x\n",
			 phba->pport->port_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	if (phba->link_state != LPFC_CLEAR_LA)
		lpfc_issue_clear_la(phba, vport);

	return 1;
}

/**
 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
 * @vport: pointer to a host virtual N_Port data structure.
 * @expectRsp: flag indicating whether response is expected.
 * @cmdSize: size of the ELS command.
 * @retry: number of retries to the command IOCB when it fails.
 * @ndlp: pointer to a node-list data structure.
 * @did: destination identifier.
 * @elscmd: the ELS command code.
 *
 * This routine is used for allocating a lpfc-IOCB data structure from
 * the driver lpfc-IOCB free-list and prepare the IOCB with the parameters
 * passed into the routine for the discovery state machine to issue Extended
 * Link Service (ELS) commands. It is a generic lpfc-IOCB allocation
 * and preparation routine that is used by all the discovery state machine
 * routines and the ELS command-specific fields will be later set up by
 * the individual discovery machine routines after calling this routine
 * allocating and preparing a generic IOCB data structure. It fills in the
 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1 and the reference to the ndlp is put into
 * context1 of the IOCB data structure for this IOCB to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared els iocb data structure
 *   NULL - when els iocb data structure allocation/preparation failed
 **/
struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
		   uint16_t cmdSize, uint8_t retry,
		   struct lpfc_nodelist *ndlp, uint32_t did,
		   uint32_t elscmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;


	if (!lpfc_is_link_up(phba))
		return NULL;

	/* Allocate buffer for command iocb */
	elsiocb = lpfc_sli_get_iocbq(phba);

	if (elsiocb == NULL)
		return NULL;

	/*
	 * If this command is for fabric controller and HBA running
	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
	 */
	if ((did == Fabric_DID) &&
	    (phba->hba_flag & HBA_FIP_SUPPORT) &&
	    ((elscmd == ELS_CMD_FLOGI) ||
	     (elscmd == ELS_CMD_FDISC) ||
	     (elscmd == ELS_CMD_LOGO)))
		switch (elscmd) {
		case ELS_CMD_FLOGI:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_FDISC:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		case ELS_CMD_LOGO:
			elsiocb->iocb_flag |=
				((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
				 & LPFC_FIP_ELS_ID_MASK);
			break;
		}
	else
		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;

	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto els_iocb_free_pcmb_exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (!prsp || !prsp->virt)
			goto els_iocb_free_prsp_exit;
		INIT_LIST_HEAD(&prsp->list);
	} else
		prsp = NULL;

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (!pbuflist || !pbuflist->virt)
		goto els_iocb_free_pbuf_exit;

	INIT_LIST_HEAD(&pbuflist->list);

	if (expectRsp) {
		icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));

		icmd->un.elsreq64.remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
		if (elscmd == ELS_CMD_FLOGI)
			icmd->ulpTimeout = FF_DEF_RATOV * 2;
		else if (elscmd == ELS_CMD_LOGO)
			icmd->ulpTimeout = phba->fc_ratov;
		else
			icmd->ulpTimeout = phba->fc_ratov * 2;
	} else {
		icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
		icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
		icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		icmd->un.xseq64.bdl.bdeSize = sizeof(struct ulp_bde64);
		icmd->un.xseq64.xmit_els_remoteID = did;	/* DID */
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
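	/* All ELS IOCBs built here are sent as Class 3 service */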
	icmd->ulpClass = CLASS3;

	/*
	 * If we have NPIV enabled, we want to send ELS traffic by VPI.
	 * For SLI4, since the driver controls VPIs we also want to include
	 * all ELS pt2pt protocol traffic as well.
	 */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
	    ((phba->sli_rev == LPFC_SLI_REV4) &&
	     (vport->fc_flag & FC_PT2PT))) {

		if (expectRsp) {
			icmd->un.elsreq64.myID = vport->fc_myDID;

			/* For ELS_REQUEST64_CR, use the VPI by default */
			icmd->ulpContext = phba->vpi_ids[vport->vpi];
		}

		icmd->ulpCt_h = 0;
		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
		if (elscmd == ELS_CMD_ECHO)
			icmd->ulpCt_l = 0; /* context = invalid RPI */
		else
			icmd->ulpCt_l = 1; /* context = VPI */
	}

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	elsiocb->context2 = pcmd;
	elsiocb->context3 = pbuflist;
	elsiocb->retry = retry;
	elsiocb->vport = vport;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}
	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0116 Xmit ELS command x%x to remote "
				 "NPORT x%x I/O tag: x%x, port state:x%x "
				 "rpi x%x fc_flag:x%x\n",
				 elscmd, did, elsiocb->iotag,
				 vport->port_state, ndlp->nlp_rpi,
				 vport->fc_flag);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "0117 Xmit ELS response x%x to remote "
				 "NPORT x%x I/O tag: x%x, size: x%x "
				 "port_state x%x rpi x%x fc_flag x%x\n",
				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
				 cmdSize, vport->port_state,
				 ndlp->nlp_rpi, vport->fc_flag);
	}
	return elsiocb;

els_iocb_free_pbuf_exit:
	if (expectRsp)
		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(pbuflist);

els_iocb_free_prsp_exit:
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(prsp);

els_iocb_free_pcmb_exit:
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, elsiocb);
	return NULL;
}

/**
 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a fabric registration login for a @vport. An
 * active ndlp node with Fabric_DID must already exist for this @vport.
 * The routine invokes two mailbox commands to carry out fabric registration
 * login through the HBA firmware: the first mailbox command requests the
 * HBA to perform link configuration for the @vport; and the second mailbox
 * command requests the HBA to perform the actual fabric registration login
 * with the @vport.
 *
 * Return code
 *   0 - successfully issued fabric registration login for @vport
 *   -ENXIO -- failed to issue fabric registration login for @vport
 **/
int
lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct serv_parm *sp;
	int rc;
	int err = 0;

	sp = &phba->fc_fabparam;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		err = 1;
		goto fail;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 2;
		goto fail;
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 3;
		goto fail_free_mbox;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		err = 4;
		goto fail;
	}
	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
			  ndlp->nlp_rpi);
	if (rc) {
		err = 5;
		goto fail_free_mbox;
	}

	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->vport = vport;
	/* increment the reference count on ndlp to hold reference
	 * for the callback routine.
	 */
	mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
	if (!mbox->ctx_ndlp) {
		err = 6;
		goto fail_no_ndlp;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		err = 7;
		goto fail_issue_reg_login;
	}

	return 0;

fail_issue_reg_login:
	/* decrement the reference count on ndlp just incremented
	 * for the failed mbox command.
	 */
	lpfc_nlp_put(ndlp);
fail_no_ndlp:
	mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);

fail:
	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0249 Cannot issue Register Fabric login: Err %d\n",
			 err);
	return -ENXIO;
}

/**
 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued REG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *dmabuf = NULL;
	int rc = 0;

	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
	    !(vport->fc_flag & FC_PT2PT)) {
		ndlp = lpfc_findnode_did(vport, Fabric_DID);
		if (!ndlp) {
			rc = -ENODEV;
			goto fail;
		}
	}

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto fail;
	}

	/* Supply CSP's only if we are fabric connect or pt-to-pt connect */
	if ((vport->fc_flag & FC_FABRIC) || (vport->fc_flag & FC_PT2PT)) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			goto fail;
		}
		dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
		if (!dmabuf->virt) {
			rc = -ENOMEM;
			goto fail;
		}
		memcpy(dmabuf->virt, &phba->fc_fabparam,
		       sizeof(struct serv_parm));
	}

	vport->port_state = LPFC_FABRIC_CFG_LINK;
	if (dmabuf)
		lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
	else
		lpfc_reg_vfi(mboxq, vport, 0);

	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
	mboxq->vport = vport;
	mboxq->ctx_buf = dmabuf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = -ENXIO;
		goto fail;
	}
	return 0;

fail:
	if (mboxq)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0289 Issue Register VFI failed: Err %d\n", rc);
	return rc;
}

/**
 * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
 * the @vport. This mailbox command is necessary for SLI4 ports only.
 *
 * Return code
 *   0 - successfully issued UNREG_VFI for @vport
 *   A failure code otherwise.
 **/
int
lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost;
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2556 UNREG_VFI mbox allocation failed"
				"HBA state x%x\n", phba->pport->port_state);
		return -ENOMEM;
	}

	lpfc_unreg_vfi(mboxq, vport);
	mboxq->vport = vport;
	mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2557 UNREG_VFI issue mbox failed rc x%x "
				"HBA state x%x\n",
				rc, phba->pport->port_state);
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);
	return 0;
}

/**
 * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
 * @vport: pointer to a host virtual N_Port data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is called from the FLOGI/FDISC completion handler functions.
 * It returns 1 when the FCID, Fabric portname, or Fabric nodename in the
 * completion service parameters has changed, else it returns 0. This routine
 * also sets a flag in the vport data structure to delay N_Port discovery
 * after the FLOGI/FDISC completion if the Clean Address bit in the
 * FLOGI/FDISC response is cleared and the FCID, Fabric portname, or Fabric
 * nodename has changed in the completion service parameters.
 *
 * Return code
 *   0 - FCID, Fabric Nodename and Fabric portname are unchanged.
 *   1 - FCID, Fabric Nodename or Fabric portname has changed.
 *
 **/
static uint8_t
lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
			  struct serv_parm *sp)
{
	struct lpfc_hba *phba = vport->phba;
	uint8_t fabric_param_changed = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if ((vport->fc_prevDID != vport->fc_myDID) ||
	    memcmp(&vport->fabric_portname, &sp->portName,
		   sizeof(struct lpfc_name)) ||
	    memcmp(&vport->fabric_nodename, &sp->nodeName,
		   sizeof(struct lpfc_name)) ||
	    (vport->vport_flag & FAWWPN_PARAM_CHG)) {
		fabric_param_changed = 1;
		vport->vport_flag &= ~FAWWPN_PARAM_CHG;
	}
	/*
	 * Word 1 Bit 31 in common service parameter is overloaded.
	 * Word 1 Bit 31 in FLOGI request is multiple NPort request
	 * Word 1 Bit 31 in FLOGI response is clean address bit
	 *
	 * If fabric parameter is changed and clean address bit is
	 * cleared delay nport discovery if
	 * - vport->fc_prevDID != 0 (not initial discovery) OR
	 * - lpfc_delay_discovery module parameter is set.
	 */
	if (fabric_param_changed && !sp->cmn.clean_address_bit &&
	    (vport->fc_prevDID || phba->cfg_delay_discovery)) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_DISC_DELAYED;
		spin_unlock_irq(shost->host_lock);
	}

	return fabric_param_changed;
}


/**
 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 * @irsp: pointer to the IOCB within the lpfc response IOCB.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
 * port in a fabric topology. It properly sets up the parameters to the @ndlp
 * from the IOCB response. It also checks the newly assigned N_Port ID to the
 * @vport against the previously assigned N_Port ID. If it is different from
 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
 * is invoked on all the remaining nodes with the @vport to unregister the
 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
 * is invoked to register login to the fabric.
 *
 * Return code
 *   0 - Success (currently, always return 0)
 **/
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			   struct serv_parm *sp, IOCB_t *irsp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *np;
	struct lpfc_nodelist *next_np;
	uint8_t fabric_param_changed;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_FABRIC;
	spin_unlock_irq(shost->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_edtovResol = sp->cmn.edtovResolution;
	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(shost->host_lock);
	}

	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			     sp->cmn.bbRcvSizeLsb;

	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
	if (fabric_param_changed) {
		/* Reset FDMI attribute masks based on config parameter */
		if (phba->cfg_enable_SmartSAN ||
		    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
			/* Setup appropriate attribute masks */
			vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
			if (phba->cfg_enable_SmartSAN)
				vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
			else
				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
		} else {
			vport->fdmi_hba_mask = 0;
			vport->fdmi_port_mask = 0;
		}

	}
	memcpy(&vport->fabric_portname, &sp->portName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fabric_nodename, &sp->nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
		if (sp->cmn.response_multiple_NPort) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1816 FLOGI NPIV supported, "
					 "response data 0x%x\n",
					 sp->cmn.response_multiple_NPort);
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			/* Because we asked f/w for NPIV it still expects us
			to call reg_vnpid at least for the physical host */
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_ELS | LOG_VPORT,
					 "1817 Fabric does not support NPIV "
					 "- configuring single port mode.\n");
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/*
	 * For FC we need to do some special processing because of the SLI
	 * Port's default settings of the Common Service Parameters.
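	 * On an SLI4 FC port the VFI may be re-registered just below so the
	 * firmware picks up the current fabric CSPs.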
	 */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)) {
		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
		if (fabric_param_changed)
			lpfc_unregister_fcf_prep(phba);

		/* This should just update the VFI CSPs*/
		if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_reg_vfi(vport);
	}

	if (fabric_param_changed &&
	    !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {

		/* If our NportID changed, we need to ensure all
		 * remaining NPORTs get unreg_login'ed.
		 */
		list_for_each_entry_safe(np, next_np,
					 &vport->fc_nodes, nlp_listp) {
			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
			    !(np->nlp_flag & NLP_NPR_ADISC))
				continue;
			spin_lock_irq(&np->lock);
			np->nlp_flag &= ~NLP_NPR_ADISC;
			spin_unlock_irq(&np->lock);
			lpfc_unreg_rpi(vport, np);
		}
		lpfc_cleanup_pending_mbox(vport);

		if (phba->sli_rev == LPFC_SLI_REV4) {
			lpfc_sli4_unreg_all_rpis(vport);
			lpfc_mbx_unreg_vpi(vport);
			spin_lock_irq(shost->host_lock);
			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
			spin_unlock_irq(shost->host_lock);
		}

		/*
		 * For SLI3 and SLI4, the VPI needs to be reregistered in
		 * response to this fabric parameter change event.
		 */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
		   !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
		/*
		 * Driver needs to re-reg VPI in order for f/w
		 * to update the MAC address.
		 */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		lpfc_register_new_vport(phba, vport, ndlp);
		return 0;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
			lpfc_register_new_vport(phba, vport, ndlp);
		else
			lpfc_issue_fabric_reglogin(vport);
	} else {
		ndlp->nlp_type |= NLP_FABRIC;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
		    (vport->vpi_state & LPFC_VPI_REGISTERED)) {
			lpfc_start_fdiscs(phba);
			lpfc_do_scr_ns_plogi(phba, vport);
		} else if (vport->fc_flag & FC_VFI_REGISTERED)
			lpfc_issue_init_vpi(vport);
		else {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
					 "3135 Need register VFI: (x%x/%x)\n",
					 vport->fc_prevDID, vport->fc_myDID);
			lpfc_issue_reg_vfi(vport);
		}
	}
	return 0;
}

/**
 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @sp: pointer to service parameter data structure.
 *
 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
 * in a point-to-point topology. First, the @vport's N_Port Name is compared
 * with the received N_Port Name: if the @vport's N_Port Name is greater than
 * the received N_Port Name lexicographically, this node shall assign local
 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
 * this node shall just wait for the remote node to issue PLOGI and assign
 * N_Port IDs.
 *
 * Return code
 *   0 - Success
 *   -ENXIO - Fail
 **/
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			  struct serv_parm *sp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	vport->fc_flag |= FC_PT2PT;
	spin_unlock_irq(shost->host_lock);

	/* If we are pt2pt with another NPort, force NPIV off! */
	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;

	/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
	if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) {
		lpfc_unregister_fcf_prep(phba);

		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~FC_VFI_REGISTERED;
		spin_unlock_irq(shost->host_lock);
		phba->fc_topology_changed = 0;
	}

	rc = memcmp(&vport->fc_portname, &sp->portName,
		    sizeof(vport->fc_portname));

	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(shost->host_lock);

		/*
		 * N_Port ID cannot be 0, set our Id to LocalID
		 * the other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			vport->fc_myDID = PT2PT_LocalID;

		/* Decrement ndlp reference count indicating that ndlp can be
		 * safely released when other references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = lpfc_nlp_init(vport, PT2PT_RemoteID);
			if (!ndlp)
				goto fail;
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
		       sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
		       sizeof(struct lpfc_name));
		/* Set state will put ndlp onto node list if not already done */
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(&ndlp->lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(&ndlp->lock);

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
	} else {
		/* This side will wait for the PLOGI, decrement ndlp reference
		 * count indicating that ndlp can be released when other
		 * references to it are done.
		 */
		lpfc_nlp_put(ndlp);

		/* Start discovery - this should just do CLEAR_LA */
		lpfc_disc_start(vport);
	}

	return 0;
fail:
	return -ENXIO;
}

/**
 * lpfc_cmpl_els_flogi - Completion callback function for flogi
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the top-level completion callback function for issuing
 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI.
 * If retry has been made (either immediately or delayed with lpfc_els_retry()
 * returning 1), the command IOCB will be released and function returned.
 * If the retry attempt has been given up (possibly reach the maximum
 * number of retries), one additional decrement of ndlp reference shall be
 * invoked before going out after releasing the command IOCB. This will
 * actually release the remote node (Note, lpfc_els_free_iocb() will also
 * invoke one decrement of ndlp reference count). If no error reported in
 * the IOCB status, the command Port ID field is used to determine whether
 * this is a point-to-point topology or a fabric topology: if the Port ID
 * field is assigned, it is a fabric topology; otherwise, it is a
 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
 * specific topology completion conditions.
 **/
static void
lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	uint16_t fcf_index;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(vport)) {
		/* One additional decrement on node reference count to
		 * trigger the release of the node
		 */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "FLOGI cmpl: status:x%x/x%x state:x%x",
			      irsp->ulpStatus, irsp->un.ulpWord[4],
			      vport->port_state);

	if (irsp->ulpStatus) {
		/*
		 * In case of FIP mode, perform roundrobin FCF failover
		 * due to new FCF discovery
		 */
		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
			if (phba->link_state < LPFC_LINK_UP)
				goto stop_rr_fcf_flogi;
			if ((phba->fcoe_cvl_eventtag_attn ==
			     phba->fcoe_cvl_eventtag) &&
			    (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_SLI_ABORTED))
				goto stop_rr_fcf_flogi;
			else
				phba->fcoe_cvl_eventtag_attn =
					phba->fcoe_cvl_eventtag;
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
					"2611 FLOGI failed on FCF (x%x), "
					"status:x%x/x%x, tmo:x%x, perform "
					"roundrobin FCF failover\n",
					phba->fcf.current_rec.fcf_indx,
					irsp->ulpStatus, irsp->un.ulpWord[4],
					irsp->ulpTimeout);
			lpfc_sli4_set_fcf_flogi_fail(phba,
					phba->fcf.current_rec.fcf_indx);
			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
			if (rc)
				goto out;
		}

stop_rr_fcf_flogi:
		/* FLOGI failure */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2858 FLOGI failure Status:x%x/x%x TMO"
					 ":x%x Data x%x x%x\n",
					 irsp->ulpStatus, irsp->un.ulpWord[4],
					 irsp->ulpTimeout, phba->hba_flag,
					 phba->fcf.fcf_flag);

		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
			goto out;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
				 "0150 FLOGI failure Status:x%x/x%x "
				 "xri x%x TMO:x%x\n",
				 irsp->ulpStatus, irsp->un.ulpWord[4],
				 cmdiocb->sli4_xritag, irsp->ulpTimeout);

		/* If this is not a loop open failure, bail out */
		if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
		      ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		       IOERR_LOOP_OPEN_FAILURE)))
			goto flogifail;

		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(shost->host_lock);
		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(shost->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0)
			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
		if ((phba->sli_rev == LPFC_SLI_REV4) &&
		    (!(vport->fc_flag & FC_VFI_REGISTERED) ||
		     (vport->fc_prevDID != vport->fc_myDID) ||
		     phba->fc_topology_changed)) {
			if (vport->fc_flag & FC_VFI_REGISTERED) {
				if (phba->fc_topology_changed) {
					lpfc_unregister_fcf_prep(phba);
					spin_lock_irq(shost->host_lock);
					vport->fc_flag &= ~FC_VFI_REGISTERED;
					spin_unlock_irq(shost->host_lock);
					phba->fc_topology_changed = 0;
				} else {
					lpfc_sli4_unreg_all_rpis(vport);
				}
			}

			/* Do not register VFI if the driver aborted FLOGI */
			if (!lpfc_error_lost_link(irsp))
				lpfc_issue_reg_vfi(vport);

			lpfc_nlp_put(ndlp);
			goto out;
		}
		goto flogifail;
	}
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
	spin_unlock_irq(shost->host_lock);

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
	if (!prsp)
		goto out;
	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0101 FLOGI completes successfully, I/O tag:x%x, "
			 "xri x%x Data: x%x x%x x%x x%x x%x %x\n",
			 cmdiocb->iotag, cmdiocb->sli4_xritag,
			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution,
			 vport->port_state, vport->fc_flag);

	if (vport->port_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
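		 * The fPort bit in the received common service parameters
		 * makes this distinction.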
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
		else if (!(phba->hba_flag & HBA_FCOE_MODE))
			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
		else {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"2831 FLOGI response with cleared Fabric "
				"bit fcf_index 0x%x "
				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
				"Fabric Name "
				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
				phba->fcf.current_rec.fcf_indx,
				phba->fcf.current_rec.switch_name[0],
				phba->fcf.current_rec.switch_name[1],
				phba->fcf.current_rec.switch_name[2],
				phba->fcf.current_rec.switch_name[3],
				phba->fcf.current_rec.switch_name[4],
				phba->fcf.current_rec.switch_name[5],
				phba->fcf.current_rec.switch_name[6],
				phba->fcf.current_rec.switch_name[7],
				phba->fcf.current_rec.fabric_name[0],
				phba->fcf.current_rec.fabric_name[1],
				phba->fcf.current_rec.fabric_name[2],
				phba->fcf.current_rec.fabric_name[3],
				phba->fcf.current_rec.fabric_name[4],
				phba->fcf.current_rec.fabric_name[5],
				phba->fcf.current_rec.fabric_name[6],
				phba->fcf.current_rec.fabric_name[7]);

			lpfc_nlp_put(ndlp);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
		if (!rc) {
			/* Mark the FCF discovery process done */
			if (phba->hba_flag & HBA_FIP_SUPPORT)
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
						 LOG_ELS,
						 "2769 FLOGI to FCF (x%x) "
						 "completed successfully\n",
						 phba->fcf.current_rec.fcf_indx);
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
			spin_unlock_irq(&phba->hbalock);
			phba->fcf.fcf_redisc_attempted = 0; /* reset */
			goto out;
		}
	}

flogifail:
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
	spin_unlock_irq(&phba->hbalock);

	if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)))
		lpfc_nlp_put(ndlp);
	if (!lpfc_error_lost_link(irsp)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(vport);

		/* Start discovery */
		lpfc_disc_start(vport);
	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
		    (((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
		      IOERR_SLI_ABORTED) &&
		     ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
		      IOERR_SLI_DOWN))) &&
		   (phba->link_state != LPFC_CLEAR_LA)) {
		/* If FLOGI failed enable link interrupt. */
		lpfc_issue_clear_la(phba, vport);
	}
out:
	phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING;
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_cmpl_els_link_down - Completion callback function for ELS command
 *                           aborted during a link down
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 */
static void
lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp;
	uint32_t *pcmd;
	uint32_t cmd;

	pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt);
	cmd = *pcmd;
	irsp = &rspiocb->iocb;

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"6445 ELS completes after LINK_DOWN: "
			" Status %x/%x cmd x%x flg x%x\n",
			irsp->ulpStatus, irsp->un.ulpWord[4], cmd,
			cmdiocb->iocb_flag);

	if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) {
		cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
		atomic_dec(&phba->fabric_iocb_count);
	}
	lpfc_els_free_iocb(phba, cmdiocb);
}

/**
 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: pointer to a node-list data structure.
 * @retry: number of retries to the command IOCB.
 *
 * This routine issues a Fabric Login (FLOGI) Request ELS command
 * for a @vport. The initiator service parameters are put into the payload
 * of the FLOGI Request IOCB and the top-level callback function pointer
 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the context1 field of
 * the IOCB for the completion callback function to the FLOGI ELS command.
 *
 * Return code
 *   0 - successfully issued flogi iocb for @vport
 *   1 - failed to issue flogi iocb for @vport
 **/
static int
lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		     uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct serv_parm *sp;
	IOCB_t *icmd;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_iocbq defer_flogi_acc;
	uint8_t *pcmd;
	uint16_t cmdsize;
	uint32_t tmo, did;
	int rc;

	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_FLOGI);

	if (!elsiocb)
		return 1;

	icmd = &elsiocb->iocb;
	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	/* For FLOGI request, remainder of payload is service parameters */
	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
	pcmd += sizeof(uint32_t);
	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
	sp = (struct serv_parm *) pcmd;

	/* Setup CSPs accordingly for Fabric */
	sp->cmn.e_d_tov = 0;
	sp->cmn.w2.r_a_tov = 0;
	sp->cmn.virtual_fabric_support = 0;
	sp->cls1.classValid = 0;
	if (sp->cmn.fcphLow < FC_PH3)
		sp->cmn.fcphLow = FC_PH3;
	if (sp->cmn.fcphHigh < FC_PH3)
		sp->cmn.fcphHigh = FC_PH3;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		    LPFC_SLI_INTF_IF_TYPE_0) {
			elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
			elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
			/* FLOGI needs to be 3 for WQE FCFI */
			/* Set the fcfi to the fcfi we registered with */
			elsiocb->iocb.ulpContext = phba->fcf.fcfi;
		}
		/* Can't do SLI4 class2 without support sequence coalescing */
		sp->cls2.classValid = 0;
		sp->cls2.seqDelivery = 0;
	} else {
		/* Historical, setting sequential-delivery bit for SLI3 */
		sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
		sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
			sp->cmn.request_multiple_Nport = 1;
			/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
			icmd->ulpCt_h = 1;
			icmd->ulpCt_l = 0;
		} else
			sp->cmn.request_multiple_Nport = 0;
	}

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
		icmd->un.elsreq64.myID = 0;
		icmd->un.elsreq64.fl = 1;
	}

	tmo = phba->fc_ratov;
	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
	lpfc_set_disctmo(vport);
	phba->fc_ratov = tmo;

	phba->fc_stat.elsXmitFLOGI++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FLOGI: opt:x%x",
			      phba->sli3_options, 0, 0);

	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING);

	/* Check for a deferred FLOGI ACC condition */
	if (phba->defer_flogi_acc_flag) {
		did = vport->fc_myDID;
		vport->fc_myDID = Fabric_DID;

		memset(&defer_flogi_acc, 0, sizeof(struct lpfc_iocbq));

		defer_flogi_acc.iocb.ulpContext = phba->defer_flogi_acc_rx_id;
		defer_flogi_acc.iocb.unsli3.rcvsli3.ox_id =
			phba->defer_flogi_acc_ox_id;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
				 "3354 Xmit deferred FLOGI ACC: rx_id: x%x,"
				 " ox_id: x%x, hba_flag x%x\n",
				 phba->defer_flogi_acc_rx_id,
				 phba->defer_flogi_acc_ox_id, phba->hba_flag);

		/* Send deferred FLOGI ACC */
		lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, &defer_flogi_acc,
				 ndlp, NULL);

		phba->defer_flogi_acc_flag = false;

		vport->fc_myDID = did;
	}

	return 0;
}

/**
 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
 * list and issues an abort IOCB command on each outstanding IOCB that
 * contains an active Fabric_DID ndlp. Note that this function is to issue
 * the abort IOCB command on all the outstanding IOCBs, thus when this
 * function returns, it does not guarantee all the IOCBs are actually aborted.
 *
 * Return code
 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
 **/
int
lpfc_els_abort_flogi(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_nodelist *ndlp;
	IOCB_t *icmd;

	/* Abort outstanding I/O on NPort <nlp_DID> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"0201 Abort outstanding I/O on NPort x%x\n",
			Fabric_DID);

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/*
	 * Check the txcmplq for an iocb that matches the nport the driver is
	 * searching for.
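	 * Only ELS_REQUEST64 command entries whose ndlp carries the
	 * Fabric_DID are aborted here.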
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
			ndlp = (struct lpfc_nodelist *)(iocb->context1);
			if (ndlp && ndlp->nlp_DID == Fabric_DID) {
				if ((phba->pport->fc_flag & FC_PT2PT) &&
				    !(phba->pport->fc_flag & FC_PT2PT_PLOGI))
					iocb->fabric_iocb_cmpl =
						lpfc_ignore_els_cmpl;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
							   NULL);
			}
		}
	}
	/* Make sure HBA is alive */
	lpfc_issue_hb_tmo(phba);

	spin_unlock_irq(&phba->hbalock);

	return 0;
}

/**
 * lpfc_initial_flogi - Issue an initial fabric login for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Login (FLOGI) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_flogi() routine is then invoked with the @vport and the
 * ndlp to perform the FLOGI for the @vport.
 *
 * Return code
 *   0 - failed to issue initial flogi for @vport
 *   1 - successfully issued initial flogi for @vport
 **/
int
lpfc_initial_flogi(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	vport->port_state = LPFC_FLOGI;
	lpfc_set_disctmo(vport);

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
		/* This decrement of reference count to node shall kick off
		 * the release of the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine issues an initial Fabric Discover (FDISC) for the @vport
 * specified. It first searches the ndlp with the Fabric_DID (0xfffffe) from
 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp
 * and put it into the @vport's ndlp list. If an inactive ndlp is found on
 * the list, it will just be enabled and made active. The
 * lpfc_issue_els_fdisc() routine is then invoked with the @vport and the
 * ndlp to perform the FDISC for the @vport.
 *
 * Return code
 *   0 - failed to issue initial fdisc for @vport
 *   1 - successfully issued initial fdisc for @vport
 **/
int
lpfc_initial_fdisc(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	/* First look for the Fabric ndlp */
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return 0;

		/* NPIV is only supported in Fabrics. */
		ndlp->nlp_type |= NLP_FABRIC;

		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
		/* decrement node reference count to trigger the release of
		 * the node.
		 */
		lpfc_nlp_put(ndlp);
		return 0;
	}
	return 1;
}

/**
 * lpfc_more_plogi - Check and issue remaining plogis for a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine checks whether there are more remaining Port Logins
 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
 * to issue ELS PLOGIs up to the configured discover threads with the
 * @vport (@vport->cfg_discovery_threads). The function also decrements
 * the @vport's num_disc_nodes by 1 if it is not already 0.
 **/
void
lpfc_more_plogi(struct lpfc_vport *vport)
{
	if (vport->num_disc_nodes)
		vport->num_disc_nodes--;

	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0232 Continue discovery with %d PLOGIs to go "
			 "Data: x%x x%x x%x\n",
			 vport->num_disc_nodes, vport->fc_plogi_cnt,
			 vport->fc_flag, vport->port_state);
	/* Check to see if there are more PLOGIs to be sent */
	if (vport->fc_flag & FC_NLP_MORE)
		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
		lpfc_els_disc_plogi(vport);

	return;
}

/**
 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
 * @phba: pointer to lpfc hba data structure.
 * @prsp: pointer to response IOCB payload.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
 * The following cases are considered N_Port confirmed:
 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
 * it does not have WWPN assigned either. If the WWPN is confirmed, the
 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
 * 1) if there is a node on vport list other than the @ndlp with the same
 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
 * on that node to release the RPI associated with the node; 2) if there is
 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
 * into, a new node shall be allocated (or activated). In either case, the
 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
 * be released and the new_ndlp shall be put on to the vport node list and
 * its pointer returned as the confirmed node.
 *
 * Note that before the @ndlp got "released", the keepDID from the
 * not-matching or inactive "new_ndlp" on the vport node list is assigned
 * to the nlp_DID of the @ndlp. This is because the release of @ndlp is
 * actually to put it into an inactive state on the vport node list and
 * the vport node list management algorithm does not allow two nodes with
 * the same DID.
 *
 * Return code
 *   pointer to the PLOGI N_Port @ndlp
 **/
static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
			 struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_nodelist *new_ndlp;
	struct serv_parm *sp;
	uint8_t name[sizeof(struct lpfc_name)];
	uint32_t keepDID = 0, keep_nlp_flag = 0;
	uint32_t keep_new_nlp_flag = 0;
	uint16_t keep_nlp_state;
	u32 keep_nlp_fc4_type = 0;
	struct lpfc_nvme_rport *keep_nrport = NULL;
	unsigned long *active_rrqs_xri_bitmap = NULL;

	/* Fabric nodes can have the same WWPN so we don't bother searching
	 * by WWPN.  Just return the ndlp that was given to us.
	 */
	if (ndlp->nlp_type & NLP_FABRIC)
		return ndlp;

	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
	memset(name, 0, sizeof(struct lpfc_name));

	/* Now we find out if the NPort we are logging into, matches the WWPN
	 * we have for that ndlp. If not, we have some work to do.
	 */
	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);

	/* return immediately if the WWPN matches ndlp */
	if (!new_ndlp || (new_ndlp == ndlp))
		return ndlp;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool,
						       GFP_KERNEL);
		if (active_rrqs_xri_bitmap)
			memset(active_rrqs_xri_bitmap, 0,
			       phba->cfg_rrq_xri_bitmap_sz);
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE,
			 "3178 PLOGI confirm: ndlp x%x x%x x%x: "
			 "new_ndlp x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type,
			 (new_ndlp ? new_ndlp->nlp_DID : 0),
			 (new_ndlp ? new_ndlp->nlp_flag : 0),
			 (new_ndlp ? new_ndlp->nlp_fc4_type : 0));

	keepDID = new_ndlp->nlp_DID;

	if (phba->sli_rev == LPFC_SLI_REV4 && active_rrqs_xri_bitmap)
		memcpy(active_rrqs_xri_bitmap, new_ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	/* At this point in this routine, we know new_ndlp will be
	 * returned. however, any previous GID_FTs that were done
	 * would have updated nlp_fc4_type in ndlp, so we must ensure
	 * new_ndlp has the right value.
	 */
	if (vport->fc_flag & FC_FABRIC) {
		keep_nlp_fc4_type = new_ndlp->nlp_fc4_type;
		new_ndlp->nlp_fc4_type = ndlp->nlp_fc4_type;
	}

	lpfc_unreg_rpi(vport, new_ndlp);
	new_ndlp->nlp_DID = ndlp->nlp_DID;
	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
	if (phba->sli_rev == LPFC_SLI_REV4)
		memcpy(new_ndlp->active_rrqs_xri_bitmap,
		       ndlp->active_rrqs_xri_bitmap,
		       phba->cfg_rrq_xri_bitmap_sz);

	/* Lock both ndlps */
	spin_lock_irq(&ndlp->lock);
	spin_lock_irq(&new_ndlp->lock);
	keep_new_nlp_flag = new_ndlp->nlp_flag;
	keep_nlp_flag = ndlp->nlp_flag;
	new_ndlp->nlp_flag = ndlp->nlp_flag;

	/* if new_ndlp had NLP_UNREG_INP set, keep it */
	if (keep_new_nlp_flag & NLP_UNREG_INP)
		new_ndlp->nlp_flag |= NLP_UNREG_INP;
	else
		new_ndlp->nlp_flag &= ~NLP_UNREG_INP;

	/* if new_ndlp had NLP_RPI_REGISTERED set, keep it */
	if (keep_new_nlp_flag & NLP_RPI_REGISTERED)
		new_ndlp->nlp_flag |= NLP_RPI_REGISTERED;
	else
		new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;

	/*
	 * Retain the DROPPED flag.
This will take care of the init 1696 * refcount when affecting the state change 1697 */ 1698 if (keep_new_nlp_flag & NLP_DROPPED) 1699 new_ndlp->nlp_flag |= NLP_DROPPED; 1700 else 1701 new_ndlp->nlp_flag &= ~NLP_DROPPED; 1702 1703 ndlp->nlp_flag = keep_new_nlp_flag; 1704 1705 /* if ndlp had NLP_UNREG_INP set, keep it */ 1706 if (keep_nlp_flag & NLP_UNREG_INP) 1707 ndlp->nlp_flag |= NLP_UNREG_INP; 1708 else 1709 ndlp->nlp_flag &= ~NLP_UNREG_INP; 1710 1711 /* if ndlp had NLP_RPI_REGISTERED set, keep it */ 1712 if (keep_nlp_flag & NLP_RPI_REGISTERED) 1713 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 1714 else 1715 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 1716 1717 /* 1718 * Retain the DROPPED flag. This will take care of the init 1719 * refcount when affecting the state change 1720 */ 1721 if (keep_nlp_flag & NLP_DROPPED) 1722 ndlp->nlp_flag |= NLP_DROPPED; 1723 else 1724 ndlp->nlp_flag &= ~NLP_DROPPED; 1725 1726 spin_unlock_irq(&new_ndlp->lock); 1727 spin_unlock_irq(&ndlp->lock); 1728 1729 /* Set nlp_states accordingly */ 1730 keep_nlp_state = new_ndlp->nlp_state; 1731 lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state); 1732 1733 /* interchange the nvme remoteport structs */ 1734 keep_nrport = new_ndlp->nrport; 1735 new_ndlp->nrport = ndlp->nrport; 1736 1737 /* Move this back to NPR state */ 1738 if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) { 1739 /* The new_ndlp is replacing ndlp totally, so we need 1740 * to put ndlp on UNUSED list and try to free it. 1741 */ 1742 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1743 "3179 PLOGI confirm NEW: %x %x\n", 1744 new_ndlp->nlp_DID, keepDID); 1745 1746 /* Two ndlps cannot have the same did on the nodelist. 1747 * Note: for this case, ndlp has a NULL WWPN so setting 1748 * the nlp_fc4_type isn't required. 1749 */ 1750 ndlp->nlp_DID = keepDID; 1751 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1752 if (phba->sli_rev == LPFC_SLI_REV4 && 1753 active_rrqs_xri_bitmap) 1754 memcpy(ndlp->active_rrqs_xri_bitmap, 1755 active_rrqs_xri_bitmap, 1756 phba->cfg_rrq_xri_bitmap_sz); 1757 1758 } else { 1759 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1760 "3180 PLOGI confirm SWAP: %x %x\n", 1761 new_ndlp->nlp_DID, keepDID); 1762 1763 lpfc_unreg_rpi(vport, ndlp); 1764 1765 /* Two ndlps cannot have the same did and the fc4 1766 * type must be transferred because the ndlp is in 1767 * flight. 
1768 */ 1769 ndlp->nlp_DID = keepDID; 1770 ndlp->nlp_fc4_type = keep_nlp_fc4_type; 1771 1772 if (phba->sli_rev == LPFC_SLI_REV4 && 1773 active_rrqs_xri_bitmap) 1774 memcpy(ndlp->active_rrqs_xri_bitmap, 1775 active_rrqs_xri_bitmap, 1776 phba->cfg_rrq_xri_bitmap_sz); 1777 1778 /* Since we are switching over to the new_ndlp, 1779 * reset the old ndlp state 1780 */ 1781 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 1782 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 1783 keep_nlp_state = NLP_STE_NPR_NODE; 1784 lpfc_nlp_set_state(vport, ndlp, keep_nlp_state); 1785 ndlp->nrport = keep_nrport; 1786 } 1787 1788 /* 1789 * If ndlp is not associated with any rport we can drop it here else 1790 * let dev_loss_tmo_callbk trigger DEVICE_RM event 1791 */ 1792 if (!ndlp->rport && (ndlp->nlp_state == NLP_STE_NPR_NODE)) 1793 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 1794 1795 if (phba->sli_rev == LPFC_SLI_REV4 && 1796 active_rrqs_xri_bitmap) 1797 mempool_free(active_rrqs_xri_bitmap, 1798 phba->active_rrq_pool); 1799 1800 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, 1801 "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", 1802 new_ndlp->nlp_DID, new_ndlp->nlp_flag, 1803 new_ndlp->nlp_fc4_type); 1804 1805 return new_ndlp; 1806 } 1807 1808 /** 1809 * lpfc_end_rscn - Check and handle more rscn for a vport 1810 * @vport: pointer to a host virtual N_Port data structure. 1811 * 1812 * This routine checks whether more Registration State Change 1813 * Notifications (RSCNs) came in while the discovery state machine was in 1814 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be 1815 * invoked to handle the additional RSCNs for the @vport. Otherwise, the 1816 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of 1817 * handling the RSCNs. 1818 **/ 1819 void 1820 lpfc_end_rscn(struct lpfc_vport *vport) 1821 { 1822 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1823 1824 if (vport->fc_flag & FC_RSCN_MODE) { 1825 /* 1826 * Check to see if more RSCNs came in while we were 1827 * processing this one. 1828 */ 1829 if (vport->fc_rscn_id_cnt || 1830 (vport->fc_flag & FC_RSCN_DISCOVERY) != 0) 1831 lpfc_els_handle_rscn(vport); 1832 else { 1833 spin_lock_irq(shost->host_lock); 1834 vport->fc_flag &= ~FC_RSCN_MODE; 1835 spin_unlock_irq(shost->host_lock); 1836 } 1837 } 1838 } 1839 1840 /** 1841 * lpfc_cmpl_els_rrq - Completion handled for els RRQs. 1842 * @phba: pointer to lpfc hba data structure. 1843 * @cmdiocb: pointer to lpfc command iocb data structure. 1844 * @rspiocb: pointer to lpfc response iocb data structure. 1845 * 1846 * This routine will call the clear rrq function to free the rrq and 1847 * clear the xri's bit in the ndlp's xri_bitmap. If the ndlp does not 1848 * exist then the clear_rrq is still called because the rrq needs to 1849 * be freed. 
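 *
 * Whatever the completion status, the teardown always runs in this order
 * (abridged from the function body):
 *
 *   lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
 *   lpfc_els_free_iocb(phba, cmdiocb);
 *   lpfc_nlp_put(ndlp);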
1850 **/ 1851 1852 static void 1853 lpfc_cmpl_els_rrq(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1854 struct lpfc_iocbq *rspiocb) 1855 { 1856 struct lpfc_vport *vport = cmdiocb->vport; 1857 IOCB_t *irsp; 1858 struct lpfc_nodelist *ndlp = cmdiocb->context1; 1859 struct lpfc_node_rrq *rrq; 1860 1861 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1862 rrq = cmdiocb->context_un.rrq; 1863 cmdiocb->context_un.rsp_iocb = rspiocb; 1864 1865 irsp = &rspiocb->iocb; 1866 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1867 "RRQ cmpl: status:x%x/x%x did:x%x", 1868 irsp->ulpStatus, irsp->un.ulpWord[4], 1869 irsp->un.elsreq64.remoteID); 1870 1871 /* rrq completes to NPort <nlp_DID> */ 1872 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1873 "2880 RRQ completes to DID x%x " 1874 "Data: x%x x%x x%x x%x x%x\n", 1875 irsp->un.elsreq64.remoteID, 1876 irsp->ulpStatus, irsp->un.ulpWord[4], 1877 irsp->ulpTimeout, rrq->xritag, rrq->rxid); 1878 1879 if (irsp->ulpStatus) { 1880 /* Check for retry */ 1881 /* RRQ failed Don't print the vport to vport rjts */ 1882 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1883 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1884 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1885 (phba)->pport->cfg_log_verbose & LOG_ELS) 1886 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1887 "2881 RRQ failure DID:%06X Status:" 1888 "x%x/x%x\n", 1889 ndlp->nlp_DID, irsp->ulpStatus, 1890 irsp->un.ulpWord[4]); 1891 } 1892 1893 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 1894 lpfc_els_free_iocb(phba, cmdiocb); 1895 lpfc_nlp_put(ndlp); 1896 return; 1897 } 1898 /** 1899 * lpfc_cmpl_els_plogi - Completion callback function for plogi 1900 * @phba: pointer to lpfc hba data structure. 1901 * @cmdiocb: pointer to lpfc command iocb data structure. 1902 * @rspiocb: pointer to lpfc response iocb data structure. 1903 * 1904 * This routine is the completion callback function for issuing the Port 1905 * Login (PLOGI) command. For PLOGI completion, there must be an active 1906 * ndlp on the vport node list that matches the remote node ID from the 1907 * PLOGI response IOCB. If such ndlp does not exist, the PLOGI is simply 1908 * ignored and command IOCB released. The PLOGI response IOCB status is 1909 * checked for error conditions. If there is error status reported, PLOGI 1910 * retry shall be attempted by invoking the lpfc_els_retry() routine. 1911 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on 1912 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine 1913 * (DSM) is set for this PLOGI completion. Finally, it checks whether 1914 * there are additional N_Port nodes with the vport that need to perform 1915 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue addition 1916 * PLOGIs. 
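 *
 * The error/success handling reduces to roughly the following sketch
 * (simplified from the body; logging and locking are omitted, and on
 * lost-link errors the state machine call is skipped):
 *
 *   if (irsp->ulpStatus) {
 *       if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 *           goto out;                     - a retry is in progress
 *       lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI);
 *   } else {
 *       ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
 *       lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI);
 *   }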
1917 **/ 1918 static void 1919 lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 1920 struct lpfc_iocbq *rspiocb) 1921 { 1922 struct lpfc_vport *vport = cmdiocb->vport; 1923 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1924 IOCB_t *irsp; 1925 struct lpfc_nodelist *ndlp, *free_ndlp; 1926 struct lpfc_dmabuf *prsp; 1927 int disc; 1928 1929 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1930 cmdiocb->context_un.rsp_iocb = rspiocb; 1931 1932 irsp = &rspiocb->iocb; 1933 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 1934 "PLOGI cmpl: status:x%x/x%x did:x%x", 1935 irsp->ulpStatus, irsp->un.ulpWord[4], 1936 irsp->un.elsreq64.remoteID); 1937 1938 ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID); 1939 if (!ndlp) { 1940 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1941 "0136 PLOGI completes to NPort x%x " 1942 "with no ndlp. Data: x%x x%x x%x\n", 1943 irsp->un.elsreq64.remoteID, 1944 irsp->ulpStatus, irsp->un.ulpWord[4], 1945 irsp->ulpIoTag); 1946 goto out_freeiocb; 1947 } 1948 1949 /* Since ndlp can be freed in the disc state machine, note if this node 1950 * is being used during discovery. 1951 */ 1952 spin_lock_irq(&ndlp->lock); 1953 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 1954 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 1955 spin_unlock_irq(&ndlp->lock); 1956 1957 /* PLOGI completes to NPort <nlp_DID> */ 1958 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 1959 "0102 PLOGI completes to NPort x%06x " 1960 "Data: x%x x%x x%x x%x x%x\n", 1961 ndlp->nlp_DID, ndlp->nlp_fc4_type, 1962 irsp->ulpStatus, irsp->un.ulpWord[4], 1963 disc, vport->num_disc_nodes); 1964 1965 /* Check to see if link went down during discovery */ 1966 if (lpfc_els_chk_latt(vport)) { 1967 spin_lock_irq(&ndlp->lock); 1968 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1969 spin_unlock_irq(&ndlp->lock); 1970 goto out; 1971 } 1972 1973 if (irsp->ulpStatus) { 1974 /* Check for retry */ 1975 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1976 /* ELS command is being retried */ 1977 if (disc) { 1978 spin_lock_irq(&ndlp->lock); 1979 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 1980 spin_unlock_irq(&ndlp->lock); 1981 } 1982 goto out; 1983 } 1984 /* PLOGI failed Don't print the vport to vport rjts */ 1985 if (irsp->ulpStatus != IOSTAT_LS_RJT || 1986 (((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) && 1987 ((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) || 1988 (phba)->pport->cfg_log_verbose & LOG_ELS) 1989 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 1990 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", 1991 ndlp->nlp_DID, irsp->ulpStatus, 1992 irsp->un.ulpWord[4]); 1993 1994 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 1995 if (lpfc_error_lost_link(irsp)) 1996 goto check_plogi; 1997 else 1998 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 1999 NLP_EVT_CMPL_PLOGI); 2000 2001 /* As long as this node is not registered with the scsi or nvme 2002 * transport, it is no longer an active node. Otherwise 2003 * devloss handles the final cleanup. 
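 * The check below relies on SCSI_XPT_REGD / NVME_XPT_REGD in
 * ndlp->fc4_xpt_flags to tell whether either transport still holds a
 * registration for this node.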
2004 */ 2005 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2006 spin_lock_irq(&ndlp->lock); 2007 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2008 spin_unlock_irq(&ndlp->lock); 2009 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2010 NLP_EVT_DEVICE_RM); 2011 } 2012 } else { 2013 /* Good status, call state machine */ 2014 prsp = list_entry(((struct lpfc_dmabuf *) 2015 cmdiocb->context2)->list.next, 2016 struct lpfc_dmabuf, list); 2017 ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); 2018 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2019 NLP_EVT_CMPL_PLOGI); 2020 } 2021 2022 check_plogi: 2023 if (disc && vport->num_disc_nodes) { 2024 /* Check to see if there are more PLOGIs to be sent */ 2025 lpfc_more_plogi(vport); 2026 2027 if (vport->num_disc_nodes == 0) { 2028 spin_lock_irq(shost->host_lock); 2029 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2030 spin_unlock_irq(shost->host_lock); 2031 2032 lpfc_can_disctmo(vport); 2033 lpfc_end_rscn(vport); 2034 } 2035 } 2036 2037 out: 2038 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2039 "PLOGI Cmpl PUT: did:x%x refcnt %d", 2040 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2041 2042 out_freeiocb: 2043 /* Release the reference on the original I/O request. */ 2044 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 2045 2046 lpfc_els_free_iocb(phba, cmdiocb); 2047 lpfc_nlp_put(free_ndlp); 2048 return; 2049 } 2050 2051 /** 2052 * lpfc_issue_els_plogi - Issue an plogi iocb command for a vport 2053 * @vport: pointer to a host virtual N_Port data structure. 2054 * @did: destination port identifier. 2055 * @retry: number of retries to the command IOCB. 2056 * 2057 * This routine issues a Port Login (PLOGI) command to a remote N_Port 2058 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port, 2059 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list. 2060 * This routine constructs the proper fields of the PLOGI IOCB and invokes 2061 * the lpfc_sli_issue_iocb() routine to send out PLOGI ELS command. 2062 * 2063 * Note that the ndlp reference count will be incremented by 1 for holding 2064 * the ndlp and the reference to ndlp will be stored into the context1 field 2065 * of the IOCB for the completion callback function to the PLOGI ELS command. 2066 * 2067 * Return code 2068 * 0 - Successfully issued a plogi for @vport 2069 * 1 - failed to issue a plogi for @vport 2070 **/ 2071 int 2072 lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) 2073 { 2074 struct lpfc_hba *phba = vport->phba; 2075 struct serv_parm *sp; 2076 struct lpfc_nodelist *ndlp; 2077 struct lpfc_iocbq *elsiocb; 2078 uint8_t *pcmd; 2079 uint16_t cmdsize; 2080 int ret; 2081 2082 ndlp = lpfc_findnode_did(vport, did); 2083 if (!ndlp) 2084 return 1; 2085 2086 /* Defer the processing of the issue PLOGI until after the 2087 * outstanding UNREG_RPI mbox command completes, unless we 2088 * are going offline. 
This logic does not apply for Fabric DIDs 2089 */ 2090 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2091 ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && 2092 !(vport->fc_flag & FC_OFFLINE_MODE)) { 2093 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2094 "4110 Issue PLOGI x%x deferred " 2095 "on NPort x%x rpi x%x Data: x%px\n", 2096 ndlp->nlp_defer_did, ndlp->nlp_DID, 2097 ndlp->nlp_rpi, ndlp); 2098 2099 /* We can only defer 1st PLOGI */ 2100 if (ndlp->nlp_defer_did == NLP_EVT_NOTHING_PENDING) 2101 ndlp->nlp_defer_did = did; 2102 return 0; 2103 } 2104 2105 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 2106 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 2107 ELS_CMD_PLOGI); 2108 if (!elsiocb) 2109 return 1; 2110 2111 spin_lock_irq(&ndlp->lock); 2112 ndlp->nlp_flag &= ~NLP_FCP_PRLI_RJT; 2113 spin_unlock_irq(&ndlp->lock); 2114 2115 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2116 2117 /* For PLOGI request, remainder of payload is service parameters */ 2118 *((uint32_t *) (pcmd)) = ELS_CMD_PLOGI; 2119 pcmd += sizeof(uint32_t); 2120 memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); 2121 sp = (struct serv_parm *) pcmd; 2122 2123 /* 2124 * If we are a N-port connected to a Fabric, fix-up paramm's so logins 2125 * to device on remote loops work. 2126 */ 2127 if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) 2128 sp->cmn.altBbCredit = 1; 2129 2130 if (sp->cmn.fcphLow < FC_PH_4_3) 2131 sp->cmn.fcphLow = FC_PH_4_3; 2132 2133 if (sp->cmn.fcphHigh < FC_PH3) 2134 sp->cmn.fcphHigh = FC_PH3; 2135 2136 sp->cmn.valid_vendor_ver_level = 0; 2137 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 2138 sp->cmn.bbRcvSizeMsb &= 0xF; 2139 2140 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2141 "Issue PLOGI: did:x%x", 2142 did, 0, 0); 2143 2144 /* If our firmware supports this feature, convey that 2145 * information to the target using the vendor specific field. 2146 */ 2147 if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) { 2148 sp->cmn.valid_vendor_ver_level = 1; 2149 sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID); 2150 sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP); 2151 } 2152 2153 phba->fc_stat.elsXmitPLOGI++; 2154 elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; 2155 2156 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2157 "Issue PLOGI: did:x%x refcnt %d", 2158 did, kref_read(&ndlp->kref), 0); 2159 elsiocb->context1 = lpfc_nlp_get(ndlp); 2160 if (!elsiocb->context1) { 2161 lpfc_els_free_iocb(phba, elsiocb); 2162 return 1; 2163 } 2164 2165 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2166 if (ret) { 2167 lpfc_els_free_iocb(phba, elsiocb); 2168 lpfc_nlp_put(ndlp); 2169 return 1; 2170 } 2171 2172 return 0; 2173 } 2174 2175 /** 2176 * lpfc_cmpl_els_prli - Completion callback function for prli 2177 * @phba: pointer to lpfc hba data structure. 2178 * @cmdiocb: pointer to lpfc command iocb data structure. 2179 * @rspiocb: pointer to lpfc response iocb data structure. 2180 * 2181 * This routine is the completion callback function for a Process Login 2182 * (PRLI) ELS command. The PRLI response IOCB status is checked for error 2183 * status. If there is error status reported, PRLI retry shall be attempted 2184 * by invoking the lpfc_els_retry() routine. Otherwise, the state 2185 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this 2186 * ndlp to mark the PRLI completion. 
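 *
 * Because the driver can have both an FCP and an NVMe PRLI outstanding to
 * the same node, the completion first drops the per-vport and per-ndlp
 * counters under the node lock (abridged from the body):
 *
 *   spin_lock_irq(&ndlp->lock);
 *   ndlp->nlp_flag &= ~NLP_PRLI_SND;
 *   vport->fc_prli_sent--;
 *   ndlp->fc4_prli_sent--;
 *   spin_unlock_irq(&ndlp->lock);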
2187 **/ 2188 static void 2189 lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2190 struct lpfc_iocbq *rspiocb) 2191 { 2192 struct lpfc_vport *vport = cmdiocb->vport; 2193 IOCB_t *irsp; 2194 struct lpfc_nodelist *ndlp; 2195 char *mode; 2196 u32 loglevel; 2197 2198 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2199 cmdiocb->context_un.rsp_iocb = rspiocb; 2200 2201 irsp = &(rspiocb->iocb); 2202 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2203 spin_lock_irq(&ndlp->lock); 2204 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2205 2206 /* Driver supports multiple FC4 types. Counters matter. */ 2207 vport->fc_prli_sent--; 2208 ndlp->fc4_prli_sent--; 2209 spin_unlock_irq(&ndlp->lock); 2210 2211 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2212 "PRLI cmpl: status:x%x/x%x did:x%x", 2213 irsp->ulpStatus, irsp->un.ulpWord[4], 2214 ndlp->nlp_DID); 2215 2216 /* PRLI completes to NPort <nlp_DID> */ 2217 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2218 "0103 PRLI completes to NPort x%06x " 2219 "Data: x%x x%x x%x x%x\n", 2220 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2221 vport->num_disc_nodes, ndlp->fc4_prli_sent); 2222 2223 /* Check to see if link went down during discovery */ 2224 if (lpfc_els_chk_latt(vport)) 2225 goto out; 2226 2227 if (irsp->ulpStatus) { 2228 /* Check for retry */ 2229 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2230 /* ELS command is being retried */ 2231 goto out; 2232 } 2233 2234 /* If we don't send GFT_ID to Fabric, a PRLI error 2235 * could be expected. 2236 */ 2237 if ((vport->fc_flag & FC_FABRIC) || 2238 (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) { 2239 mode = KERN_ERR; 2240 loglevel = LOG_TRACE_EVENT; 2241 } else { 2242 mode = KERN_INFO; 2243 loglevel = LOG_ELS; 2244 } 2245 2246 /* PRLI failed */ 2247 lpfc_printf_vlog(vport, mode, loglevel, 2248 "2754 PRLI failure DID:%06X Status:x%x/x%x, " 2249 "data: x%x\n", 2250 ndlp->nlp_DID, irsp->ulpStatus, 2251 irsp->un.ulpWord[4], ndlp->fc4_prli_sent); 2252 2253 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2254 if (!lpfc_error_lost_link(irsp)) 2255 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2256 NLP_EVT_CMPL_PRLI); 2257 2258 /* As long as this node is not registered with the SCSI 2259 * or NVMe transport and no other PRLIs are outstanding, 2260 * it is no longer an active node. Otherwise devloss 2261 * handles the final cleanup. 2262 */ 2263 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && 2264 !ndlp->fc4_prli_sent) { 2265 spin_lock_irq(&ndlp->lock); 2266 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2267 spin_unlock_irq(&ndlp->lock); 2268 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2269 NLP_EVT_DEVICE_RM); 2270 } 2271 } else { 2272 /* Good status, call state machine. However, if another 2273 * PRLI is outstanding, don't call the state machine 2274 * because final disposition to Mapped or Unmapped is 2275 * completed there. 2276 */ 2277 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2278 NLP_EVT_CMPL_PRLI); 2279 } 2280 2281 out: 2282 lpfc_els_free_iocb(phba, cmdiocb); 2283 lpfc_nlp_put(ndlp); 2284 return; 2285 } 2286 2287 /** 2288 * lpfc_issue_els_prli - Issue a prli iocb command for a vport 2289 * @vport: pointer to a host virtual N_Port data structure. 2290 * @ndlp: pointer to a node-list data structure. 2291 * @retry: number of retries to the command IOCB. 2292 * 2293 * This routine issues a Process Login (PRLI) ELS command for the 2294 * @vport. 
The PRLI service parameters are set up in the payload of the 2295 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine 2296 * is put to the IOCB completion callback func field before invoking the 2297 * routine lpfc_sli_issue_iocb() to send out PRLI command. 2298 * 2299 * Note that the ndlp reference count will be incremented by 1 for holding the 2300 * ndlp and the reference to ndlp will be stored into the context1 field of 2301 * the IOCB for the completion callback function to the PRLI ELS command. 2302 * 2303 * Return code 2304 * 0 - successfully issued prli iocb command for @vport 2305 * 1 - failed to issue prli iocb command for @vport 2306 **/ 2307 int 2308 lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2309 uint8_t retry) 2310 { 2311 int rc = 0; 2312 struct lpfc_hba *phba = vport->phba; 2313 PRLI *npr; 2314 struct lpfc_nvme_prli *npr_nvme; 2315 struct lpfc_iocbq *elsiocb; 2316 uint8_t *pcmd; 2317 uint16_t cmdsize; 2318 u32 local_nlp_type, elscmd; 2319 2320 /* 2321 * If we are in RSCN mode, the FC4 types supported from a 2322 * previous GFT_ID command may not be accurate. So, if we 2323 * are a NVME Initiator, always look for the possibility of 2324 * the remote NPort beng a NVME Target. 2325 */ 2326 if (phba->sli_rev == LPFC_SLI_REV4 && 2327 vport->fc_flag & FC_RSCN_MODE && 2328 vport->nvmei_support) 2329 ndlp->nlp_fc4_type |= NLP_FC4_NVME; 2330 local_nlp_type = ndlp->nlp_fc4_type; 2331 2332 /* This routine will issue 1 or 2 PRLIs, so zero all the ndlp 2333 * fields here before any of them can complete. 2334 */ 2335 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2336 ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); 2337 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 2338 ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); 2339 ndlp->nvme_fb_size = 0; 2340 2341 send_next_prli: 2342 if (local_nlp_type & NLP_FC4_FCP) { 2343 /* Payload is 4 + 16 = 20 x14 bytes. */ 2344 cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); 2345 elscmd = ELS_CMD_PRLI; 2346 } else if (local_nlp_type & NLP_FC4_NVME) { 2347 /* Payload is 4 + 20 = 24 x18 bytes. */ 2348 cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli)); 2349 elscmd = ELS_CMD_NVMEPRLI; 2350 } else { 2351 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2352 "3083 Unknown FC_TYPE x%x ndlp x%06x\n", 2353 ndlp->nlp_fc4_type, ndlp->nlp_DID); 2354 return 1; 2355 } 2356 2357 /* SLI3 ports don't support NVME. If this rport is a strict NVME 2358 * FC4 type, implicitly LOGO. 2359 */ 2360 if (phba->sli_rev == LPFC_SLI_REV3 && 2361 ndlp->nlp_fc4_type == NLP_FC4_NVME) { 2362 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2363 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n", 2364 ndlp->nlp_type); 2365 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 2366 return 1; 2367 } 2368 2369 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2370 ndlp->nlp_DID, elscmd); 2371 if (!elsiocb) 2372 return 1; 2373 2374 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2375 2376 /* For PRLI request, remainder of payload is service parameters */ 2377 memset(pcmd, 0, cmdsize); 2378 2379 if (local_nlp_type & NLP_FC4_FCP) { 2380 /* Remainder of payload is FCP PRLI parameter page. 2381 * Note: this data structure is defined as 2382 * BE/LE in the structure definition so no 2383 * byte swap call is made. 
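 *
 * Contrast this with the NVMe PRLI page in the branch below, which is
 * built with bf_set() and therefore does need the explicit swaps, e.g.:
 *   npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
 *   npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);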
2384 */ 2385 *((uint32_t *)(pcmd)) = ELS_CMD_PRLI; 2386 pcmd += sizeof(uint32_t); 2387 npr = (PRLI *)pcmd; 2388 2389 /* 2390 * If our firmware version is 3.20 or later, 2391 * set the following bits for FC-TAPE support. 2392 */ 2393 if (phba->vpd.rev.feaLevelHigh >= 0x02) { 2394 npr->ConfmComplAllowed = 1; 2395 npr->Retry = 1; 2396 npr->TaskRetryIdReq = 1; 2397 } 2398 npr->estabImagePair = 1; 2399 npr->readXferRdyDis = 1; 2400 if (vport->cfg_first_burst_size) 2401 npr->writeXferRdyDis = 1; 2402 2403 /* For FCP support */ 2404 npr->prliType = PRLI_FCP_TYPE; 2405 npr->initiatorFunc = 1; 2406 elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ; 2407 2408 /* Remove FCP type - processed. */ 2409 local_nlp_type &= ~NLP_FC4_FCP; 2410 } else if (local_nlp_type & NLP_FC4_NVME) { 2411 /* Remainder of payload is NVME PRLI parameter page. 2412 * This data structure is the newer definition that 2413 * uses bf macros so a byte swap is required. 2414 */ 2415 *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI; 2416 pcmd += sizeof(uint32_t); 2417 npr_nvme = (struct lpfc_nvme_prli *)pcmd; 2418 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 2419 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 2420 if (phba->nsler) { 2421 bf_set(prli_nsler, npr_nvme, 1); 2422 bf_set(prli_conf, npr_nvme, 1); 2423 } 2424 2425 /* Only initiators request first burst. */ 2426 if ((phba->cfg_nvme_enable_fb) && 2427 !phba->nvmet_support) 2428 bf_set(prli_fba, npr_nvme, 1); 2429 2430 if (phba->nvmet_support) { 2431 bf_set(prli_tgt, npr_nvme, 1); 2432 bf_set(prli_disc, npr_nvme, 1); 2433 } else { 2434 bf_set(prli_init, npr_nvme, 1); 2435 bf_set(prli_conf, npr_nvme, 1); 2436 } 2437 2438 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 2439 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 2440 elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ; 2441 2442 /* Remove NVME type - processed. */ 2443 local_nlp_type &= ~NLP_FC4_NVME; 2444 } 2445 2446 phba->fc_stat.elsXmitPRLI++; 2447 elsiocb->iocb_cmpl = lpfc_cmpl_els_prli; 2448 spin_lock_irq(&ndlp->lock); 2449 ndlp->nlp_flag |= NLP_PRLI_SND; 2450 2451 /* The vport counters are used for lpfc_scan_finished, but 2452 * the ndlp is used to track outstanding PRLIs for different 2453 * FC4 types. 2454 */ 2455 vport->fc_prli_sent++; 2456 ndlp->fc4_prli_sent++; 2457 spin_unlock_irq(&ndlp->lock); 2458 2459 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2460 "Issue PRLI: did:x%x refcnt %d", 2461 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2462 elsiocb->context1 = lpfc_nlp_get(ndlp); 2463 if (!elsiocb->context1) { 2464 lpfc_els_free_iocb(phba, elsiocb); 2465 goto err; 2466 } 2467 2468 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2469 if (rc == IOCB_ERROR) { 2470 lpfc_els_free_iocb(phba, elsiocb); 2471 lpfc_nlp_put(ndlp); 2472 goto err; 2473 } 2474 2475 2476 /* The driver supports 2 FC4 types. Make sure 2477 * a PRLI is issued for all types before exiting. 2478 */ 2479 if (phba->sli_rev == LPFC_SLI_REV4 && 2480 local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) 2481 goto send_next_prli; 2482 else 2483 return 0; 2484 2485 err: 2486 spin_lock_irq(&ndlp->lock); 2487 ndlp->nlp_flag &= ~NLP_PRLI_SND; 2488 spin_unlock_irq(&ndlp->lock); 2489 return 1; 2490 } 2491 2492 /** 2493 * lpfc_rscn_disc - Perform rscn discovery for a vport 2494 * @vport: pointer to a host virtual N_Port data structure. 2495 * 2496 * This routine performs Registration State Change Notification (RSCN) 2497 * discovery for a @vport. 
If the @vport's node port recovery count is not 2498 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all 2499 * the nodes that need recovery. If none of the PLOGI were needed through 2500 * the lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be 2501 * invoked to check and handle possible more RSCN came in during the period 2502 * of processing the current ones. 2503 **/ 2504 static void 2505 lpfc_rscn_disc(struct lpfc_vport *vport) 2506 { 2507 lpfc_can_disctmo(vport); 2508 2509 /* RSCN discovery */ 2510 /* go thru NPR nodes and issue ELS PLOGIs */ 2511 if (vport->fc_npr_cnt) 2512 if (lpfc_els_disc_plogi(vport)) 2513 return; 2514 2515 lpfc_end_rscn(vport); 2516 } 2517 2518 /** 2519 * lpfc_adisc_done - Complete the adisc phase of discovery 2520 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs. 2521 * 2522 * This function is called when the final ADISC is completed during discovery. 2523 * This function handles clearing link attention or issuing reg_vpi depending 2524 * on whether npiv is enabled. This function also kicks off the PLOGI phase of 2525 * discovery. 2526 * This function is called with no locks held. 2527 **/ 2528 static void 2529 lpfc_adisc_done(struct lpfc_vport *vport) 2530 { 2531 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2532 struct lpfc_hba *phba = vport->phba; 2533 2534 /* 2535 * For NPIV, cmpl_reg_vpi will set port_state to READY, 2536 * and continue discovery. 2537 */ 2538 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 2539 !(vport->fc_flag & FC_RSCN_MODE) && 2540 (phba->sli_rev < LPFC_SLI_REV4)) { 2541 /* The ADISCs are complete. Doesn't matter if they 2542 * succeeded or failed because the ADISC completion 2543 * routine guarantees to call the state machine and 2544 * the RPI is either unregistered (failed ADISC response) 2545 * or the RPI is still valid and the node is marked 2546 * mapped for a target. The exchanges should be in the 2547 * correct state. This code is specific to SLI3. 2548 */ 2549 lpfc_issue_clear_la(phba, vport); 2550 lpfc_issue_reg_vpi(phba, vport); 2551 return; 2552 } 2553 /* 2554 * For SLI2, we need to set port_state to READY 2555 * and continue discovery. 2556 */ 2557 if (vport->port_state < LPFC_VPORT_READY) { 2558 /* If we get here, there is nothing to ADISC */ 2559 lpfc_issue_clear_la(phba, vport); 2560 if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) { 2561 vport->num_disc_nodes = 0; 2562 /* go thru NPR list, issue ELS PLOGIs */ 2563 if (vport->fc_npr_cnt) 2564 lpfc_els_disc_plogi(vport); 2565 if (!vport->num_disc_nodes) { 2566 spin_lock_irq(shost->host_lock); 2567 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2568 spin_unlock_irq(shost->host_lock); 2569 lpfc_can_disctmo(vport); 2570 lpfc_end_rscn(vport); 2571 } 2572 } 2573 vport->port_state = LPFC_VPORT_READY; 2574 } else 2575 lpfc_rscn_disc(vport); 2576 } 2577 2578 /** 2579 * lpfc_more_adisc - Issue more adisc as needed 2580 * @vport: pointer to a host virtual N_Port data structure. 2581 * 2582 * This routine determines whether there are more ndlps on a @vport 2583 * node list need to have Address Discover (ADISC) issued. If so, it will 2584 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's 2585 * remaining nodes which need to have ADISC sent. 
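 *
 * In outline (abridged from the function body):
 *
 *   if (vport->fc_flag & FC_NLP_MORE) {
 *       lpfc_set_disctmo(vport);
 *       lpfc_els_disc_adisc(vport);       - issue the remaining ADISCs
 *   }
 *   if (!vport->num_disc_nodes)
 *       lpfc_adisc_done(vport);           - ADISC phase is complete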
2586 **/ 2587 void 2588 lpfc_more_adisc(struct lpfc_vport *vport) 2589 { 2590 if (vport->num_disc_nodes) 2591 vport->num_disc_nodes--; 2592 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2593 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 2594 "0210 Continue discovery with %d ADISCs to go " 2595 "Data: x%x x%x x%x\n", 2596 vport->num_disc_nodes, vport->fc_adisc_cnt, 2597 vport->fc_flag, vport->port_state); 2598 /* Check to see if there are more ADISCs to be sent */ 2599 if (vport->fc_flag & FC_NLP_MORE) { 2600 lpfc_set_disctmo(vport); 2601 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2602 lpfc_els_disc_adisc(vport); 2603 } 2604 if (!vport->num_disc_nodes) 2605 lpfc_adisc_done(vport); 2606 return; 2607 } 2608 2609 /** 2610 * lpfc_cmpl_els_adisc - Completion callback function for adisc 2611 * @phba: pointer to lpfc hba data structure. 2612 * @cmdiocb: pointer to lpfc command iocb data structure. 2613 * @rspiocb: pointer to lpfc response iocb data structure. 2614 * 2615 * This routine is the completion function for issuing the Address Discover 2616 * (ADISC) command. It first checks to see whether link went down during 2617 * the discovery process. If so, the node will be marked as node port 2618 * recovery for issuing discover IOCB by the link attention handler and 2619 * exit. Otherwise, the response status is checked. If error was reported 2620 * in the response status, the ADISC command shall be retried by invoking 2621 * the lpfc_els_retry() routine. Otherwise, if no error was reported in 2622 * the response status, the state machine is invoked to set transition 2623 * with respect to NLP_EVT_CMPL_ADISC event. 2624 **/ 2625 static void 2626 lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2627 struct lpfc_iocbq *rspiocb) 2628 { 2629 struct lpfc_vport *vport = cmdiocb->vport; 2630 IOCB_t *irsp; 2631 struct lpfc_nodelist *ndlp; 2632 int disc; 2633 2634 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2635 cmdiocb->context_un.rsp_iocb = rspiocb; 2636 2637 irsp = &(rspiocb->iocb); 2638 ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2639 2640 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2641 "ADISC cmpl: status:x%x/x%x did:x%x", 2642 irsp->ulpStatus, irsp->un.ulpWord[4], 2643 ndlp->nlp_DID); 2644 2645 /* Since ndlp can be freed in the disc state machine, note if this node 2646 * is being used during discovery. 
2647 */ 2648 spin_lock_irq(&ndlp->lock); 2649 disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); 2650 ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); 2651 spin_unlock_irq(&ndlp->lock); 2652 /* ADISC completes to NPort <nlp_DID> */ 2653 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2654 "0104 ADISC completes to NPort x%x " 2655 "Data: x%x x%x x%x x%x x%x\n", 2656 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4], 2657 irsp->ulpTimeout, disc, vport->num_disc_nodes); 2658 /* Check to see if link went down during discovery */ 2659 if (lpfc_els_chk_latt(vport)) { 2660 spin_lock_irq(&ndlp->lock); 2661 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2662 spin_unlock_irq(&ndlp->lock); 2663 goto out; 2664 } 2665 2666 if (irsp->ulpStatus) { 2667 /* Check for retry */ 2668 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 2669 /* ELS command is being retried */ 2670 if (disc) { 2671 spin_lock_irq(&ndlp->lock); 2672 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2673 spin_unlock_irq(&ndlp->lock); 2674 lpfc_set_disctmo(vport); 2675 } 2676 goto out; 2677 } 2678 /* ADISC failed */ 2679 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2680 "2755 ADISC failure DID:%06X Status:x%x/x%x\n", 2681 ndlp->nlp_DID, irsp->ulpStatus, 2682 irsp->un.ulpWord[4]); 2683 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2684 if (lpfc_error_lost_link(irsp)) 2685 goto check_adisc; 2686 else 2687 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2688 NLP_EVT_CMPL_ADISC); 2689 2690 /* As long as this node is not registered with the SCSI or NVMe 2691 * transport, it is no longer an active node. Otherwise 2692 * devloss handles the final cleanup. 2693 */ 2694 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2695 spin_lock_irq(&ndlp->lock); 2696 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2697 spin_unlock_irq(&ndlp->lock); 2698 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2699 NLP_EVT_DEVICE_RM); 2700 } 2701 } else 2702 /* Good status, call state machine */ 2703 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2704 NLP_EVT_CMPL_ADISC); 2705 2706 check_adisc: 2707 /* Check to see if there are more ADISCs to be sent */ 2708 if (disc && vport->num_disc_nodes) 2709 lpfc_more_adisc(vport); 2710 out: 2711 lpfc_els_free_iocb(phba, cmdiocb); 2712 lpfc_nlp_put(ndlp); 2713 return; 2714 } 2715 2716 /** 2717 * lpfc_issue_els_adisc - Issue an address discover iocb to an node on a vport 2718 * @vport: pointer to a virtual N_Port data structure. 2719 * @ndlp: pointer to a node-list data structure. 2720 * @retry: number of retries to the command IOCB. 2721 * 2722 * This routine issues an Address Discover (ADISC) for an @ndlp on a 2723 * @vport. It prepares the payload of the ADISC ELS command, updates the 2724 * and states of the ndlp, and invokes the lpfc_sli_issue_iocb() routine 2725 * to issue the ADISC ELS command. 2726 * 2727 * Note that the ndlp reference count will be incremented by 1 for holding the 2728 * ndlp and the reference to ndlp will be stored into the context1 field of 2729 * the IOCB for the completion callback function to the ADISC ELS command. 
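 *
 * The ADISC payload carries the local identity, filled in roughly as
 * follows (abridged from the body):
 *
 *   ap->hardAL_PA = phba->fc_pref_ALPA;
 *   memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
 *   memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
 *   ap->DID = be32_to_cpu(vport->fc_myDID);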
2730 * 2731 * Return code 2732 * 0 - successfully issued adisc 2733 * 1 - failed to issue adisc 2734 **/ 2735 int 2736 lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2737 uint8_t retry) 2738 { 2739 int rc = 0; 2740 struct lpfc_hba *phba = vport->phba; 2741 ADISC *ap; 2742 struct lpfc_iocbq *elsiocb; 2743 uint8_t *pcmd; 2744 uint16_t cmdsize; 2745 2746 cmdsize = (sizeof(uint32_t) + sizeof(ADISC)); 2747 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2748 ndlp->nlp_DID, ELS_CMD_ADISC); 2749 if (!elsiocb) 2750 return 1; 2751 2752 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2753 2754 /* For ADISC request, remainder of payload is service parameters */ 2755 *((uint32_t *) (pcmd)) = ELS_CMD_ADISC; 2756 pcmd += sizeof(uint32_t); 2757 2758 /* Fill in ADISC payload */ 2759 ap = (ADISC *) pcmd; 2760 ap->hardAL_PA = phba->fc_pref_ALPA; 2761 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 2762 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 2763 ap->DID = be32_to_cpu(vport->fc_myDID); 2764 2765 phba->fc_stat.elsXmitADISC++; 2766 elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc; 2767 spin_lock_irq(&ndlp->lock); 2768 ndlp->nlp_flag |= NLP_ADISC_SND; 2769 spin_unlock_irq(&ndlp->lock); 2770 elsiocb->context1 = lpfc_nlp_get(ndlp); 2771 if (!elsiocb->context1) { 2772 lpfc_els_free_iocb(phba, elsiocb); 2773 goto err; 2774 } 2775 2776 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2777 "Issue ADISC: did:x%x refcnt %d", 2778 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2779 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2780 if (rc == IOCB_ERROR) { 2781 lpfc_els_free_iocb(phba, elsiocb); 2782 lpfc_nlp_put(ndlp); 2783 goto err; 2784 } 2785 2786 return 0; 2787 2788 err: 2789 spin_lock_irq(&ndlp->lock); 2790 ndlp->nlp_flag &= ~NLP_ADISC_SND; 2791 spin_unlock_irq(&ndlp->lock); 2792 return 1; 2793 } 2794 2795 /** 2796 * lpfc_cmpl_els_logo - Completion callback function for logo 2797 * @phba: pointer to lpfc hba data structure. 2798 * @cmdiocb: pointer to lpfc command iocb data structure. 2799 * @rspiocb: pointer to lpfc response iocb data structure. 2800 * 2801 * This routine is the completion function for issuing the ELS Logout (LOGO) 2802 * command. If no error status was reported from the LOGO response, the 2803 * state machine of the associated ndlp shall be invoked for transition with 2804 * respect to NLP_EVT_CMPL_LOGO event. 
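 *
 * A LOGO is never retried on failure: an ACC, an LS_RJT or no answer are
 * all treated as completion. If the remote node is an FCP or NVMe target
 * and recovery was not skipped (for example because a link attention or
 * lost-link error was seen), the node is flagged NLP_NPR_2B_DISC and
 * lpfc_disc_start() is called so a fresh PLOGI can recover the login.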
2805 **/ 2806 static void 2807 lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 2808 struct lpfc_iocbq *rspiocb) 2809 { 2810 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; 2811 struct lpfc_vport *vport = ndlp->vport; 2812 IOCB_t *irsp; 2813 unsigned long flags; 2814 uint32_t skip_recovery = 0; 2815 int wake_up_waiter = 0; 2816 2817 /* we pass cmdiocb to state machine which needs rspiocb as well */ 2818 cmdiocb->context_un.rsp_iocb = rspiocb; 2819 2820 irsp = &(rspiocb->iocb); 2821 spin_lock_irq(&ndlp->lock); 2822 ndlp->nlp_flag &= ~NLP_LOGO_SND; 2823 if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) { 2824 wake_up_waiter = 1; 2825 ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO; 2826 } 2827 spin_unlock_irq(&ndlp->lock); 2828 2829 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2830 "LOGO cmpl: status:x%x/x%x did:x%x", 2831 irsp->ulpStatus, irsp->un.ulpWord[4], 2832 ndlp->nlp_DID); 2833 2834 /* LOGO completes to NPort <nlp_DID> */ 2835 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2836 "0105 LOGO completes to NPort x%x " 2837 "refcnt %d nflags x%x Data: x%x x%x x%x x%x\n", 2838 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, 2839 irsp->ulpStatus, irsp->un.ulpWord[4], 2840 irsp->ulpTimeout, vport->num_disc_nodes); 2841 2842 if (lpfc_els_chk_latt(vport)) { 2843 skip_recovery = 1; 2844 goto out; 2845 } 2846 2847 /* The LOGO will not be retried on failure. A LOGO was 2848 * issued to the remote rport and a ACC or RJT or no Answer are 2849 * all acceptable. Note the failure and move forward with 2850 * discovery. The PLOGI will retry. 2851 */ 2852 if (irsp->ulpStatus) { 2853 /* LOGO failed */ 2854 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 2855 "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n", 2856 ndlp->nlp_DID, irsp->ulpStatus, 2857 irsp->un.ulpWord[4]); 2858 /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ 2859 if (lpfc_error_lost_link(irsp)) { 2860 skip_recovery = 1; 2861 goto out; 2862 } 2863 } 2864 2865 /* Call state machine. This will unregister the rpi if needed. */ 2866 lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_LOGO); 2867 2868 /* The driver sets this flag for an NPIV instance that doesn't want to 2869 * log into the remote port. 2870 */ 2871 if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { 2872 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2873 NLP_EVT_DEVICE_RM); 2874 lpfc_els_free_iocb(phba, cmdiocb); 2875 lpfc_nlp_put(ndlp); 2876 2877 /* Presume the node was released. */ 2878 return; 2879 } 2880 2881 out: 2882 /* Driver is done with the IO. */ 2883 lpfc_els_free_iocb(phba, cmdiocb); 2884 lpfc_nlp_put(ndlp); 2885 2886 /* At this point, the LOGO processing is complete. NOTE: For a 2887 * pt2pt topology, we are assuming the NPortID will only change 2888 * on link up processing. For a LOGO / PLOGI initiated by the 2889 * Initiator, we are assuming the NPortID is not going to change. 2890 */ 2891 2892 if (wake_up_waiter && ndlp->logo_waitq) 2893 wake_up(ndlp->logo_waitq); 2894 /* 2895 * If the node is a target, the handling attempts to recover the port. 2896 * For any other port type, the rpi is unregistered as an implicit 2897 * LOGO. 
2898 */ 2899 if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && 2900 skip_recovery == 0) { 2901 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2902 spin_lock_irqsave(&ndlp->lock, flags); 2903 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 2904 spin_unlock_irqrestore(&ndlp->lock, flags); 2905 2906 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 2907 "3187 LOGO completes to NPort x%x: Start " 2908 "Recovery Data: x%x x%x x%x x%x\n", 2909 ndlp->nlp_DID, irsp->ulpStatus, 2910 irsp->un.ulpWord[4], irsp->ulpTimeout, 2911 vport->num_disc_nodes); 2912 lpfc_disc_start(vport); 2913 return; 2914 } 2915 2916 /* Cleanup path for failed REG_RPI handling. If REG_RPI fails, the 2917 * driver sends a LOGO to the rport to cleanup. For fabric and 2918 * initiator ports cleanup the node as long as it the node is not 2919 * register with the transport. 2920 */ 2921 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 2922 spin_lock_irq(&ndlp->lock); 2923 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 2924 spin_unlock_irq(&ndlp->lock); 2925 lpfc_disc_state_machine(vport, ndlp, cmdiocb, 2926 NLP_EVT_DEVICE_RM); 2927 } 2928 } 2929 2930 /** 2931 * lpfc_issue_els_logo - Issue a logo to an node on a vport 2932 * @vport: pointer to a virtual N_Port data structure. 2933 * @ndlp: pointer to a node-list data structure. 2934 * @retry: number of retries to the command IOCB. 2935 * 2936 * This routine constructs and issues an ELS Logout (LOGO) iocb command 2937 * to a remote node, referred by an @ndlp on a @vport. It constructs the 2938 * payload of the IOCB, properly sets up the @ndlp state, and invokes the 2939 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command. 2940 * 2941 * Note that the ndlp reference count will be incremented by 1 for holding the 2942 * ndlp and the reference to ndlp will be stored into the context1 field of 2943 * the IOCB for the completion callback function to the LOGO ELS command. 
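 *
 * The LOGO payload is small: the command word, the local N_Port ID and
 * the local WWPN (abridged from the body):
 *
 *   *((uint32_t *)pcmd) = ELS_CMD_LOGO;
 *   pcmd += sizeof(uint32_t);
 *   *((uint32_t *)pcmd) = be32_to_cpu(vport->fc_myDID);
 *   pcmd += sizeof(uint32_t);
 *   memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));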
2944 * 2945 * Callers of this routine are expected to unregister the RPI first 2946 * 2947 * Return code 2948 * 0 - successfully issued logo 2949 * 1 - failed to issue logo 2950 **/ 2951 int 2952 lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2953 uint8_t retry) 2954 { 2955 struct lpfc_hba *phba = vport->phba; 2956 struct lpfc_iocbq *elsiocb; 2957 uint8_t *pcmd; 2958 uint16_t cmdsize; 2959 int rc; 2960 2961 spin_lock_irq(&ndlp->lock); 2962 if (ndlp->nlp_flag & NLP_LOGO_SND) { 2963 spin_unlock_irq(&ndlp->lock); 2964 return 0; 2965 } 2966 spin_unlock_irq(&ndlp->lock); 2967 2968 cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); 2969 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 2970 ndlp->nlp_DID, ELS_CMD_LOGO); 2971 if (!elsiocb) 2972 return 1; 2973 2974 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 2975 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 2976 pcmd += sizeof(uint32_t); 2977 2978 /* Fill in LOGO payload */ 2979 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 2980 pcmd += sizeof(uint32_t); 2981 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 2982 2983 phba->fc_stat.elsXmitLOGO++; 2984 elsiocb->iocb_cmpl = lpfc_cmpl_els_logo; 2985 spin_lock_irq(&ndlp->lock); 2986 ndlp->nlp_flag |= NLP_LOGO_SND; 2987 ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; 2988 spin_unlock_irq(&ndlp->lock); 2989 elsiocb->context1 = lpfc_nlp_get(ndlp); 2990 if (!elsiocb->context1) { 2991 lpfc_els_free_iocb(phba, elsiocb); 2992 goto err; 2993 } 2994 2995 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 2996 "Issue LOGO: did:x%x refcnt %d", 2997 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 2998 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 2999 if (rc == IOCB_ERROR) { 3000 lpfc_els_free_iocb(phba, elsiocb); 3001 lpfc_nlp_put(ndlp); 3002 goto err; 3003 } 3004 3005 spin_lock_irq(&ndlp->lock); 3006 ndlp->nlp_prev_state = ndlp->nlp_state; 3007 spin_unlock_irq(&ndlp->lock); 3008 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3009 return 0; 3010 3011 err: 3012 spin_lock_irq(&ndlp->lock); 3013 ndlp->nlp_flag &= ~NLP_LOGO_SND; 3014 spin_unlock_irq(&ndlp->lock); 3015 return 1; 3016 } 3017 3018 /** 3019 * lpfc_cmpl_els_cmd - Completion callback function for generic els command 3020 * @phba: pointer to lpfc hba data structure. 3021 * @cmdiocb: pointer to lpfc command iocb data structure. 3022 * @rspiocb: pointer to lpfc response iocb data structure. 3023 * 3024 * This routine is a generic completion callback function for ELS commands. 3025 * Specifically, it is the callback function which does not need to perform 3026 * any command specific operations. It is currently used by the ELS command 3027 * issuing routines for RSCN, lpfc_issue_els_rscn, and the ELS Fibre Channel 3028 * Address Resolution Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). 3029 * Other than certain debug loggings, this callback function simply invokes the 3030 * lpfc_els_chk_latt() routine to check whether link went down during the 3031 * discovery process. 
3032 **/ 3033 static void 3034 lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3035 struct lpfc_iocbq *rspiocb) 3036 { 3037 struct lpfc_vport *vport = cmdiocb->vport; 3038 struct lpfc_nodelist *free_ndlp; 3039 IOCB_t *irsp; 3040 3041 irsp = &rspiocb->iocb; 3042 3043 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3044 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3045 irsp->ulpStatus, irsp->un.ulpWord[4], 3046 irsp->un.elsreq64.remoteID); 3047 3048 /* ELS cmd tag <ulpIoTag> completes */ 3049 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3050 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n", 3051 irsp->ulpIoTag, irsp->ulpStatus, 3052 irsp->un.ulpWord[4], irsp->ulpTimeout); 3053 3054 /* Check to see if link went down during discovery */ 3055 lpfc_els_chk_latt(vport); 3056 3057 free_ndlp = (struct lpfc_nodelist *)cmdiocb->context1; 3058 3059 lpfc_els_free_iocb(phba, cmdiocb); 3060 lpfc_nlp_put(free_ndlp); 3061 } 3062 3063 /** 3064 * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd 3065 * @phba: pointer to lpfc hba data structure. 3066 * @cmdiocb: pointer to lpfc command iocb data structure. 3067 * @rspiocb: pointer to lpfc response iocb data structure. 3068 * 3069 * This routine is a generic completion callback function for Discovery ELS cmd. 3070 * Currently used by the ELS command issuing routines for the ELS State Change 3071 * Request (SCR), lpfc_issue_els_scr() and the ELS RDF, lpfc_issue_els_rdf(). 3072 * These commands will be retried once only for ELS timeout errors. 3073 **/ 3074 static void 3075 lpfc_cmpl_els_disc_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3076 struct lpfc_iocbq *rspiocb) 3077 { 3078 struct lpfc_vport *vport = cmdiocb->vport; 3079 IOCB_t *irsp; 3080 struct lpfc_els_rdf_rsp *prdf; 3081 struct lpfc_dmabuf *pcmd, *prsp; 3082 u32 *pdata; 3083 u32 cmd; 3084 struct lpfc_nodelist *ndlp = cmdiocb->context1; 3085 3086 irsp = &rspiocb->iocb; 3087 3088 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3089 "ELS cmd cmpl: status:x%x/x%x did:x%x", 3090 irsp->ulpStatus, irsp->un.ulpWord[4], 3091 irsp->un.elsreq64.remoteID); 3092 /* ELS cmd tag <ulpIoTag> completes */ 3093 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3094 "0217 ELS cmd tag x%x completes Data: x%x x%x x%x " 3095 "x%x\n", 3096 irsp->ulpIoTag, irsp->ulpStatus, 3097 irsp->un.ulpWord[4], irsp->ulpTimeout, 3098 cmdiocb->retry); 3099 3100 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 3101 if (!pcmd) 3102 goto out; 3103 3104 pdata = (u32 *)pcmd->virt; 3105 if (!pdata) 3106 goto out; 3107 cmd = *pdata; 3108 3109 /* Only 1 retry for ELS Timeout only */ 3110 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 3111 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3112 IOERR_SEQUENCE_TIMEOUT)) { 3113 cmdiocb->retry++; 3114 if (cmdiocb->retry <= 1) { 3115 switch (cmd) { 3116 case ELS_CMD_SCR: 3117 lpfc_issue_els_scr(vport, cmdiocb->retry); 3118 break; 3119 case ELS_CMD_RDF: 3120 cmdiocb->context1 = NULL; /* save ndlp refcnt */ 3121 lpfc_issue_els_rdf(vport, cmdiocb->retry); 3122 break; 3123 } 3124 goto out; 3125 } 3126 phba->fc_stat.elsRetryExceeded++; 3127 } 3128 if (irsp->ulpStatus) { 3129 /* ELS discovery cmd completes with error */ 3130 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 3131 "4203 ELS cmd x%x error: x%x x%X\n", cmd, 3132 irsp->ulpStatus, irsp->un.ulpWord[4]); 3133 goto out; 3134 } 3135 3136 /* The RDF response doesn't have any impact on the running driver 3137 * but the notification descriptors are dumped here for support. 
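 * Each granted descriptor tag is byte-swapped with be32_to_cpu() before
 * it is logged, and the loop is bounded by both ELS_RDF_REG_TAG_CNT and
 * the descriptor count returned by the fabric.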
3138 */ 3139 if (cmd == ELS_CMD_RDF) { 3140 int i; 3141 3142 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 3143 if (!prsp) 3144 goto out; 3145 3146 prdf = (struct lpfc_els_rdf_rsp *)prsp->virt; 3147 if (!prdf) 3148 goto out; 3149 3150 for (i = 0; i < ELS_RDF_REG_TAG_CNT && 3151 i < be32_to_cpu(prdf->reg_d1.reg_desc.count); i++) 3152 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 3153 "4677 Fabric RDF Notification Grant Data: " 3154 "0x%08x\n", 3155 be32_to_cpu( 3156 prdf->reg_d1.desc_tags[i])); 3157 } 3158 3159 out: 3160 /* Check to see if link went down during discovery */ 3161 lpfc_els_chk_latt(vport); 3162 lpfc_els_free_iocb(phba, cmdiocb); 3163 lpfc_nlp_put(ndlp); 3164 return; 3165 } 3166 3167 /** 3168 * lpfc_issue_els_scr - Issue a scr to an node on a vport 3169 * @vport: pointer to a host virtual N_Port data structure. 3170 * @retry: retry counter for the command IOCB. 3171 * 3172 * This routine issues a State Change Request (SCR) to a fabric node 3173 * on a @vport. The remote node is Fabric Controller (0xfffffd). It 3174 * first search the @vport node list to find the matching ndlp. If no such 3175 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An 3176 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb() 3177 * routine is invoked to send the SCR IOCB. 3178 * 3179 * Note that the ndlp reference count will be incremented by 1 for holding the 3180 * ndlp and the reference to ndlp will be stored into the context1 field of 3181 * the IOCB for the completion callback function to the SCR ELS command. 3182 * 3183 * Return code 3184 * 0 - Successfully issued scr command 3185 * 1 - Failed to issue scr command 3186 **/ 3187 int 3188 lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) 3189 { 3190 int rc = 0; 3191 struct lpfc_hba *phba = vport->phba; 3192 struct lpfc_iocbq *elsiocb; 3193 uint8_t *pcmd; 3194 uint16_t cmdsize; 3195 struct lpfc_nodelist *ndlp; 3196 3197 cmdsize = (sizeof(uint32_t) + sizeof(SCR)); 3198 3199 ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID); 3200 if (!ndlp) { 3201 ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID); 3202 if (!ndlp) 3203 return 1; 3204 lpfc_enqueue_node(vport, ndlp); 3205 } 3206 3207 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3208 ndlp->nlp_DID, ELS_CMD_SCR); 3209 3210 if (!elsiocb) 3211 return 1; 3212 3213 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 3214 3215 *((uint32_t *) (pcmd)) = ELS_CMD_SCR; 3216 pcmd += sizeof(uint32_t); 3217 3218 /* For SCR, remainder of payload is SCR parameter page */ 3219 memset(pcmd, 0, sizeof(SCR)); 3220 ((SCR *) pcmd)->Function = SCR_FUNC_FULL; 3221 3222 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3223 "Issue SCR: did:x%x", 3224 ndlp->nlp_DID, 0, 0); 3225 3226 phba->fc_stat.elsXmitSCR++; 3227 elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd; 3228 elsiocb->context1 = lpfc_nlp_get(ndlp); 3229 if (!elsiocb->context1) { 3230 lpfc_els_free_iocb(phba, elsiocb); 3231 return 1; 3232 } 3233 3234 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3235 "Issue SCR: did:x%x refcnt %d", 3236 ndlp->nlp_DID, kref_read(&ndlp->kref), 0); 3237 3238 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3239 if (rc == IOCB_ERROR) { 3240 lpfc_els_free_iocb(phba, elsiocb); 3241 lpfc_nlp_put(ndlp); 3242 return 1; 3243 } 3244 3245 /* Keep the ndlp just in case RDF is being sent */ 3246 return 0; 3247 } 3248 3249 /** 3250 * lpfc_issue_els_rscn - Issue an RSCN to the Fabric Controller (Fabric) 3251 * or the other nport (pt2pt). 
3252 * @vport: pointer to a host virtual N_Port data structure. 3253 * @retry: number of retries to the command IOCB. 3254 * 3255 * This routine issues a RSCN to the Fabric Controller (DID 0xFFFFFD) 3256 * when connected to a fabric, or to the remote port when connected 3257 * in point-to-point mode. When sent to the Fabric Controller, it will 3258 * replay the RSCN to registered recipients. 3259 * 3260 * Note that the ndlp reference count will be incremented by 1 for holding the 3261 * ndlp and the reference to ndlp will be stored into the context1 field of 3262 * the IOCB for the completion callback function to the RSCN ELS command. 3263 * 3264 * Return code 3265 * 0 - Successfully issued RSCN command 3266 * 1 - Failed to issue RSCN command 3267 **/ 3268 int 3269 lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry) 3270 { 3271 int rc = 0; 3272 struct lpfc_hba *phba = vport->phba; 3273 struct lpfc_iocbq *elsiocb; 3274 struct lpfc_nodelist *ndlp; 3275 struct { 3276 struct fc_els_rscn rscn; 3277 struct fc_els_rscn_page portid; 3278 } *event; 3279 uint32_t nportid; 3280 uint16_t cmdsize = sizeof(*event); 3281 3282 /* Not supported for private loop */ 3283 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3284 !(vport->fc_flag & FC_PUBLIC_LOOP)) 3285 return 1; 3286 3287 if (vport->fc_flag & FC_PT2PT) { 3288 /* find any mapped nport - that would be the other nport */ 3289 ndlp = lpfc_findnode_mapped(vport); 3290 if (!ndlp) 3291 return 1; 3292 } else { 3293 nportid = FC_FID_FCTRL; 3294 /* find the fabric controller node */ 3295 ndlp = lpfc_findnode_did(vport, nportid); 3296 if (!ndlp) { 3297 /* if one didn't exist, make one */ 3298 ndlp = lpfc_nlp_init(vport, nportid); 3299 if (!ndlp) 3300 return 1; 3301 lpfc_enqueue_node(vport, ndlp); 3302 } 3303 } 3304 3305 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, 3306 ndlp->nlp_DID, ELS_CMD_RSCN_XMT); 3307 3308 if (!elsiocb) 3309 return 1; 3310 3311 event = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 3312 3313 event->rscn.rscn_cmd = ELS_RSCN; 3314 event->rscn.rscn_page_len = sizeof(struct fc_els_rscn_page); 3315 event->rscn.rscn_plen = cpu_to_be16(cmdsize); 3316 3317 nportid = vport->fc_myDID; 3318 /* appears that page flags must be 0 for fabric to broadcast RSCN */ 3319 event->portid.rscn_page_flags = 0; 3320 event->portid.rscn_fid[0] = (nportid & 0x00FF0000) >> 16; 3321 event->portid.rscn_fid[1] = (nportid & 0x0000FF00) >> 8; 3322 event->portid.rscn_fid[2] = nportid & 0x000000FF; 3323 3324 phba->fc_stat.elsXmitRSCN++; 3325 elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; 3326 elsiocb->context1 = lpfc_nlp_get(ndlp); 3327 if (!elsiocb->context1) { 3328 lpfc_els_free_iocb(phba, elsiocb); 3329 return 1; 3330 } 3331 3332 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3333 "Issue RSCN: did:x%x", 3334 ndlp->nlp_DID, 0, 0); 3335 3336 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 3337 if (rc == IOCB_ERROR) { 3338 lpfc_els_free_iocb(phba, elsiocb); 3339 lpfc_nlp_put(ndlp); 3340 return 1; 3341 } 3342 3343 /* This will cause the callback-function lpfc_cmpl_els_cmd to 3344 * trigger the release of node. 3345 */ 3346 if (!(vport->fc_flag & FC_PT2PT)) 3347 lpfc_nlp_put(ndlp); 3348 return 0; 3349 } 3350 3351 /** 3352 * lpfc_issue_els_farpr - Issue a farp to an node on a vport 3353 * @vport: pointer to a host virtual N_Port data structure. 3354 * @nportid: N_Port identifier to the remote node. 3355 * @retry: number of retries to the command IOCB. 
 *
 * This routine issues a Fibre Channel Address Resolution Response
 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
 * is passed into the function. It first searches the @vport node list to find
 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the context1 field of
 * the IOCB for the completion callback function to the FARPR ELS command.
 *
 * Return code
 *   0 - Successfully issued farpr command
 *   1 - Failed to issue farpr command
 **/
static int
lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
{
	int rc = 0;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	FARP *fp;
	uint8_t *pcmd;
	uint32_t *lp;
	uint16_t cmdsize;
	struct lpfc_nodelist *ondlp;
	struct lpfc_nodelist *ndlp;

	cmdsize = (sizeof(uint32_t) + sizeof(FARP));

	ndlp = lpfc_findnode_did(vport, nportid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, nportid);
		if (!ndlp)
			return 1;
		lpfc_enqueue_node(vport, ndlp);
	}

	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_RNID);
	if (!elsiocb)
		return 1;

	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
	pcmd += sizeof(uint32_t);

	/* Fill in FARPR payload */
	fp = (FARP *) (pcmd);
	memset(fp, 0, sizeof(FARP));
	lp = (uint32_t *) pcmd;
	*lp++ = be32_to_cpu(nportid);
	*lp++ = be32_to_cpu(vport->fc_myDID);
	fp->Rflags = 0;
	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);

	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
	ondlp = lpfc_findnode_did(vport, nportid);
	if (ondlp) {
		memcpy(&fp->OportName, &ondlp->nlp_portname,
		       sizeof(struct lpfc_name));
		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
		       sizeof(struct lpfc_name));
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue FARPR: did:x%x",
			      ndlp->nlp_DID, 0, 0);

	phba->fc_stat.elsXmitFARPR++;
	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		/* The additional lpfc_nlp_put will cause the following
		 * lpfc_els_free_iocb routine to trigger the release of
		 * the node.
		 */
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}
	/* Do not drop the node reference here; the completion callback,
	 * lpfc_cmpl_els_cmd, triggers the release of the node.
	 */
	return 0;
}

/**
 * lpfc_issue_els_rdf - Register for diagnostic functions from the fabric.
 * @vport: pointer to a host virtual N_Port data structure.
 * @retry: retry counter for the command IOCB.
 *
 * This routine issues an ELS RDF to the Fabric Controller to register
 * for diagnostic functions.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the context1 field of
 * the IOCB for the completion callback function to the RDF ELS command.
 *
 * Return code
 *   0 - Successfully issued rdf command
 *   1 - Failed to issue rdf command
 **/
int
lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_els_rdf_req *prdf;
	struct lpfc_nodelist *ndlp;
	uint16_t cmdsize;
	int rc;

	cmdsize = sizeof(*prdf);

	ndlp = lpfc_findnode_did(vport, Fabric_Cntl_DID);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, Fabric_Cntl_DID);
		if (!ndlp)
			return -ENODEV;
		lpfc_enqueue_node(vport, ndlp);
	}

	/* RDF ELS is not required on an NPIV VN_Port. */
	if (vport->port_type == LPFC_NPIV_PORT) {
		lpfc_nlp_put(ndlp);
		return -EACCES;
	}

	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
				     ndlp->nlp_DID, ELS_CMD_RDF);
	if (!elsiocb)
		return -ENOMEM;

	/* Configure the payload for the supported FPIN events. */
	prdf = (struct lpfc_els_rdf_req *)
		(((struct lpfc_dmabuf *)elsiocb->context2)->virt);
	memset(prdf, 0, cmdsize);
	prdf->rdf.fpin_cmd = ELS_RDF;
	prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
					 sizeof(struct fc_els_rdf));
	prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
	prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
				FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
	prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
	prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
	prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
	prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
	prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "6444 Xmit RDF to remote NPORT x%x\n",
			 ndlp->nlp_DID);

	elsiocb->iocb_cmpl = lpfc_cmpl_els_disc_cmd;
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return -EIO;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Issue RDF: did:x%x refcnt %d",
			      ndlp->nlp_DID, kref_read(&ndlp->kref), 0);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
 * @vport: pointer to a host virtual N_Port data structure.
 * @nlp: pointer to a node-list data structure.
 *
 * This routine cancels the timer associated with a delayed IOCB-command
 * retry for the @vport's @nlp. It stops the timer for the delayed function
 * retry and removes the ELS retry event if one is pending. In addition, if
 * the NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
 * commands are sent for the @vport's nodes that require issuing discovery
 * ADISC.
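 *
 * Usage sketch (the calling context here is hypothetical, not taken from
 * this file): a discovery path that is about to reuse a node can cancel a
 * pending delayed retry first, e.g.
 *
 *	if (ndlp->nlp_flag & NLP_DELAY_TMO)
 *		lpfc_cancel_retry_delay_tmo(vport, ndlp);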
3551 **/ 3552 void 3553 lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) 3554 { 3555 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3556 struct lpfc_work_evt *evtp; 3557 3558 if (!(nlp->nlp_flag & NLP_DELAY_TMO)) 3559 return; 3560 spin_lock_irq(&nlp->lock); 3561 nlp->nlp_flag &= ~NLP_DELAY_TMO; 3562 spin_unlock_irq(&nlp->lock); 3563 del_timer_sync(&nlp->nlp_delayfunc); 3564 nlp->nlp_last_elscmd = 0; 3565 if (!list_empty(&nlp->els_retry_evt.evt_listp)) { 3566 list_del_init(&nlp->els_retry_evt.evt_listp); 3567 /* Decrement nlp reference count held for the delayed retry */ 3568 evtp = &nlp->els_retry_evt; 3569 lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); 3570 } 3571 if (nlp->nlp_flag & NLP_NPR_2B_DISC) { 3572 spin_lock_irq(&nlp->lock); 3573 nlp->nlp_flag &= ~NLP_NPR_2B_DISC; 3574 spin_unlock_irq(&nlp->lock); 3575 if (vport->num_disc_nodes) { 3576 if (vport->port_state < LPFC_VPORT_READY) { 3577 /* Check if there are more ADISCs to be sent */ 3578 lpfc_more_adisc(vport); 3579 } else { 3580 /* Check if there are more PLOGIs to be sent */ 3581 lpfc_more_plogi(vport); 3582 if (vport->num_disc_nodes == 0) { 3583 spin_lock_irq(shost->host_lock); 3584 vport->fc_flag &= ~FC_NDISC_ACTIVE; 3585 spin_unlock_irq(shost->host_lock); 3586 lpfc_can_disctmo(vport); 3587 lpfc_end_rscn(vport); 3588 } 3589 } 3590 } 3591 } 3592 return; 3593 } 3594 3595 /** 3596 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer 3597 * @t: pointer to the timer function associated data (ndlp). 3598 * 3599 * This routine is invoked by the ndlp delayed-function timer to check 3600 * whether there is any pending ELS retry event(s) with the node. If not, it 3601 * simply returns. Otherwise, if there is at least one ELS delayed event, it 3602 * adds the delayed events to the HBA work list and invokes the 3603 * lpfc_worker_wake_up() routine to wake up worker thread to process the 3604 * event. Note that lpfc_nlp_get() is called before posting the event to 3605 * the work list to hold reference count of ndlp so that it guarantees the 3606 * reference to ndlp will still be available when the worker thread gets 3607 * to the event associated with the ndlp. 3608 **/ 3609 void 3610 lpfc_els_retry_delay(struct timer_list *t) 3611 { 3612 struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); 3613 struct lpfc_vport *vport = ndlp->vport; 3614 struct lpfc_hba *phba = vport->phba; 3615 unsigned long flags; 3616 struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; 3617 3618 spin_lock_irqsave(&phba->hbalock, flags); 3619 if (!list_empty(&evtp->evt_listp)) { 3620 spin_unlock_irqrestore(&phba->hbalock, flags); 3621 return; 3622 } 3623 3624 /* We need to hold the node by incrementing the reference 3625 * count until the queued work is done 3626 */ 3627 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 3628 if (evtp->evt_arg1) { 3629 evtp->evt = LPFC_EVT_ELS_RETRY; 3630 list_add_tail(&evtp->evt_listp, &phba->work_list); 3631 lpfc_worker_wake_up(phba); 3632 } 3633 spin_unlock_irqrestore(&phba->hbalock, flags); 3634 return; 3635 } 3636 3637 /** 3638 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function 3639 * @ndlp: pointer to a node-list data structure. 3640 * 3641 * This routine is the worker-thread handler for processing the @ndlp delayed 3642 * event(s), posted by the lpfc_els_retry_delay() routine. 
It simply retrieves 3643 * the last ELS command from the associated ndlp and invokes the proper ELS 3644 * function according to the delayed ELS command to retry the command. 3645 **/ 3646 void 3647 lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) 3648 { 3649 struct lpfc_vport *vport = ndlp->vport; 3650 uint32_t cmd, retry; 3651 3652 spin_lock_irq(&ndlp->lock); 3653 cmd = ndlp->nlp_last_elscmd; 3654 ndlp->nlp_last_elscmd = 0; 3655 3656 if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { 3657 spin_unlock_irq(&ndlp->lock); 3658 return; 3659 } 3660 3661 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 3662 spin_unlock_irq(&ndlp->lock); 3663 /* 3664 * If a discovery event readded nlp_delayfunc after timer 3665 * firing and before processing the timer, cancel the 3666 * nlp_delayfunc. 3667 */ 3668 del_timer_sync(&ndlp->nlp_delayfunc); 3669 retry = ndlp->nlp_retry; 3670 ndlp->nlp_retry = 0; 3671 3672 switch (cmd) { 3673 case ELS_CMD_FLOGI: 3674 lpfc_issue_els_flogi(vport, ndlp, retry); 3675 break; 3676 case ELS_CMD_PLOGI: 3677 if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) { 3678 ndlp->nlp_prev_state = ndlp->nlp_state; 3679 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 3680 } 3681 break; 3682 case ELS_CMD_ADISC: 3683 if (!lpfc_issue_els_adisc(vport, ndlp, retry)) { 3684 ndlp->nlp_prev_state = ndlp->nlp_state; 3685 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 3686 } 3687 break; 3688 case ELS_CMD_PRLI: 3689 case ELS_CMD_NVMEPRLI: 3690 if (!lpfc_issue_els_prli(vport, ndlp, retry)) { 3691 ndlp->nlp_prev_state = ndlp->nlp_state; 3692 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 3693 } 3694 break; 3695 case ELS_CMD_LOGO: 3696 if (!lpfc_issue_els_logo(vport, ndlp, retry)) { 3697 ndlp->nlp_prev_state = ndlp->nlp_state; 3698 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 3699 } 3700 break; 3701 case ELS_CMD_FDISC: 3702 if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) 3703 lpfc_issue_els_fdisc(vport, ndlp, retry); 3704 break; 3705 } 3706 return; 3707 } 3708 3709 /** 3710 * lpfc_link_reset - Issue link reset 3711 * @vport: pointer to a virtual N_Port data structure. 3712 * 3713 * This routine performs link reset by sending INIT_LINK mailbox command. 3714 * For SLI-3 adapter, link attention interrupt is enabled before issuing 3715 * INIT_LINK mailbox command. 
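 *
 * For example, lpfc_els_retry() below invokes this routine when a PLOGI to
 * the name server keeps timing out (sketch of that call pattern only):
 *
 *	rc = lpfc_link_reset(vport);
 *	if (rc) {
 *		retry = 1;
 *		delay = 100;
 *	}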
 *
 * Return code
 *   0 - Link reset initiated successfully
 *   1 - Failed to initiate link reset
 **/
int
lpfc_link_reset(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	uint32_t control;
	int rc;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			 "2851 Attempt link reset\n");
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2852 Failed to allocate mbox memory");
		return 1;
	}

	/* Enable Link attention interrupts */
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_PROCESS_LA;
		control = readl(phba->HCregaddr);
		control |= HC_LAINT_ENA;
		writel(control, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_init_link(phba, mbox, phba->cfg_topology,
		       phba->cfg_link_speed);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2853 Failed to issue INIT_LINK "
				"mbox command, rc:x%x\n", rc);
		mempool_free(mbox, phba->mbox_mem_pool);
		return 1;
	}

	return 0;
}

/**
 * lpfc_els_retry - Make retry decision on an els command iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine makes a retry decision on an ELS command IOCB, which has
 * failed. The following ELS IOCBs use this function for retrying the command
 * when a previously issued command responded with an error status: FLOGI,
 * PLOGI, PRLI, ADISC and FDISC. Based on the ELS command type and the
 * returned error status, it makes the decision whether a retry shall be
 * issued for the command, and whether a retry shall be made immediately or
 * delayed. In the former case, the corresponding ELS command issuing-function
 * is called to retry the command. In the latter case, the ELS command shall
 * be posted to the ndlp delayed event and the delayed function timer set on
 * the ndlp for the delayed command issuing.
 *
 * Return code
 *   0 - No retry of els command is made
 *   1 - Immediate or delayed retry of els command is made
 **/
static int
lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
	uint32_t *elscmd;
	struct ls_rjt stat;
	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
	int logerr = 0;
	uint32_t cmd = 0;
	uint32_t did;
	int link_reset = 0, rc;


	/* Note: context2 may be 0 for an internal driver abort
	 * of a delayed ELS command.
	 */

	if (pcmd && pcmd->virt) {
		elscmd = (uint32_t *) (pcmd->virt);
		cmd = *elscmd++;
	}

	if (ndlp)
		did = ndlp->nlp_DID;
	else {
		/* We should only hit this case for retrying PLOGI */
		did = irsp->un.elsreq64.remoteID;
		ndlp = lpfc_findnode_did(vport, did);
		if (!ndlp && (cmd != ELS_CMD_PLOGI))
			return 0;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Retry ELS: wd7:x%x wd4:x%x did:x%x",
		*(((uint32_t *)irsp) + 7), irsp->un.ulpWord[4], did);

	switch (irsp->ulpStatus) {
	case IOSTAT_FCP_RSP_ERROR:
		break;
	case IOSTAT_REMOTE_STOP:
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* This IO was aborted by the target; we do not know
			 * the rxid, and because we did not send the ABTS we
			 * cannot generate an RRQ.
			 */
			lpfc_set_rrq_active(phba, ndlp,
					    cmdiocb->sli4_lxritag, 0, 0);
		}
		break;
	case IOSTAT_LOCAL_REJECT:
		switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) {
		case IOERR_LOOP_OPEN_FAILURE:
			if (cmd == ELS_CMD_FLOGI) {
				if (PCI_DEVICE_ID_HORNET ==
				    phba->pcidev->device) {
					phba->fc_topology = LPFC_TOPOLOGY_LOOP;
					phba->pport->fc_myDID = 0;
					phba->alpa_map[0] = 0;
					phba->alpa_map[1] = 0;
				}
			}
			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
				delay = 1000;
			retry = 1;
			break;

		case IOERR_ILLEGAL_COMMAND:
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0124 Retry illegal cmd x%x "
					 "retry:x%x delay:x%x\n",
					 cmd, cmdiocb->retry, delay);
			retry = 1;
			/* All command's retry policy */
			maxretry = 8;
			if (cmdiocb->retry > 2)
				delay = 1000;
			break;

		case IOERR_NO_RESOURCES:
			logerr = 1; /* HBA out of resources */
			retry = 1;
			if (cmdiocb->retry > 100)
				delay = 100;
			maxretry = 250;
			break;

		case IOERR_ILLEGAL_FRAME:
			delay = 100;
			retry = 1;
			break;

		case IOERR_INVALID_RPI:
			if (cmd == ELS_CMD_PLOGI &&
			    did == NameServer_DID) {
				/* Continue forever if plogi to */
				/* the nameserver fails */
				maxretry = 0;
				delay = 100;
			}
			retry = 1;
			break;

		case IOERR_SEQUENCE_TIMEOUT:
			if (cmd == ELS_CMD_PLOGI &&
			    did == NameServer_DID &&
			    (cmdiocb->retry + 1) == maxretry) {
				/* Reset the Link */
				link_reset = 1;
				break;
			}
			retry = 1;
			delay = 100;
			break;
		}
		break;

	case IOSTAT_NPORT_RJT:
	case IOSTAT_FABRIC_RJT:
		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
			retry = 1;
			break;
		}
		break;

	case IOSTAT_NPORT_BSY:
	case IOSTAT_FABRIC_BSY:
		logerr = 1; /* Fabric / Remote NPort out of resources */
		retry = 1;
		break;

	case IOSTAT_LS_RJT:
		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
		/* Added for vendor specific support.
		 * Just keep retrying for these Rsn / Exp codes.
		 */
		switch (stat.un.b.lsRjtRsnCode) {
		case LSRJT_UNABLE_TPC:
			/* The driver has a VALID PLOGI but the rport has
			 * rejected the PRLI - can't do it now. Delay
			 * for 1 second and try again.
			 *
			 * However, if the explanation is REQ_UNSUPPORTED,
			 * there is no point in retrying the PRLI.
3933 */ 3934 if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && 3935 stat.un.b.lsRjtRsnCodeExp != 3936 LSEXP_REQ_UNSUPPORTED) { 3937 delay = 1000; 3938 maxretry = lpfc_max_els_tries + 1; 3939 retry = 1; 3940 break; 3941 } 3942 3943 /* Legacy bug fix code for targets with PLOGI delays. */ 3944 if (stat.un.b.lsRjtRsnCodeExp == 3945 LSEXP_CMD_IN_PROGRESS) { 3946 if (cmd == ELS_CMD_PLOGI) { 3947 delay = 1000; 3948 maxretry = 48; 3949 } 3950 retry = 1; 3951 break; 3952 } 3953 if (stat.un.b.lsRjtRsnCodeExp == 3954 LSEXP_CANT_GIVE_DATA) { 3955 if (cmd == ELS_CMD_PLOGI) { 3956 delay = 1000; 3957 maxretry = 48; 3958 } 3959 retry = 1; 3960 break; 3961 } 3962 if (cmd == ELS_CMD_PLOGI) { 3963 delay = 1000; 3964 maxretry = lpfc_max_els_tries + 1; 3965 retry = 1; 3966 break; 3967 } 3968 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3969 (cmd == ELS_CMD_FDISC) && 3970 (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){ 3971 lpfc_printf_vlog(vport, KERN_ERR, 3972 LOG_TRACE_EVENT, 3973 "0125 FDISC Failed (x%x). " 3974 "Fabric out of resources\n", 3975 stat.un.lsRjtError); 3976 lpfc_vport_set_state(vport, 3977 FC_VPORT_NO_FABRIC_RSCS); 3978 } 3979 break; 3980 3981 case LSRJT_LOGICAL_BSY: 3982 if ((cmd == ELS_CMD_PLOGI) || 3983 (cmd == ELS_CMD_PRLI) || 3984 (cmd == ELS_CMD_NVMEPRLI)) { 3985 delay = 1000; 3986 maxretry = 48; 3987 } else if (cmd == ELS_CMD_FDISC) { 3988 /* FDISC retry policy */ 3989 maxretry = 48; 3990 if (cmdiocb->retry >= 32) 3991 delay = 1000; 3992 } 3993 retry = 1; 3994 break; 3995 3996 case LSRJT_LOGICAL_ERR: 3997 /* There are some cases where switches return this 3998 * error when they are not ready and should be returning 3999 * Logical Busy. We should delay every time. 4000 */ 4001 if (cmd == ELS_CMD_FDISC && 4002 stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) { 4003 maxretry = 3; 4004 delay = 1000; 4005 retry = 1; 4006 } else if (cmd == ELS_CMD_FLOGI && 4007 stat.un.b.lsRjtRsnCodeExp == 4008 LSEXP_NOTHING_MORE) { 4009 vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; 4010 retry = 1; 4011 lpfc_printf_vlog(vport, KERN_ERR, 4012 LOG_TRACE_EVENT, 4013 "0820 FLOGI Failed (x%x). " 4014 "BBCredit Not Supported\n", 4015 stat.un.lsRjtError); 4016 } 4017 break; 4018 4019 case LSRJT_PROTOCOL_ERR: 4020 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 4021 (cmd == ELS_CMD_FDISC) && 4022 ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) || 4023 (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID)) 4024 ) { 4025 lpfc_printf_vlog(vport, KERN_ERR, 4026 LOG_TRACE_EVENT, 4027 "0122 FDISC Failed (x%x). " 4028 "Fabric Detected Bad WWN\n", 4029 stat.un.lsRjtError); 4030 lpfc_vport_set_state(vport, 4031 FC_VPORT_FABRIC_REJ_WWN); 4032 } 4033 break; 4034 case LSRJT_VENDOR_UNIQUE: 4035 if ((stat.un.b.vendorUnique == 0x45) && 4036 (cmd == ELS_CMD_FLOGI)) { 4037 goto out_retry; 4038 } 4039 break; 4040 case LSRJT_CMD_UNSUPPORTED: 4041 /* lpfc nvmet returns this type of LS_RJT when it 4042 * receives an FCP PRLI because lpfc nvmet only 4043 * support NVME. ELS request is terminated for FCP4 4044 * on this rport. 
4045 */ 4046 if (stat.un.b.lsRjtRsnCodeExp == 4047 LSEXP_REQ_UNSUPPORTED && cmd == ELS_CMD_PRLI) { 4048 spin_lock_irq(&ndlp->lock); 4049 ndlp->nlp_flag |= NLP_FCP_PRLI_RJT; 4050 spin_unlock_irq(&ndlp->lock); 4051 retry = 0; 4052 goto out_retry; 4053 } 4054 break; 4055 } 4056 break; 4057 4058 case IOSTAT_INTERMED_RSP: 4059 case IOSTAT_BA_RJT: 4060 break; 4061 4062 default: 4063 break; 4064 } 4065 4066 if (link_reset) { 4067 rc = lpfc_link_reset(vport); 4068 if (rc) { 4069 /* Do not give up. Retry PLOGI one more time and attempt 4070 * link reset if PLOGI fails again. 4071 */ 4072 retry = 1; 4073 delay = 100; 4074 goto out_retry; 4075 } 4076 return 1; 4077 } 4078 4079 if (did == FDMI_DID) 4080 retry = 1; 4081 4082 if ((cmd == ELS_CMD_FLOGI) && 4083 (phba->fc_topology != LPFC_TOPOLOGY_LOOP) && 4084 !lpfc_error_lost_link(irsp)) { 4085 /* FLOGI retry policy */ 4086 retry = 1; 4087 /* retry FLOGI forever */ 4088 if (phba->link_flag != LS_LOOPBACK_MODE) 4089 maxretry = 0; 4090 else 4091 maxretry = 2; 4092 4093 if (cmdiocb->retry >= 100) 4094 delay = 5000; 4095 else if (cmdiocb->retry >= 32) 4096 delay = 1000; 4097 } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) { 4098 /* retry FDISCs every second up to devloss */ 4099 retry = 1; 4100 maxretry = vport->cfg_devloss_tmo; 4101 delay = 1000; 4102 } 4103 4104 cmdiocb->retry++; 4105 if (maxretry && (cmdiocb->retry >= maxretry)) { 4106 phba->fc_stat.elsRetryExceeded++; 4107 retry = 0; 4108 } 4109 4110 if ((vport->load_flag & FC_UNLOADING) != 0) 4111 retry = 0; 4112 4113 out_retry: 4114 if (retry) { 4115 if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) { 4116 /* Stop retrying PLOGI and FDISC if in FCF discovery */ 4117 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 4118 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4119 "2849 Stop retry ELS command " 4120 "x%x to remote NPORT x%x, " 4121 "Data: x%x x%x\n", cmd, did, 4122 cmdiocb->retry, delay); 4123 return 0; 4124 } 4125 } 4126 4127 /* Retry ELS command <elsCmd> to remote NPORT <did> */ 4128 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4129 "0107 Retry ELS command x%x to remote " 4130 "NPORT x%x Data: x%x x%x\n", 4131 cmd, did, cmdiocb->retry, delay); 4132 4133 if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) && 4134 ((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) || 4135 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) != 4136 IOERR_NO_RESOURCES))) { 4137 /* Don't reset timer for no resources */ 4138 4139 /* If discovery / RSCN timer is running, reset it */ 4140 if (timer_pending(&vport->fc_disctmo) || 4141 (vport->fc_flag & FC_RSCN_MODE)) 4142 lpfc_set_disctmo(vport); 4143 } 4144 4145 phba->fc_stat.elsXmitRetry++; 4146 if (ndlp && delay) { 4147 phba->fc_stat.elsDelayRetry++; 4148 ndlp->nlp_retry = cmdiocb->retry; 4149 4150 /* delay is specified in milliseconds */ 4151 mod_timer(&ndlp->nlp_delayfunc, 4152 jiffies + msecs_to_jiffies(delay)); 4153 spin_lock_irq(&ndlp->lock); 4154 ndlp->nlp_flag |= NLP_DELAY_TMO; 4155 spin_unlock_irq(&ndlp->lock); 4156 4157 ndlp->nlp_prev_state = ndlp->nlp_state; 4158 if ((cmd == ELS_CMD_PRLI) || 4159 (cmd == ELS_CMD_NVMEPRLI)) 4160 lpfc_nlp_set_state(vport, ndlp, 4161 NLP_STE_PRLI_ISSUE); 4162 else 4163 lpfc_nlp_set_state(vport, ndlp, 4164 NLP_STE_NPR_NODE); 4165 ndlp->nlp_last_elscmd = cmd; 4166 4167 return 1; 4168 } 4169 switch (cmd) { 4170 case ELS_CMD_FLOGI: 4171 lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry); 4172 return 1; 4173 case ELS_CMD_FDISC: 4174 lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry); 4175 return 1; 4176 case ELS_CMD_PLOGI: 4177 if (ndlp) 
{ 4178 ndlp->nlp_prev_state = ndlp->nlp_state; 4179 lpfc_nlp_set_state(vport, ndlp, 4180 NLP_STE_PLOGI_ISSUE); 4181 } 4182 lpfc_issue_els_plogi(vport, did, cmdiocb->retry); 4183 return 1; 4184 case ELS_CMD_ADISC: 4185 ndlp->nlp_prev_state = ndlp->nlp_state; 4186 lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); 4187 lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry); 4188 return 1; 4189 case ELS_CMD_PRLI: 4190 case ELS_CMD_NVMEPRLI: 4191 ndlp->nlp_prev_state = ndlp->nlp_state; 4192 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 4193 lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry); 4194 return 1; 4195 case ELS_CMD_LOGO: 4196 ndlp->nlp_prev_state = ndlp->nlp_state; 4197 lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE); 4198 lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry); 4199 return 1; 4200 } 4201 } 4202 /* No retry ELS command <elsCmd> to remote NPORT <did> */ 4203 if (logerr) { 4204 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4205 "0137 No retry ELS command x%x to remote " 4206 "NPORT x%x: Out of Resources: Error:x%x/%x\n", 4207 cmd, did, irsp->ulpStatus, 4208 irsp->un.ulpWord[4]); 4209 } 4210 else { 4211 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4212 "0108 No retry ELS command x%x to remote " 4213 "NPORT x%x Retried:%d Error:x%x/%x\n", 4214 cmd, did, cmdiocb->retry, irsp->ulpStatus, 4215 irsp->un.ulpWord[4]); 4216 } 4217 return 0; 4218 } 4219 4220 /** 4221 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb 4222 * @phba: pointer to lpfc hba data structure. 4223 * @buf_ptr1: pointer to the lpfc DMA buffer data structure. 4224 * 4225 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s) 4226 * associated with a command IOCB back to the lpfc DMA buffer pool. It first 4227 * checks to see whether there is a lpfc DMA buffer associated with the 4228 * response of the command IOCB. If so, it will be released before releasing 4229 * the lpfc DMA buffer associated with the IOCB itself. 4230 * 4231 * Return code 4232 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4233 **/ 4234 static int 4235 lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1) 4236 { 4237 struct lpfc_dmabuf *buf_ptr; 4238 4239 /* Free the response before processing the command. */ 4240 if (!list_empty(&buf_ptr1->list)) { 4241 list_remove_head(&buf_ptr1->list, buf_ptr, 4242 struct lpfc_dmabuf, 4243 list); 4244 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4245 kfree(buf_ptr); 4246 } 4247 lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys); 4248 kfree(buf_ptr1); 4249 return 0; 4250 } 4251 4252 /** 4253 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl 4254 * @phba: pointer to lpfc hba data structure. 4255 * @buf_ptr: pointer to the lpfc dma buffer data structure. 4256 * 4257 * This routine releases the lpfc Direct Memory Access (DMA) buffer 4258 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer 4259 * pool. 4260 * 4261 * Return code 4262 * 0 - Successfully released lpfc DMA buffer (currently, always return 0) 4263 **/ 4264 static int 4265 lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr) 4266 { 4267 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 4268 kfree(buf_ptr); 4269 return 0; 4270 } 4271 4272 /** 4273 * lpfc_els_free_iocb - Free a command iocb and its associated resources 4274 * @phba: pointer to lpfc hba data structure. 4275 * @elsiocb: pointer to lpfc els command iocb data structure. 
 *
 * This routine frees a command IOCB and its associated resources. The
 * command IOCB data structure contains references to various associated
 * resources; these fields must be set to NULL if the associated reference
 * is not present:
 *   context1 - reference to ndlp
 *   context2 - reference to cmd
 *   context2->next - reference to rsp
 *   context3 - reference to bpl
 *
 * It first properly decrements the reference count held on ndlp for the
 * IOCB completion callback function. If the LPFC_DELAY_MEM_FREE flag is not
 * set, it invokes the lpfc_els_free_data() routine to release the Direct
 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
 * adds the DMA buffers to the @phba data structure for delayed release.
 * If a reference to the Buffer Pointer List (BPL) is present, the
 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
 * associated with the BPL. Finally, the lpfc_sli_release_iocbq() routine is
 * invoked to release the IOCB data structure back to the @phba IOCBQ list.
 *
 * Return code
 *   0 - Success (currently, always returns 0)
 **/
int
lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
{
	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;

	/* The I/O job is complete. Clear the context1 data. */
	elsiocb->context1 = NULL;

	/* context2 = cmd, context2->next = rsp, context3 = bpl */
	if (elsiocb->context2) {
		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
			/* Firmware could still be in progress of DMAing
			 * payload, so don't free data buffer till after
			 * a hbeat.
			 */
			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
			buf_ptr = elsiocb->context2;
			elsiocb->context2 = NULL;
			if (buf_ptr) {
				buf_ptr1 = NULL;
				spin_lock_irq(&phba->hbalock);
				if (!list_empty(&buf_ptr->list)) {
					list_remove_head(&buf_ptr->list,
						buf_ptr1, struct lpfc_dmabuf,
						list);
					INIT_LIST_HEAD(&buf_ptr1->list);
					list_add_tail(&buf_ptr1->list,
						&phba->elsbuf);
					phba->elsbuf_cnt++;
				}
				INIT_LIST_HEAD(&buf_ptr->list);
				list_add_tail(&buf_ptr->list, &phba->elsbuf);
				phba->elsbuf_cnt++;
				spin_unlock_irq(&phba->hbalock);
			}
		} else {
			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
			lpfc_els_free_data(phba, buf_ptr1);
			elsiocb->context2 = NULL;
		}
	}

	if (elsiocb->context3) {
		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
		lpfc_els_free_bpl(phba, buf_ptr);
		elsiocb->context3 = NULL;
	}
	lpfc_sli_release_iocbq(phba, elsiocb);
	return 0;
}

/**
 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function to the Logout (LOGO)
 * Accept (ACC) Response ELS command. This routine is invoked to indicate
 * the completion of the LOGO process. It invokes lpfc_nlp_not_used() to
 * release the ndlp if it holds the last remaining reference (reference count
 * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB
 * context1 field to NULL to inform the subsequent lpfc_els_free_iocb()
 * routine that no ndlp reference count needs to be decremented. Otherwise,
 * the ndlp reference count will be decremented by the lpfc_els_free_iocb()
 * routine. Finally, lpfc_els_free_iocb() is invoked to release the IOCB
 * data structure.
 **/
static void
lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_vport *vport = cmdiocb->vport;
	IOCB_t *irsp;

	irsp = &rspiocb->iocb;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"ACC LOGO cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
	/* ACC to LOGO completes to NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0109 ACC to LOGO completes to NPort x%x refcnt %d "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);

	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
		/* NPort Recovery mode or node is just allocated */
		if (!lpfc_nlp_not_used(ndlp)) {
			/* If the ndlp is being used by another discovery
			 * thread, just unregister the RPI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		} else {
			/* Indicate that the node has already been released;
			 * it must not be referenced from within
			 * lpfc_els_free_iocb.
			 */
			cmdiocb->context1 = NULL;
		}
	}

	/*
	 * The driver received a LOGO from the rport and has ACK'd it.
	 * At this point, the driver is done so release the IOCB
	 */
	lpfc_els_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_mbx_cmpl_dflt_rpi - Completion callback func for unreg dflt rpi mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion callback function for the unregister default
 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
 * decrements the ndlp reference count held for this completion callback
 * function. After that, it invokes lpfc_nlp_not_used() to check
 * whether there is only one reference left on the ndlp. If so, it will
 * perform one more decrement and trigger the release of the ndlp.
 **/
void
lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

	pmb->ctx_buf = NULL;
	pmb->ctx_ndlp = NULL;

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);
	if (ndlp) {
		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
				 "0006 rpi x%x DID:%x flg:%x %d x%px\n",
				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
				 kref_read(&ndlp->kref),
				 ndlp);
		/* This is the end of the default RPI cleanup logic for
		 * this ndlp and it could get released. Clear the nlp_flags to
		 * prevent any further processing.
		 */
		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
	}

	return;
}

/**
 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
 *
 * This routine is the completion callback function for an ELS Response IOCB
 * command. In the normal case, this callback function just properly sets the
 * nlp_flag bitmap in the ndlp data structure; if the mbox command reference
 * field in the command IOCB is not NULL, the referred mailbox command will
 * be sent out. It then invokes the lpfc_els_free_iocb() routine to release
 * the IOCB.
 **/
static void
lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		  struct lpfc_iocbq *rspiocb)
{
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
	struct Scsi_Host *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
	IOCB_t *irsp;
	LPFC_MBOXQ_t *mbox = NULL;
	struct lpfc_dmabuf *mp = NULL;

	irsp = &rspiocb->iocb;

	if (!vport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3177 ELS response failed\n");
		goto out;
	}
	if (cmdiocb->context_un.mbox)
		mbox = cmdiocb->context_un.mbox;

	/* Check to see if link went down during discovery */
	if (!ndlp || lpfc_els_chk_latt(vport)) {
		if (mbox) {
			mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
			if (mp) {
				lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mempool_free(mbox, phba->mbox_mem_pool);
		}
		goto out;
	}

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
		"ELS rsp cmpl: status:x%x/x%x did:x%x",
		irsp->ulpStatus, irsp->un.ulpWord[4],
		cmdiocb->iocb.un.elsreq64.remoteID);
	/* ELS response tag <ulpIoTag> completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0110 ELS response tag x%x completes "
			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi);
	if (mbox) {
		if ((rspiocb->iocb.ulpStatus == 0) &&
		    (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
			if (!lpfc_unreg_rpi(vport, ndlp) &&
			    (!(vport->fc_flag & FC_PT2PT))) {
				if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) {
					lpfc_printf_vlog(vport, KERN_INFO,
							 LOG_DISCOVERY,
							 "0314 PLOGI recov "
							 "DID x%x "
							 "Data: x%x x%x x%x\n",
							 ndlp->nlp_DID,
							 ndlp->nlp_state,
							 ndlp->nlp_rpi,
							 ndlp->nlp_flag);
					mp = mbox->ctx_buf;
					if (mp) {
						lpfc_mbuf_free(phba, mp->virt,
							       mp->phys);
						kfree(mp);
					}
					mempool_free(mbox, phba->mbox_mem_pool);
					goto out;
				}
			}

			/* Increment reference count to ndlp to hold the
			 * reference to ndlp for the callback function.
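			 * The reference is dropped again in the mailbox
			 * completion path, or immediately below if the
			 * mailbox cannot be issued.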
4539 */ 4540 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 4541 if (!mbox->ctx_ndlp) 4542 goto out; 4543 4544 mbox->vport = vport; 4545 if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { 4546 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 4547 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 4548 } 4549 else { 4550 mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; 4551 ndlp->nlp_prev_state = ndlp->nlp_state; 4552 lpfc_nlp_set_state(vport, ndlp, 4553 NLP_STE_REG_LOGIN_ISSUE); 4554 } 4555 4556 ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; 4557 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 4558 != MBX_NOT_FINISHED) 4559 goto out; 4560 4561 /* Decrement the ndlp reference count we 4562 * set for this failed mailbox command. 4563 */ 4564 lpfc_nlp_put(ndlp); 4565 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4566 4567 /* ELS rsp: Cannot issue reg_login for <NPortid> */ 4568 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4569 "0138 ELS rsp: Cannot issue reg_login for x%x " 4570 "Data: x%x x%x x%x\n", 4571 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4572 ndlp->nlp_rpi); 4573 } 4574 mp = (struct lpfc_dmabuf *)mbox->ctx_buf; 4575 if (mp) { 4576 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4577 kfree(mp); 4578 } 4579 mempool_free(mbox, phba->mbox_mem_pool); 4580 } 4581 out: 4582 if (ndlp && shost) { 4583 spin_lock_irq(&ndlp->lock); 4584 if (mbox) 4585 ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; 4586 ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; 4587 spin_unlock_irq(&ndlp->lock); 4588 } 4589 4590 /* Release the originating I/O reference. */ 4591 lpfc_els_free_iocb(phba, cmdiocb); 4592 lpfc_nlp_put(ndlp); 4593 return; 4594 } 4595 4596 /** 4597 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command 4598 * @vport: pointer to a host virtual N_Port data structure. 4599 * @flag: the els command code to be accepted. 4600 * @oldiocb: pointer to the original lpfc command iocb data structure. 4601 * @ndlp: pointer to a node-list data structure. 4602 * @mbox: pointer to the driver internal queue element for mailbox command. 4603 * 4604 * This routine prepares and issues an Accept (ACC) response IOCB 4605 * command. It uses the @flag to properly set up the IOCB field for the 4606 * specific ACC response command to be issued and invokes the 4607 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a 4608 * @mbox pointer is passed in, it will be put into the context_un.mbox 4609 * field of the IOCB for the completion callback function to issue the 4610 * mailbox command to the HBA later when callback is invoked. 4611 * 4612 * Note that the ndlp reference count will be incremented by 1 for holding the 4613 * ndlp and the reference to ndlp will be stored into the context1 field of 4614 * the IOCB for the completion callback function to the corresponding 4615 * response ELS IOCB command. 
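 *
 * Typical usage is from an unsolicited ELS receive path (sketch only; the
 * mailbox variable shown here is hypothetical):
 *
 *	rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, oldiocb, ndlp, login_mbox);
 *
 * A non-zero return code means the ACC could not be queued.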
4616 * 4617 * Return code 4618 * 0 - Successfully issued acc response 4619 * 1 - Failed to issue acc response 4620 **/ 4621 int 4622 lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, 4623 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4624 LPFC_MBOXQ_t *mbox) 4625 { 4626 struct lpfc_hba *phba = vport->phba; 4627 IOCB_t *icmd; 4628 IOCB_t *oldcmd; 4629 struct lpfc_iocbq *elsiocb; 4630 uint8_t *pcmd; 4631 struct serv_parm *sp; 4632 uint16_t cmdsize; 4633 int rc; 4634 ELS_PKT *els_pkt_ptr; 4635 4636 oldcmd = &oldiocb->iocb; 4637 4638 switch (flag) { 4639 case ELS_CMD_ACC: 4640 cmdsize = sizeof(uint32_t); 4641 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4642 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4643 if (!elsiocb) { 4644 spin_lock_irq(&ndlp->lock); 4645 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 4646 spin_unlock_irq(&ndlp->lock); 4647 return 1; 4648 } 4649 4650 icmd = &elsiocb->iocb; 4651 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4652 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4653 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4654 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4655 pcmd += sizeof(uint32_t); 4656 4657 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4658 "Issue ACC: did:x%x flg:x%x", 4659 ndlp->nlp_DID, ndlp->nlp_flag, 0); 4660 break; 4661 case ELS_CMD_FLOGI: 4662 case ELS_CMD_PLOGI: 4663 cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t)); 4664 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, 4665 ndlp, ndlp->nlp_DID, ELS_CMD_ACC); 4666 if (!elsiocb) 4667 return 1; 4668 4669 icmd = &elsiocb->iocb; 4670 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4671 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4672 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4673 4674 if (mbox) 4675 elsiocb->context_un.mbox = mbox; 4676 4677 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4678 pcmd += sizeof(uint32_t); 4679 sp = (struct serv_parm *)pcmd; 4680 4681 if (flag == ELS_CMD_FLOGI) { 4682 /* Copy the received service parameters back */ 4683 memcpy(sp, &phba->fc_fabparam, 4684 sizeof(struct serv_parm)); 4685 4686 /* Clear the F_Port bit */ 4687 sp->cmn.fPort = 0; 4688 4689 /* Mark all class service parameters as invalid */ 4690 sp->cls1.classValid = 0; 4691 sp->cls2.classValid = 0; 4692 sp->cls3.classValid = 0; 4693 sp->cls4.classValid = 0; 4694 4695 /* Copy our worldwide names */ 4696 memcpy(&sp->portName, &vport->fc_sparam.portName, 4697 sizeof(struct lpfc_name)); 4698 memcpy(&sp->nodeName, &vport->fc_sparam.nodeName, 4699 sizeof(struct lpfc_name)); 4700 } else { 4701 memcpy(pcmd, &vport->fc_sparam, 4702 sizeof(struct serv_parm)); 4703 4704 sp->cmn.valid_vendor_ver_level = 0; 4705 memset(sp->un.vendorVersion, 0, 4706 sizeof(sp->un.vendorVersion)); 4707 sp->cmn.bbRcvSizeMsb &= 0xF; 4708 4709 /* If our firmware supports this feature, convey that 4710 * info to the target using the vendor specific field. 
			 */
			if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
				sp->cmn.valid_vendor_ver_level = 1;
				sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
				sp->un.vv.flags =
					cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
			}
		}

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
			"Issue ACC FLOGI/PLOGI: did:x%x flg:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag, 0);
		break;
	case ELS_CMD_PRLO:
		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
		if (!elsiocb)
			return 1;

		icmd = &elsiocb->iocb;
		oldcmd = &oldiocb->iocb;
		icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */
		icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id;
		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);

		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
		       sizeof(uint32_t) + sizeof(PRLO));
		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
		els_pkt_ptr = (ELS_PKT *) pcmd;
		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;

		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
			"Issue ACC PRLO: did:x%x flg:x%x",
			ndlp->nlp_DID, ndlp->nlp_flag, 0);
		break;
	default:
		return 1;
	}
	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
		spin_lock_irq(&ndlp->lock);
		if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED ||
		      ndlp->nlp_flag & NLP_REG_LOGIN_SEND))
			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
		spin_unlock_irq(&ndlp->lock);
		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
	} else {
		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
	}

	phba->fc_stat.elsXmitACC++;
	elsiocb->context1 = lpfc_nlp_get(ndlp);
	if (!elsiocb->context1) {
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_els_free_iocb(phba, elsiocb);
		lpfc_nlp_put(ndlp);
		return 1;
	}

	/* Xmit ELS ACC response tag <ulpIoTag> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, "
			 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x "
			 "RPI: x%x, fc_flag x%x\n",
			 rc, elsiocb->iotag, elsiocb->sli4_xritag,
			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
			 ndlp->nlp_rpi, vport->fc_flag);
	return 0;
}

/**
 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
 * @vport: pointer to a virtual N_Port data structure.
 * @rejectError: reject response to issue
 * @oldiocb: pointer to the original lpfc command iocb data structure.
 * @ndlp: pointer to a node-list data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * This routine prepares and issues a Reject (RJT) response IOCB
 * command. If a @mbox pointer is passed in, it will be put into the
 * context_un.mbox field of the IOCB for the completion callback function
 * to issue to the HBA later.
 *
 * Note that the ndlp reference count will be incremented by 1 for holding the
 * ndlp and the reference to ndlp will be stored into the context1 field of
 * the IOCB for the completion callback function to the reject response
 * ELS IOCB command.
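 *
 * Sketch of a typical rejection (illustrative only; the reason and
 * explanation codes are arbitrary examples):
 *
 *	struct ls_rjt stat;
 *
 *	memset(&stat, 0, sizeof(stat));
 *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *	stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
 *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, oldiocb, ndlp, NULL);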
4802 * 4803 * Return code 4804 * 0 - Successfully issued reject response 4805 * 1 - Failed to issue reject response 4806 **/ 4807 int 4808 lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, 4809 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp, 4810 LPFC_MBOXQ_t *mbox) 4811 { 4812 int rc; 4813 struct lpfc_hba *phba = vport->phba; 4814 IOCB_t *icmd; 4815 IOCB_t *oldcmd; 4816 struct lpfc_iocbq *elsiocb; 4817 uint8_t *pcmd; 4818 uint16_t cmdsize; 4819 4820 cmdsize = 2 * sizeof(uint32_t); 4821 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4822 ndlp->nlp_DID, ELS_CMD_LS_RJT); 4823 if (!elsiocb) 4824 return 1; 4825 4826 icmd = &elsiocb->iocb; 4827 oldcmd = &oldiocb->iocb; 4828 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4829 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4830 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4831 4832 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 4833 pcmd += sizeof(uint32_t); 4834 *((uint32_t *) (pcmd)) = rejectError; 4835 4836 if (mbox) 4837 elsiocb->context_un.mbox = mbox; 4838 4839 /* Xmit ELS RJT <err> response tag <ulpIoTag> */ 4840 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4841 "0129 Xmit ELS RJT x%x response tag x%x " 4842 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 4843 "rpi x%x\n", 4844 rejectError, elsiocb->iotag, 4845 elsiocb->iocb.ulpContext, ndlp->nlp_DID, 4846 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); 4847 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4848 "Issue LS_RJT: did:x%x flg:x%x err:x%x", 4849 ndlp->nlp_DID, ndlp->nlp_flag, rejectError); 4850 4851 phba->fc_stat.elsXmitLSRJT++; 4852 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4853 elsiocb->context1 = lpfc_nlp_get(ndlp); 4854 if (!elsiocb->context1) { 4855 lpfc_els_free_iocb(phba, elsiocb); 4856 return 1; 4857 } 4858 4859 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4860 if (rc == IOCB_ERROR) { 4861 lpfc_els_free_iocb(phba, elsiocb); 4862 lpfc_nlp_put(ndlp); 4863 return 1; 4864 } 4865 4866 return 0; 4867 } 4868 4869 /** 4870 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd 4871 * @vport: pointer to a virtual N_Port data structure. 4872 * @oldiocb: pointer to the original lpfc command iocb data structure. 4873 * @ndlp: pointer to a node-list data structure. 4874 * 4875 * This routine prepares and issues an Accept (ACC) response to Address 4876 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB 4877 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4878 * 4879 * Note that the ndlp reference count will be incremented by 1 for holding the 4880 * ndlp and the reference to ndlp will be stored into the context1 field of 4881 * the IOCB for the completion callback function to the ADISC Accept response 4882 * ELS IOCB command. 
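 *
 * A responder in the unsolicited-ADISC path typically needs nothing more
 * than (sketch only; the calling context is hypothetical):
 *
 *	rc = lpfc_els_rsp_adisc_acc(vport, oldiocb, ndlp);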
4883 * 4884 * Return code 4885 * 0 - Successfully issued acc adisc response 4886 * 1 - Failed to issue adisc acc response 4887 **/ 4888 int 4889 lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4890 struct lpfc_nodelist *ndlp) 4891 { 4892 struct lpfc_hba *phba = vport->phba; 4893 ADISC *ap; 4894 IOCB_t *icmd, *oldcmd; 4895 struct lpfc_iocbq *elsiocb; 4896 uint8_t *pcmd; 4897 uint16_t cmdsize; 4898 int rc; 4899 4900 cmdsize = sizeof(uint32_t) + sizeof(ADISC); 4901 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4902 ndlp->nlp_DID, ELS_CMD_ACC); 4903 if (!elsiocb) 4904 return 1; 4905 4906 icmd = &elsiocb->iocb; 4907 oldcmd = &oldiocb->iocb; 4908 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 4909 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 4910 4911 /* Xmit ADISC ACC response tag <ulpIoTag> */ 4912 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4913 "0130 Xmit ADISC ACC response iotag x%x xri: " 4914 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", 4915 elsiocb->iotag, elsiocb->iocb.ulpContext, 4916 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4917 ndlp->nlp_rpi); 4918 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 4919 4920 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 4921 pcmd += sizeof(uint32_t); 4922 4923 ap = (ADISC *) (pcmd); 4924 ap->hardAL_PA = phba->fc_pref_ALPA; 4925 memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 4926 memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 4927 ap->DID = be32_to_cpu(vport->fc_myDID); 4928 4929 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 4930 "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", 4931 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 4932 4933 phba->fc_stat.elsXmitACC++; 4934 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4935 elsiocb->context1 = lpfc_nlp_get(ndlp); 4936 if (!elsiocb->context1) { 4937 lpfc_els_free_iocb(phba, elsiocb); 4938 return 1; 4939 } 4940 4941 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4942 if (rc == IOCB_ERROR) { 4943 lpfc_els_free_iocb(phba, elsiocb); 4944 lpfc_nlp_put(ndlp); 4945 return 1; 4946 } 4947 4948 /* Xmit ELS ACC response tag <ulpIoTag> */ 4949 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 4950 "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " 4951 "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " 4952 "RPI: x%x, fc_flag x%x\n", 4953 rc, elsiocb->iotag, elsiocb->sli4_xritag, 4954 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 4955 ndlp->nlp_rpi, vport->fc_flag); 4956 return 0; 4957 } 4958 4959 /** 4960 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd 4961 * @vport: pointer to a virtual N_Port data structure. 4962 * @oldiocb: pointer to the original lpfc command iocb data structure. 4963 * @ndlp: pointer to a node-list data structure. 4964 * 4965 * This routine prepares and issues an Accept (ACC) response to Process 4966 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB 4967 * and invokes the lpfc_sli_issue_iocb() routine to send out the command. 4968 * 4969 * Note that the ndlp reference count will be incremented by 1 for holding the 4970 * ndlp and the reference to ndlp will be stored into the context1 field of 4971 * the IOCB for the completion callback function to the PRLI Accept response 4972 * ELS IOCB command. 
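 *
 * The FC-4 type of the ACC (FCP or NVME) is derived from the received PRLI
 * payload carried in @oldiocb, so a caller simply passes the original iocb
 * through (sketch only; the calling context is hypothetical):
 *
 *	rc = lpfc_els_rsp_prli_acc(vport, oldiocb, ndlp);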
4973 * 4974 * Return code 4975 * 0 - Successfully issued acc prli response 4976 * 1 - Failed to issue acc prli response 4977 **/ 4978 int 4979 lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, 4980 struct lpfc_nodelist *ndlp) 4981 { 4982 struct lpfc_hba *phba = vport->phba; 4983 PRLI *npr; 4984 struct lpfc_nvme_prli *npr_nvme; 4985 lpfc_vpd_t *vpd; 4986 IOCB_t *icmd; 4987 IOCB_t *oldcmd; 4988 struct lpfc_iocbq *elsiocb; 4989 uint8_t *pcmd; 4990 uint16_t cmdsize; 4991 uint32_t prli_fc4_req, *req_payload; 4992 struct lpfc_dmabuf *req_buf; 4993 int rc; 4994 u32 elsrspcmd; 4995 4996 /* Need the incoming PRLI payload to determine if the ACC is for an 4997 * FC4 or NVME PRLI type. The PRLI type is at word 1. 4998 */ 4999 req_buf = (struct lpfc_dmabuf *)oldiocb->context2; 5000 req_payload = (((uint32_t *)req_buf->virt) + 1); 5001 5002 /* PRLI type payload is at byte 3 for FCP or NVME. */ 5003 prli_fc4_req = be32_to_cpu(*req_payload); 5004 prli_fc4_req = (prli_fc4_req >> 24) & 0xff; 5005 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5006 "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n", 5007 prli_fc4_req, *((uint32_t *)req_payload)); 5008 5009 if (prli_fc4_req == PRLI_FCP_TYPE) { 5010 cmdsize = sizeof(uint32_t) + sizeof(PRLI); 5011 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)); 5012 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5013 cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli); 5014 elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK)); 5015 } else { 5016 return 1; 5017 } 5018 5019 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5020 ndlp->nlp_DID, elsrspcmd); 5021 if (!elsiocb) 5022 return 1; 5023 5024 icmd = &elsiocb->iocb; 5025 oldcmd = &oldiocb->iocb; 5026 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5027 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5028 5029 /* Xmit PRLI ACC response tag <ulpIoTag> */ 5030 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5031 "0131 Xmit PRLI ACC response tag x%x xri x%x, " 5032 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 5033 elsiocb->iotag, elsiocb->iocb.ulpContext, 5034 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5035 ndlp->nlp_rpi); 5036 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5037 memset(pcmd, 0, cmdsize); 5038 5039 *((uint32_t *)(pcmd)) = elsrspcmd; 5040 pcmd += sizeof(uint32_t); 5041 5042 /* For PRLI, remainder of payload is PRLI parameter page */ 5043 vpd = &phba->vpd; 5044 5045 if (prli_fc4_req == PRLI_FCP_TYPE) { 5046 /* 5047 * If the remote port is a target and our firmware version 5048 * is 3.20 or later, set the following bits for FC-TAPE 5049 * support. 
5050 */ 5051 npr = (PRLI *) pcmd; 5052 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5053 (vpd->rev.feaLevelHigh >= 0x02)) { 5054 npr->ConfmComplAllowed = 1; 5055 npr->Retry = 1; 5056 npr->TaskRetryIdReq = 1; 5057 } 5058 npr->acceptRspCode = PRLI_REQ_EXECUTED; 5059 npr->estabImagePair = 1; 5060 npr->readXferRdyDis = 1; 5061 npr->ConfmComplAllowed = 1; 5062 npr->prliType = PRLI_FCP_TYPE; 5063 npr->initiatorFunc = 1; 5064 } else if (prli_fc4_req & PRLI_NVME_TYPE) { 5065 /* Respond with an NVME PRLI Type */ 5066 npr_nvme = (struct lpfc_nvme_prli *) pcmd; 5067 bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); 5068 bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ 5069 bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED); 5070 if (phba->nvmet_support) { 5071 bf_set(prli_tgt, npr_nvme, 1); 5072 bf_set(prli_disc, npr_nvme, 1); 5073 if (phba->cfg_nvme_enable_fb) { 5074 bf_set(prli_fba, npr_nvme, 1); 5075 5076 /* TBD. Target mode needs to post buffers 5077 * that support the configured first burst 5078 * byte size. 5079 */ 5080 bf_set(prli_fb_sz, npr_nvme, 5081 phba->cfg_nvmet_fb_size); 5082 } 5083 } else { 5084 bf_set(prli_init, npr_nvme, 1); 5085 } 5086 5087 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 5088 "6015 NVME issue PRLI ACC word1 x%08x " 5089 "word4 x%08x word5 x%08x flag x%x, " 5090 "fcp_info x%x nlp_type x%x\n", 5091 npr_nvme->word1, npr_nvme->word4, 5092 npr_nvme->word5, ndlp->nlp_flag, 5093 ndlp->nlp_fcp_info, ndlp->nlp_type); 5094 npr_nvme->word1 = cpu_to_be32(npr_nvme->word1); 5095 npr_nvme->word4 = cpu_to_be32(npr_nvme->word4); 5096 npr_nvme->word5 = cpu_to_be32(npr_nvme->word5); 5097 } else 5098 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5099 "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n", 5100 prli_fc4_req, ndlp->nlp_fc4_type, 5101 ndlp->nlp_DID); 5102 5103 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5104 "Issue ACC PRLI: did:x%x flg:x%x", 5105 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5106 5107 phba->fc_stat.elsXmitACC++; 5108 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5109 elsiocb->context1 = lpfc_nlp_get(ndlp); 5110 if (!elsiocb->context1) { 5111 lpfc_els_free_iocb(phba, elsiocb); 5112 return 1; 5113 } 5114 5115 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5116 if (rc == IOCB_ERROR) { 5117 lpfc_els_free_iocb(phba, elsiocb); 5118 lpfc_nlp_put(ndlp); 5119 return 1; 5120 } 5121 5122 return 0; 5123 } 5124 5125 /** 5126 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command 5127 * @vport: pointer to a virtual N_Port data structure. 5128 * @format: rnid command format. 5129 * @oldiocb: pointer to the original lpfc command iocb data structure. 5130 * @ndlp: pointer to a node-list data structure. 5131 * 5132 * This routine issues a Request Node Identification Data (RNID) Accept 5133 * (ACC) response. It constructs the RNID ACC response command according to 5134 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to 5135 * issue the response. 5136 * 5137 * Note that the ndlp reference count will be incremented by 1 for holding the 5138 * ndlp and the reference to ndlp will be stored into the context1 field of 5139 * the IOCB for the completion callback function. 
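 *
 * Payload sizing used by the routine below (a summary of its own logic,
 * not additional behavior): the ACC always carries the ELS command word,
 * the RNID format/length word and the port and node names; a topology
 * discovery block is appended only when requested by @format:
 *
 *   cmdsize = sizeof(uint32_t) + sizeof(uint32_t) +
 *             2 * sizeof(struct lpfc_name);
 *   if (format)
 *           cmdsize += sizeof(RNID_TOP_DISC);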
5140 * 5141 * Return code 5142 * 0 - Successfully issued acc rnid response 5143 * 1 - Failed to issue acc rnid response 5144 **/ 5145 static int 5146 lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, 5147 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5148 { 5149 struct lpfc_hba *phba = vport->phba; 5150 RNID *rn; 5151 IOCB_t *icmd, *oldcmd; 5152 struct lpfc_iocbq *elsiocb; 5153 uint8_t *pcmd; 5154 uint16_t cmdsize; 5155 int rc; 5156 5157 cmdsize = sizeof(uint32_t) + sizeof(uint32_t) 5158 + (2 * sizeof(struct lpfc_name)); 5159 if (format) 5160 cmdsize += sizeof(RNID_TOP_DISC); 5161 5162 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 5163 ndlp->nlp_DID, ELS_CMD_ACC); 5164 if (!elsiocb) 5165 return 1; 5166 5167 icmd = &elsiocb->iocb; 5168 oldcmd = &oldiocb->iocb; 5169 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 5170 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 5171 5172 /* Xmit RNID ACC response tag <ulpIoTag> */ 5173 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5174 "0132 Xmit RNID ACC response tag x%x xri x%x\n", 5175 elsiocb->iotag, elsiocb->iocb.ulpContext); 5176 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5177 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5178 pcmd += sizeof(uint32_t); 5179 5180 memset(pcmd, 0, sizeof(RNID)); 5181 rn = (RNID *) (pcmd); 5182 rn->Format = format; 5183 rn->CommonLen = (2 * sizeof(struct lpfc_name)); 5184 memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name)); 5185 memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name)); 5186 switch (format) { 5187 case 0: 5188 rn->SpecificLen = 0; 5189 break; 5190 case RNID_TOPOLOGY_DISC: 5191 rn->SpecificLen = sizeof(RNID_TOP_DISC); 5192 memcpy(&rn->un.topologyDisc.portName, 5193 &vport->fc_portname, sizeof(struct lpfc_name)); 5194 rn->un.topologyDisc.unitType = RNID_HBA; 5195 rn->un.topologyDisc.physPort = 0; 5196 rn->un.topologyDisc.attachedNodes = 0; 5197 break; 5198 default: 5199 rn->CommonLen = 0; 5200 rn->SpecificLen = 0; 5201 break; 5202 } 5203 5204 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5205 "Issue ACC RNID: did:x%x flg:x%x refcnt %d", 5206 ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); 5207 5208 phba->fc_stat.elsXmitACC++; 5209 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5210 elsiocb->context1 = lpfc_nlp_get(ndlp); 5211 if (!elsiocb->context1) { 5212 lpfc_els_free_iocb(phba, elsiocb); 5213 return 1; 5214 } 5215 5216 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5217 if (rc == IOCB_ERROR) { 5218 lpfc_els_free_iocb(phba, elsiocb); 5219 lpfc_nlp_put(ndlp); 5220 return 1; 5221 } 5222 5223 return 0; 5224 } 5225 5226 /** 5227 * lpfc_els_clear_rrq - Clear the rq that this rrq describes. 5228 * @vport: pointer to a virtual N_Port data structure. 5229 * @iocb: pointer to the lpfc command iocb data structure. 5230 * @ndlp: pointer to a node-list data structure. 
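 *
 * This routine parses the RRQ payload carried by @iocb and clears the
 * matching active RRQ resource, roughly (a paraphrase of the code below,
 * not additional behavior):
 *
 *   xri = (vport->fc_myDID == rrq DID) ? rrq OX_ID : rrq RX_ID;
 *   prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
 *   if (prrq)
 *           lpfc_clr_rrq_active(phba, xri, prrq);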
5231 * 5232 * Return 5233 **/ 5234 static void 5235 lpfc_els_clear_rrq(struct lpfc_vport *vport, 5236 struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp) 5237 { 5238 struct lpfc_hba *phba = vport->phba; 5239 uint8_t *pcmd; 5240 struct RRQ *rrq; 5241 uint16_t rxid; 5242 uint16_t xri; 5243 struct lpfc_node_rrq *prrq; 5244 5245 5246 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); 5247 pcmd += sizeof(uint32_t); 5248 rrq = (struct RRQ *)pcmd; 5249 rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); 5250 rxid = bf_get(rrq_rxid, rrq); 5251 5252 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5253 "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" 5254 " x%x x%x\n", 5255 be32_to_cpu(bf_get(rrq_did, rrq)), 5256 bf_get(rrq_oxid, rrq), 5257 rxid, 5258 iocb->iotag, iocb->iocb.ulpContext); 5259 5260 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, 5261 "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", 5262 ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); 5263 if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) 5264 xri = bf_get(rrq_oxid, rrq); 5265 else 5266 xri = rxid; 5267 prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); 5268 if (prrq) 5269 lpfc_clr_rrq_active(phba, xri, prrq); 5270 return; 5271 } 5272 5273 /** 5274 * lpfc_els_rsp_echo_acc - Issue echo acc response 5275 * @vport: pointer to a virtual N_Port data structure. 5276 * @data: pointer to echo data to return in the accept. 5277 * @oldiocb: pointer to the original lpfc command iocb data structure. 5278 * @ndlp: pointer to a node-list data structure. 5279 * 5280 * Return code 5281 * 0 - Successfully issued acc echo response 5282 * 1 - Failed to issue acc echo response 5283 **/ 5284 static int 5285 lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, 5286 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 5287 { 5288 struct lpfc_hba *phba = vport->phba; 5289 struct lpfc_iocbq *elsiocb; 5290 uint8_t *pcmd; 5291 uint16_t cmdsize; 5292 int rc; 5293 5294 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 5295 5296 /* The accumulated length can exceed the BPL_SIZE. 
For
5297 	 * now, use this as the limit
5298 	 */
5299 	if (cmdsize > LPFC_BPL_SIZE)
5300 		cmdsize = LPFC_BPL_SIZE;
5301 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
5302 				     ndlp->nlp_DID, ELS_CMD_ACC);
5303 	if (!elsiocb)
5304 		return 1;
5305 
5306 	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;  /* Xri / rx_id */
5307 	elsiocb->iocb.unsli3.rcvsli3.ox_id = oldiocb->iocb.unsli3.rcvsli3.ox_id;
5308 
5309 	/* Xmit ECHO ACC response tag <ulpIoTag> */
5310 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5311 			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
5312 			 elsiocb->iotag, elsiocb->iocb.ulpContext);
5313 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
5314 	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
5315 	pcmd += sizeof(uint32_t);
5316 	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
5317 
5318 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
5319 			      "Issue ACC ECHO: did:x%x flg:x%x refcnt %d",
5320 			      ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref));
5321 
5322 	phba->fc_stat.elsXmitACC++;
5323 	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
5324 	elsiocb->context1 = lpfc_nlp_get(ndlp);
5325 	if (!elsiocb->context1) {
5326 		lpfc_els_free_iocb(phba, elsiocb);
5327 		return 1;
5328 	}
5329 
5330 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
5331 	if (rc == IOCB_ERROR) {
5332 		lpfc_els_free_iocb(phba, elsiocb);
5333 		lpfc_nlp_put(ndlp);
5334 		return 1;
5335 	}
5336 
5337 	return 0;
5338 }
5339 
5340 /**
5341  * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
5342  * @vport: pointer to a host virtual N_Port data structure.
5343  *
5344  * This routine issues Address Discover (ADISC) ELS commands to those
5345  * N_Ports of the @vport that are in node port recovery state and for
5346  * which ADISC has not yet been issued. Each time an ELS ADISC IOCB is
5347  * issued by invoking the lpfc_issue_els_adisc() routine, the per-@vport
5348  * discovery count (num_disc_nodes) is incremented. If num_disc_nodes
5349  * reaches the pre-configured threshold (cfg_discovery_threads), the
5350  * FC_NLP_MORE bit is set in the @vport fc_flag and issuing of the
5351  * remaining ADISC IOCBs is deferred for a later pass. Otherwise, after
5352  * walking through all the ndlps of the @vport without issuing any ADISC
5353  * IOCB, the FC_NLP_MORE bit is cleared from the @vport fc_flag to
5354  * indicate that no more ADISCs need to be sent.
5355  *
5356  * Return code
5357  * The number of N_Ports with adisc issued.
5358  **/
5359 int
5360 lpfc_els_disc_adisc(struct lpfc_vport *vport)
5361 {
5362 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5363 	struct lpfc_nodelist *ndlp, *next_ndlp;
5364 	int sentadisc = 0;
5365 
5366 	/* go thru NPR nodes and issue any remaining ELS ADISCs */
5367 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
5368 		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
5369 		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
5370 		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
5371 			spin_lock_irq(&ndlp->lock);
5372 			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
5373 			spin_unlock_irq(&ndlp->lock);
5374 			ndlp->nlp_prev_state = ndlp->nlp_state;
5375 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
5376 			lpfc_issue_els_adisc(vport, ndlp, 0);
5377 			sentadisc++;
5378 			vport->num_disc_nodes++;
5379 			if (vport->num_disc_nodes >=
5380 			    vport->cfg_discovery_threads) {
5381 				spin_lock_irq(shost->host_lock);
5382 				vport->fc_flag |= FC_NLP_MORE;
5383 				spin_unlock_irq(shost->host_lock);
5384 				break;
5385 			}
5386 		}
5387 	}
5388 	if (sentadisc == 0) {
5389 		spin_lock_irq(shost->host_lock);
5390 		vport->fc_flag &= ~FC_NLP_MORE;
5391 		spin_unlock_irq(shost->host_lock);
5392 	}
5393 	return sentadisc;
5394 }
5395 
5396 /**
5397  * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
5398  * @vport: pointer to a host virtual N_Port data structure.
5399  *
5400  * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
5401  * of the @vport that are in node port recovery state. Each time an ELS
5402  * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine, the
5403  * per-@vport discovery count (num_disc_nodes) is incremented. If
5404  * num_disc_nodes reaches the pre-configured threshold
5405  * (cfg_discovery_threads), the FC_NLP_MORE bit is set in the @vport
5406  * fc_flag and issuing of the remaining PLOGI IOCBs is deferred for a
5407  * later pass. Otherwise, after walking through all the ndlps of the
5408  * @vport without issuing any PLOGI IOCB, the FC_NLP_MORE bit is cleared
5409  * from the @vport fc_flag to indicate that no more PLOGIs need to be
5410  * sent.
5411  *
5412  * Return code
5413  * The number of N_Ports with plogi issued.
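 *
 * Both this routine and lpfc_els_disc_adisc() above gate on the same
 * discovery threshold; a minimal sketch of that check (taken from the
 * loops themselves, not additional behavior):
 *
 *   vport->num_disc_nodes++;
 *   if (vport->num_disc_nodes >= vport->cfg_discovery_threads) {
 *           vport->fc_flag |= FC_NLP_MORE;   (set under shost->host_lock)
 *           break;
 *   }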
5414 **/ 5415 int 5416 lpfc_els_disc_plogi(struct lpfc_vport *vport) 5417 { 5418 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5419 struct lpfc_nodelist *ndlp, *next_ndlp; 5420 int sentplogi = 0; 5421 5422 /* go thru NPR nodes and issue any remaining ELS PLOGIs */ 5423 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 5424 if (ndlp->nlp_state == NLP_STE_NPR_NODE && 5425 (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && 5426 (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && 5427 (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { 5428 ndlp->nlp_prev_state = ndlp->nlp_state; 5429 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 5430 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5431 sentplogi++; 5432 vport->num_disc_nodes++; 5433 if (vport->num_disc_nodes >= 5434 vport->cfg_discovery_threads) { 5435 spin_lock_irq(shost->host_lock); 5436 vport->fc_flag |= FC_NLP_MORE; 5437 spin_unlock_irq(shost->host_lock); 5438 break; 5439 } 5440 } 5441 } 5442 5443 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5444 "6452 Discover PLOGI %d flag x%x\n", 5445 sentplogi, vport->fc_flag); 5446 5447 if (sentplogi) { 5448 lpfc_set_disctmo(vport); 5449 } 5450 else { 5451 spin_lock_irq(shost->host_lock); 5452 vport->fc_flag &= ~FC_NLP_MORE; 5453 spin_unlock_irq(shost->host_lock); 5454 } 5455 return sentplogi; 5456 } 5457 5458 static uint32_t 5459 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 5460 uint32_t word0) 5461 { 5462 5463 desc->tag = cpu_to_be32(RDP_LINK_SERVICE_DESC_TAG); 5464 desc->payload.els_req = word0; 5465 desc->length = cpu_to_be32(sizeof(desc->payload)); 5466 5467 return sizeof(struct fc_rdp_link_service_desc); 5468 } 5469 5470 static uint32_t 5471 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 5472 uint8_t *page_a0, uint8_t *page_a2) 5473 { 5474 uint16_t wavelength; 5475 uint16_t temperature; 5476 uint16_t rx_power; 5477 uint16_t tx_bias; 5478 uint16_t tx_power; 5479 uint16_t vcc; 5480 uint16_t flag = 0; 5481 struct sff_trasnceiver_codes_byte4 *trasn_code_byte4; 5482 struct sff_trasnceiver_codes_byte5 *trasn_code_byte5; 5483 5484 desc->tag = cpu_to_be32(RDP_SFP_DESC_TAG); 5485 5486 trasn_code_byte4 = (struct sff_trasnceiver_codes_byte4 *) 5487 &page_a0[SSF_TRANSCEIVER_CODE_B4]; 5488 trasn_code_byte5 = (struct sff_trasnceiver_codes_byte5 *) 5489 &page_a0[SSF_TRANSCEIVER_CODE_B5]; 5490 5491 if ((trasn_code_byte4->fc_sw_laser) || 5492 (trasn_code_byte5->fc_sw_laser_sl) || 5493 (trasn_code_byte5->fc_sw_laser_sn)) { /* check if its short WL */ 5494 flag |= (SFP_FLAG_PT_SWLASER << SFP_FLAG_PT_SHIFT); 5495 } else if (trasn_code_byte4->fc_lw_laser) { 5496 wavelength = (page_a0[SSF_WAVELENGTH_B1] << 8) | 5497 page_a0[SSF_WAVELENGTH_B0]; 5498 if (wavelength == SFP_WAVELENGTH_LC1310) 5499 flag |= SFP_FLAG_PT_LWLASER_LC1310 << SFP_FLAG_PT_SHIFT; 5500 if (wavelength == SFP_WAVELENGTH_LL1550) 5501 flag |= SFP_FLAG_PT_LWLASER_LL1550 << SFP_FLAG_PT_SHIFT; 5502 } 5503 /* check if its SFP+ */ 5504 flag |= ((page_a0[SSF_IDENTIFIER] == SFF_PG0_IDENT_SFP) ? 5505 SFP_FLAG_CT_SFP_PLUS : SFP_FLAG_CT_UNKNOWN) 5506 << SFP_FLAG_CT_SHIFT; 5507 5508 /* check if its OPTICAL */ 5509 flag |= ((page_a0[SSF_CONNECTOR] == SFF_PG0_CONNECTOR_LC) ? 
5510 SFP_FLAG_IS_OPTICAL_PORT : 0) 5511 << SFP_FLAG_IS_OPTICAL_SHIFT; 5512 5513 temperature = (page_a2[SFF_TEMPERATURE_B1] << 8 | 5514 page_a2[SFF_TEMPERATURE_B0]); 5515 vcc = (page_a2[SFF_VCC_B1] << 8 | 5516 page_a2[SFF_VCC_B0]); 5517 tx_power = (page_a2[SFF_TXPOWER_B1] << 8 | 5518 page_a2[SFF_TXPOWER_B0]); 5519 tx_bias = (page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | 5520 page_a2[SFF_TX_BIAS_CURRENT_B0]); 5521 rx_power = (page_a2[SFF_RXPOWER_B1] << 8 | 5522 page_a2[SFF_RXPOWER_B0]); 5523 desc->sfp_info.temperature = cpu_to_be16(temperature); 5524 desc->sfp_info.rx_power = cpu_to_be16(rx_power); 5525 desc->sfp_info.tx_bias = cpu_to_be16(tx_bias); 5526 desc->sfp_info.tx_power = cpu_to_be16(tx_power); 5527 desc->sfp_info.vcc = cpu_to_be16(vcc); 5528 5529 desc->sfp_info.flags = cpu_to_be16(flag); 5530 desc->length = cpu_to_be32(sizeof(desc->sfp_info)); 5531 5532 return sizeof(struct fc_rdp_sfp_desc); 5533 } 5534 5535 static uint32_t 5536 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 5537 READ_LNK_VAR *stat) 5538 { 5539 uint32_t type; 5540 5541 desc->tag = cpu_to_be32(RDP_LINK_ERROR_STATUS_DESC_TAG); 5542 5543 type = VN_PT_PHY_PF_PORT << VN_PT_PHY_SHIFT; 5544 5545 desc->info.port_type = cpu_to_be32(type); 5546 5547 desc->info.link_status.link_failure_cnt = 5548 cpu_to_be32(stat->linkFailureCnt); 5549 desc->info.link_status.loss_of_synch_cnt = 5550 cpu_to_be32(stat->lossSyncCnt); 5551 desc->info.link_status.loss_of_signal_cnt = 5552 cpu_to_be32(stat->lossSignalCnt); 5553 desc->info.link_status.primitive_seq_proto_err = 5554 cpu_to_be32(stat->primSeqErrCnt); 5555 desc->info.link_status.invalid_trans_word = 5556 cpu_to_be32(stat->invalidXmitWord); 5557 desc->info.link_status.invalid_crc_cnt = cpu_to_be32(stat->crcCnt); 5558 5559 desc->length = cpu_to_be32(sizeof(desc->info)); 5560 5561 return sizeof(struct fc_rdp_link_error_status_desc); 5562 } 5563 5564 static uint32_t 5565 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 5566 struct lpfc_vport *vport) 5567 { 5568 uint32_t bbCredit; 5569 5570 desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG); 5571 5572 bbCredit = vport->fc_sparam.cmn.bbCreditLsb | 5573 (vport->fc_sparam.cmn.bbCreditMsb << 8); 5574 desc->bbc_info.port_bbc = cpu_to_be32(bbCredit); 5575 if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP) { 5576 bbCredit = vport->phba->fc_fabparam.cmn.bbCreditLsb | 5577 (vport->phba->fc_fabparam.cmn.bbCreditMsb << 8); 5578 desc->bbc_info.attached_port_bbc = cpu_to_be32(bbCredit); 5579 } else { 5580 desc->bbc_info.attached_port_bbc = 0; 5581 } 5582 5583 desc->bbc_info.rtt = 0; 5584 desc->length = cpu_to_be32(sizeof(desc->bbc_info)); 5585 5586 return sizeof(struct fc_rdp_bbc_desc); 5587 } 5588 5589 static uint32_t 5590 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 5591 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 5592 { 5593 uint32_t flags = 0; 5594 5595 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5596 5597 desc->oed_info.hi_alarm = page_a2[SSF_TEMP_HIGH_ALARM]; 5598 desc->oed_info.lo_alarm = page_a2[SSF_TEMP_LOW_ALARM]; 5599 desc->oed_info.hi_warning = page_a2[SSF_TEMP_HIGH_WARNING]; 5600 desc->oed_info.lo_warning = page_a2[SSF_TEMP_LOW_WARNING]; 5601 5602 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5603 flags |= RDP_OET_HIGH_ALARM; 5604 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5605 flags |= RDP_OET_LOW_ALARM; 5606 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TEMPERATURE) 5607 flags |= RDP_OET_HIGH_WARNING; 5608 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TEMPERATURE) 5609 
flags |= RDP_OET_LOW_WARNING; 5610 5611 flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT); 5612 desc->oed_info.function_flags = cpu_to_be32(flags); 5613 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5614 return sizeof(struct fc_rdp_oed_sfp_desc); 5615 } 5616 5617 static uint32_t 5618 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 5619 struct fc_rdp_oed_sfp_desc *desc, 5620 uint8_t *page_a2) 5621 { 5622 uint32_t flags = 0; 5623 5624 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5625 5626 desc->oed_info.hi_alarm = page_a2[SSF_VOLTAGE_HIGH_ALARM]; 5627 desc->oed_info.lo_alarm = page_a2[SSF_VOLTAGE_LOW_ALARM]; 5628 desc->oed_info.hi_warning = page_a2[SSF_VOLTAGE_HIGH_WARNING]; 5629 desc->oed_info.lo_warning = page_a2[SSF_VOLTAGE_LOW_WARNING]; 5630 5631 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5632 flags |= RDP_OET_HIGH_ALARM; 5633 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5634 flags |= RDP_OET_LOW_ALARM; 5635 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_VOLTAGE) 5636 flags |= RDP_OET_HIGH_WARNING; 5637 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_VOLTAGE) 5638 flags |= RDP_OET_LOW_WARNING; 5639 5640 flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT); 5641 desc->oed_info.function_flags = cpu_to_be32(flags); 5642 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5643 return sizeof(struct fc_rdp_oed_sfp_desc); 5644 } 5645 5646 static uint32_t 5647 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 5648 struct fc_rdp_oed_sfp_desc *desc, 5649 uint8_t *page_a2) 5650 { 5651 uint32_t flags = 0; 5652 5653 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5654 5655 desc->oed_info.hi_alarm = page_a2[SSF_BIAS_HIGH_ALARM]; 5656 desc->oed_info.lo_alarm = page_a2[SSF_BIAS_LOW_ALARM]; 5657 desc->oed_info.hi_warning = page_a2[SSF_BIAS_HIGH_WARNING]; 5658 desc->oed_info.lo_warning = page_a2[SSF_BIAS_LOW_WARNING]; 5659 5660 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5661 flags |= RDP_OET_HIGH_ALARM; 5662 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXBIAS) 5663 flags |= RDP_OET_LOW_ALARM; 5664 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXBIAS) 5665 flags |= RDP_OET_HIGH_WARNING; 5666 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXBIAS) 5667 flags |= RDP_OET_LOW_WARNING; 5668 5669 flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT); 5670 desc->oed_info.function_flags = cpu_to_be32(flags); 5671 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5672 return sizeof(struct fc_rdp_oed_sfp_desc); 5673 } 5674 5675 static uint32_t 5676 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 5677 struct fc_rdp_oed_sfp_desc *desc, 5678 uint8_t *page_a2) 5679 { 5680 uint32_t flags = 0; 5681 5682 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5683 5684 desc->oed_info.hi_alarm = page_a2[SSF_TXPOWER_HIGH_ALARM]; 5685 desc->oed_info.lo_alarm = page_a2[SSF_TXPOWER_LOW_ALARM]; 5686 desc->oed_info.hi_warning = page_a2[SSF_TXPOWER_HIGH_WARNING]; 5687 desc->oed_info.lo_warning = page_a2[SSF_TXPOWER_LOW_WARNING]; 5688 5689 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5690 flags |= RDP_OET_HIGH_ALARM; 5691 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_TXPOWER) 5692 flags |= RDP_OET_LOW_ALARM; 5693 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_TXPOWER) 5694 flags |= RDP_OET_HIGH_WARNING; 5695 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_TXPOWER) 5696 flags |= RDP_OET_LOW_WARNING; 5697 5698 flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT); 5699 desc->oed_info.function_flags = cpu_to_be32(flags); 5700 desc->length = 
cpu_to_be32(sizeof(desc->oed_info)); 5701 return sizeof(struct fc_rdp_oed_sfp_desc); 5702 } 5703 5704 5705 static uint32_t 5706 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 5707 struct fc_rdp_oed_sfp_desc *desc, 5708 uint8_t *page_a2) 5709 { 5710 uint32_t flags = 0; 5711 5712 desc->tag = cpu_to_be32(RDP_OED_DESC_TAG); 5713 5714 desc->oed_info.hi_alarm = page_a2[SSF_RXPOWER_HIGH_ALARM]; 5715 desc->oed_info.lo_alarm = page_a2[SSF_RXPOWER_LOW_ALARM]; 5716 desc->oed_info.hi_warning = page_a2[SSF_RXPOWER_HIGH_WARNING]; 5717 desc->oed_info.lo_warning = page_a2[SSF_RXPOWER_LOW_WARNING]; 5718 5719 if (phba->sfp_alarm & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5720 flags |= RDP_OET_HIGH_ALARM; 5721 if (phba->sfp_alarm & LPFC_TRANSGRESSION_LOW_RXPOWER) 5722 flags |= RDP_OET_LOW_ALARM; 5723 if (phba->sfp_warning & LPFC_TRANSGRESSION_HIGH_RXPOWER) 5724 flags |= RDP_OET_HIGH_WARNING; 5725 if (phba->sfp_warning & LPFC_TRANSGRESSION_LOW_RXPOWER) 5726 flags |= RDP_OET_LOW_WARNING; 5727 5728 flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT); 5729 desc->oed_info.function_flags = cpu_to_be32(flags); 5730 desc->length = cpu_to_be32(sizeof(desc->oed_info)); 5731 return sizeof(struct fc_rdp_oed_sfp_desc); 5732 } 5733 5734 static uint32_t 5735 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 5736 uint8_t *page_a0, struct lpfc_vport *vport) 5737 { 5738 desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG); 5739 memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16); 5740 memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16); 5741 memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16); 5742 memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4); 5743 memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8); 5744 desc->length = cpu_to_be32(sizeof(desc->opd_info)); 5745 return sizeof(struct fc_rdp_opd_sfp_desc); 5746 } 5747 5748 static uint32_t 5749 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 5750 { 5751 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 5752 return 0; 5753 desc->tag = cpu_to_be32(RDP_FEC_DESC_TAG); 5754 5755 desc->info.CorrectedBlocks = 5756 cpu_to_be32(stat->fecCorrBlkCount); 5757 desc->info.UncorrectableBlocks = 5758 cpu_to_be32(stat->fecUncorrBlkCount); 5759 5760 desc->length = cpu_to_be32(sizeof(desc->info)); 5761 5762 return sizeof(struct fc_fec_rdp_desc); 5763 } 5764 5765 static uint32_t 5766 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 5767 { 5768 uint16_t rdp_cap = 0; 5769 uint16_t rdp_speed; 5770 5771 desc->tag = cpu_to_be32(RDP_PORT_SPEED_DESC_TAG); 5772 5773 switch (phba->fc_linkspeed) { 5774 case LPFC_LINK_SPEED_1GHZ: 5775 rdp_speed = RDP_PS_1GB; 5776 break; 5777 case LPFC_LINK_SPEED_2GHZ: 5778 rdp_speed = RDP_PS_2GB; 5779 break; 5780 case LPFC_LINK_SPEED_4GHZ: 5781 rdp_speed = RDP_PS_4GB; 5782 break; 5783 case LPFC_LINK_SPEED_8GHZ: 5784 rdp_speed = RDP_PS_8GB; 5785 break; 5786 case LPFC_LINK_SPEED_10GHZ: 5787 rdp_speed = RDP_PS_10GB; 5788 break; 5789 case LPFC_LINK_SPEED_16GHZ: 5790 rdp_speed = RDP_PS_16GB; 5791 break; 5792 case LPFC_LINK_SPEED_32GHZ: 5793 rdp_speed = RDP_PS_32GB; 5794 break; 5795 case LPFC_LINK_SPEED_64GHZ: 5796 rdp_speed = RDP_PS_64GB; 5797 break; 5798 default: 5799 rdp_speed = RDP_PS_UNKNOWN; 5800 break; 5801 } 5802 5803 desc->info.port_speed.speed = cpu_to_be16(rdp_speed); 5804 5805 if (phba->lmt & LMT_128Gb) 5806 rdp_cap |= RDP_PS_128GB; 5807 if (phba->lmt & LMT_64Gb) 5808 rdp_cap |= RDP_PS_64GB; 5809 if (phba->lmt & LMT_32Gb) 5810 rdp_cap |= RDP_PS_32GB; 5811 if 
(phba->lmt & LMT_16Gb) 5812 rdp_cap |= RDP_PS_16GB; 5813 if (phba->lmt & LMT_10Gb) 5814 rdp_cap |= RDP_PS_10GB; 5815 if (phba->lmt & LMT_8Gb) 5816 rdp_cap |= RDP_PS_8GB; 5817 if (phba->lmt & LMT_4Gb) 5818 rdp_cap |= RDP_PS_4GB; 5819 if (phba->lmt & LMT_2Gb) 5820 rdp_cap |= RDP_PS_2GB; 5821 if (phba->lmt & LMT_1Gb) 5822 rdp_cap |= RDP_PS_1GB; 5823 5824 if (rdp_cap == 0) 5825 rdp_cap = RDP_CAP_UNKNOWN; 5826 if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO) 5827 rdp_cap |= RDP_CAP_USER_CONFIGURED; 5828 5829 desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap); 5830 desc->length = cpu_to_be32(sizeof(desc->info)); 5831 return sizeof(struct fc_rdp_port_speed_desc); 5832 } 5833 5834 static uint32_t 5835 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 5836 struct lpfc_vport *vport) 5837 { 5838 5839 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5840 5841 memcpy(desc->port_names.wwnn, &vport->fc_nodename, 5842 sizeof(desc->port_names.wwnn)); 5843 5844 memcpy(desc->port_names.wwpn, &vport->fc_portname, 5845 sizeof(desc->port_names.wwpn)); 5846 5847 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5848 return sizeof(struct fc_rdp_port_name_desc); 5849 } 5850 5851 static uint32_t 5852 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 5853 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5854 { 5855 5856 desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); 5857 if (vport->fc_flag & FC_FABRIC) { 5858 memcpy(desc->port_names.wwnn, &vport->fabric_nodename, 5859 sizeof(desc->port_names.wwnn)); 5860 5861 memcpy(desc->port_names.wwpn, &vport->fabric_portname, 5862 sizeof(desc->port_names.wwpn)); 5863 } else { /* Point to Point */ 5864 memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, 5865 sizeof(desc->port_names.wwnn)); 5866 5867 memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, 5868 sizeof(desc->port_names.wwpn)); 5869 } 5870 5871 desc->length = cpu_to_be32(sizeof(desc->port_names)); 5872 return sizeof(struct fc_rdp_port_name_desc); 5873 } 5874 5875 static void 5876 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 5877 int status) 5878 { 5879 struct lpfc_nodelist *ndlp = rdp_context->ndlp; 5880 struct lpfc_vport *vport = ndlp->vport; 5881 struct lpfc_iocbq *elsiocb; 5882 struct ulp_bde64 *bpl; 5883 IOCB_t *icmd; 5884 uint8_t *pcmd; 5885 struct ls_rjt *stat; 5886 struct fc_rdp_res_frame *rdp_res; 5887 uint32_t cmdsize, len; 5888 uint16_t *flag_ptr; 5889 int rc; 5890 5891 if (status != SUCCESS) 5892 goto error; 5893 5894 /* This will change once we know the true size of the RDP payload */ 5895 cmdsize = sizeof(struct fc_rdp_res_frame); 5896 5897 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, 5898 lpfc_max_els_tries, rdp_context->ndlp, 5899 rdp_context->ndlp->nlp_DID, ELS_CMD_ACC); 5900 if (!elsiocb) 5901 goto free_rdp_context; 5902 5903 icmd = &elsiocb->iocb; 5904 icmd->ulpContext = rdp_context->rx_id; 5905 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 5906 5907 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 5908 "2171 Xmit RDP response tag x%x xri x%x, " 5909 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", 5910 elsiocb->iotag, elsiocb->iocb.ulpContext, 5911 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 5912 ndlp->nlp_rpi); 5913 rdp_res = (struct fc_rdp_res_frame *) 5914 (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5915 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5916 memset(pcmd, 0, sizeof(struct fc_rdp_res_frame)); 5917 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 5918 5919 /* Update Alarm 
and Warning */ 5920 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_ALARM_FLAGS); 5921 phba->sfp_alarm |= *flag_ptr; 5922 flag_ptr = (uint16_t *)(rdp_context->page_a2 + SSF_WARNING_FLAGS); 5923 phba->sfp_warning |= *flag_ptr; 5924 5925 /* For RDP payload */ 5926 len = 8; 5927 len += lpfc_rdp_res_link_service((struct fc_rdp_link_service_desc *) 5928 (len + pcmd), ELS_CMD_RDP); 5929 5930 len += lpfc_rdp_res_sfp_desc((struct fc_rdp_sfp_desc *)(len + pcmd), 5931 rdp_context->page_a0, rdp_context->page_a2); 5932 len += lpfc_rdp_res_speed((struct fc_rdp_port_speed_desc *)(len + pcmd), 5933 phba); 5934 len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *) 5935 (len + pcmd), &rdp_context->link_stat); 5936 len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *) 5937 (len + pcmd), vport); 5938 len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *) 5939 (len + pcmd), vport, ndlp); 5940 len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd), 5941 &rdp_context->link_stat); 5942 len += lpfc_rdp_res_bbc_desc((struct fc_rdp_bbc_desc *)(len + pcmd), 5943 &rdp_context->link_stat, vport); 5944 len += lpfc_rdp_res_oed_temp_desc(phba, 5945 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5946 rdp_context->page_a2); 5947 len += lpfc_rdp_res_oed_voltage_desc(phba, 5948 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5949 rdp_context->page_a2); 5950 len += lpfc_rdp_res_oed_txbias_desc(phba, 5951 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5952 rdp_context->page_a2); 5953 len += lpfc_rdp_res_oed_txpower_desc(phba, 5954 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5955 rdp_context->page_a2); 5956 len += lpfc_rdp_res_oed_rxpower_desc(phba, 5957 (struct fc_rdp_oed_sfp_desc *)(len + pcmd), 5958 rdp_context->page_a2); 5959 len += lpfc_rdp_res_opd_desc((struct fc_rdp_opd_sfp_desc *)(len + pcmd), 5960 rdp_context->page_a0, vport); 5961 5962 rdp_res->length = cpu_to_be32(len - 8); 5963 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 5964 5965 /* Now that we know the true size of the payload, update the BPL */ 5966 bpl = (struct ulp_bde64 *) 5967 (((struct lpfc_dmabuf *)(elsiocb->context3))->virt); 5968 bpl->tus.f.bdeSize = len; 5969 bpl->tus.f.bdeFlags = 0; 5970 bpl->tus.w = le32_to_cpu(bpl->tus.w); 5971 5972 phba->fc_stat.elsXmitACC++; 5973 elsiocb->context1 = lpfc_nlp_get(ndlp); 5974 if (!elsiocb->context1) { 5975 lpfc_els_free_iocb(phba, elsiocb); 5976 goto free_rdp_context; 5977 } 5978 5979 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 5980 if (rc == IOCB_ERROR) { 5981 lpfc_els_free_iocb(phba, elsiocb); 5982 lpfc_nlp_put(ndlp); 5983 } 5984 5985 goto free_rdp_context; 5986 5987 error: 5988 cmdsize = 2 * sizeof(uint32_t); 5989 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, lpfc_max_els_tries, 5990 ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); 5991 if (!elsiocb) 5992 goto free_rdp_context; 5993 5994 icmd = &elsiocb->iocb; 5995 icmd->ulpContext = rdp_context->rx_id; 5996 icmd->unsli3.rcvsli3.ox_id = rdp_context->ox_id; 5997 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 5998 5999 *((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT; 6000 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6001 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6002 6003 phba->fc_stat.elsXmitLSRJT++; 6004 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6005 elsiocb->context1 = lpfc_nlp_get(ndlp); 6006 if (!elsiocb->context1) { 6007 lpfc_els_free_iocb(phba, elsiocb); 6008 goto free_rdp_context; 6009 } 6010 6011 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6012 if (rc == IOCB_ERROR) 
{ 6013 lpfc_els_free_iocb(phba, elsiocb); 6014 lpfc_nlp_put(ndlp); 6015 } 6016 6017 free_rdp_context: 6018 /* This reference put is for the original unsolicited RDP. If the 6019 * iocb prep failed, there is no reference to remove. 6020 */ 6021 lpfc_nlp_put(ndlp); 6022 kfree(rdp_context); 6023 } 6024 6025 static int 6026 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 6027 { 6028 LPFC_MBOXQ_t *mbox = NULL; 6029 int rc; 6030 6031 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6032 if (!mbox) { 6033 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, 6034 "7105 failed to allocate mailbox memory"); 6035 return 1; 6036 } 6037 6038 if (lpfc_sli4_dump_page_a0(phba, mbox)) 6039 goto prep_mbox_fail; 6040 mbox->vport = rdp_context->ndlp->vport; 6041 mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0; 6042 mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; 6043 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6044 if (rc == MBX_NOT_FINISHED) 6045 goto issue_mbox_fail; 6046 6047 return 0; 6048 6049 prep_mbox_fail: 6050 issue_mbox_fail: 6051 mempool_free(mbox, phba->mbox_mem_pool); 6052 return 1; 6053 } 6054 6055 /* 6056 * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. 6057 * @vport: pointer to a host virtual N_Port data structure. 6058 * @cmdiocb: pointer to lpfc command iocb data structure. 6059 * @ndlp: pointer to a node-list data structure. 6060 * 6061 * This routine processes an unsolicited RDP(Read Diagnostic Parameters) 6062 * IOCB. First, the payload of the unsolicited RDP is checked. 6063 * Then it will (1) send MBX_DUMP_MEMORY, Embedded DMP_LMSD sub command TYPE-3 6064 * for Page A0, (2) send MBX_DUMP_MEMORY, DMP_LMSD for Page A2, 6065 * (3) send MBX_READ_LNK_STAT to get link stat, (4) Call lpfc_els_rdp_cmpl 6066 * gather all data and send RDP response. 6067 * 6068 * Return code 6069 * 0 - Sent the acc response 6070 * 1 - Sent the reject response. 
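 *
 * For reference, the completion path in lpfc_els_rdp_cmpl() assembles the
 * ACC as a fixed sequence of descriptors (link service, SFP, port speed,
 * link error status, the two port-name descriptors, FEC, BB credit, the
 * five OED descriptors and OPD) and accounts for the 8-byte response
 * header when setting the final length (a summary of that routine, not
 * additional behavior):
 *
 *   len = 8;
 *   len += lpfc_rdp_res_link_service(...);
 *   ... remaining descriptor builders ...
 *   rdp_res->length = cpu_to_be32(len - 8);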
6071 */ 6072 static int 6073 lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6074 struct lpfc_nodelist *ndlp) 6075 { 6076 struct lpfc_hba *phba = vport->phba; 6077 struct lpfc_dmabuf *pcmd; 6078 uint8_t rjt_err, rjt_expl = LSEXP_NOTHING_MORE; 6079 struct fc_rdp_req_frame *rdp_req; 6080 struct lpfc_rdp_context *rdp_context; 6081 IOCB_t *cmd = NULL; 6082 struct ls_rjt stat; 6083 6084 if (phba->sli_rev < LPFC_SLI_REV4 || 6085 bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6086 LPFC_SLI_INTF_IF_TYPE_2) { 6087 rjt_err = LSRJT_UNABLE_TPC; 6088 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6089 goto error; 6090 } 6091 6092 if (phba->sli_rev < LPFC_SLI_REV4 || (phba->hba_flag & HBA_FCOE_MODE)) { 6093 rjt_err = LSRJT_UNABLE_TPC; 6094 rjt_expl = LSEXP_REQ_UNSUPPORTED; 6095 goto error; 6096 } 6097 6098 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6099 rdp_req = (struct fc_rdp_req_frame *) pcmd->virt; 6100 6101 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6102 "2422 ELS RDP Request " 6103 "dec len %d tag x%x port_id %d len %d\n", 6104 be32_to_cpu(rdp_req->rdp_des_length), 6105 be32_to_cpu(rdp_req->nport_id_desc.tag), 6106 be32_to_cpu(rdp_req->nport_id_desc.nport_id), 6107 be32_to_cpu(rdp_req->nport_id_desc.length)); 6108 6109 if (sizeof(struct fc_rdp_nport_desc) != 6110 be32_to_cpu(rdp_req->rdp_des_length)) 6111 goto rjt_logerr; 6112 if (RDP_N_PORT_DESC_TAG != be32_to_cpu(rdp_req->nport_id_desc.tag)) 6113 goto rjt_logerr; 6114 if (RDP_NPORT_ID_SIZE != 6115 be32_to_cpu(rdp_req->nport_id_desc.length)) 6116 goto rjt_logerr; 6117 rdp_context = kzalloc(sizeof(struct lpfc_rdp_context), GFP_KERNEL); 6118 if (!rdp_context) { 6119 rjt_err = LSRJT_UNABLE_TPC; 6120 goto error; 6121 } 6122 6123 cmd = &cmdiocb->iocb; 6124 rdp_context->ndlp = lpfc_nlp_get(ndlp); 6125 if (!rdp_context->ndlp) { 6126 kfree(rdp_context); 6127 rjt_err = LSRJT_UNABLE_TPC; 6128 goto error; 6129 } 6130 rdp_context->ox_id = cmd->unsli3.rcvsli3.ox_id; 6131 rdp_context->rx_id = cmd->ulpContext; 6132 rdp_context->cmpl = lpfc_els_rdp_cmpl; 6133 if (lpfc_get_rdp_info(phba, rdp_context)) { 6134 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_ELS, 6135 "2423 Unable to send mailbox"); 6136 kfree(rdp_context); 6137 rjt_err = LSRJT_UNABLE_TPC; 6138 lpfc_nlp_put(ndlp); 6139 goto error; 6140 } 6141 6142 return 0; 6143 6144 rjt_logerr: 6145 rjt_err = LSRJT_LOGICAL_ERR; 6146 6147 error: 6148 memset(&stat, 0, sizeof(stat)); 6149 stat.un.b.lsRjtRsnCode = rjt_err; 6150 stat.un.b.lsRjtRsnCodeExp = rjt_expl; 6151 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6152 return 1; 6153 } 6154 6155 6156 static void 6157 lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6158 { 6159 MAILBOX_t *mb; 6160 IOCB_t *icmd; 6161 uint8_t *pcmd; 6162 struct lpfc_iocbq *elsiocb; 6163 struct lpfc_nodelist *ndlp; 6164 struct ls_rjt *stat; 6165 union lpfc_sli4_cfg_shdr *shdr; 6166 struct lpfc_lcb_context *lcb_context; 6167 struct fc_lcb_res_frame *lcb_res; 6168 uint32_t cmdsize, shdr_status, shdr_add_status; 6169 int rc; 6170 6171 mb = &pmb->u.mb; 6172 lcb_context = (struct lpfc_lcb_context *)pmb->ctx_ndlp; 6173 ndlp = lcb_context->ndlp; 6174 pmb->ctx_ndlp = NULL; 6175 pmb->ctx_buf = NULL; 6176 6177 shdr = (union lpfc_sli4_cfg_shdr *) 6178 &pmb->u.mqe.un.beacon_config.header.cfg_shdr; 6179 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6180 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6181 6182 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX, 6183 "0194 SET_BEACON_CONFIG mailbox " 6184 "completed 
with status x%x add_status x%x," 6185 " mbx status x%x\n", 6186 shdr_status, shdr_add_status, mb->mbxStatus); 6187 6188 if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || 6189 (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || 6190 (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { 6191 mempool_free(pmb, phba->mbox_mem_pool); 6192 goto error; 6193 } 6194 6195 mempool_free(pmb, phba->mbox_mem_pool); 6196 cmdsize = sizeof(struct fc_lcb_res_frame); 6197 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6198 lpfc_max_els_tries, ndlp, 6199 ndlp->nlp_DID, ELS_CMD_ACC); 6200 6201 /* Decrement the ndlp reference count from previous mbox command */ 6202 lpfc_nlp_put(ndlp); 6203 6204 if (!elsiocb) 6205 goto free_lcb_context; 6206 6207 lcb_res = (struct fc_lcb_res_frame *) 6208 (((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6209 6210 memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); 6211 icmd = &elsiocb->iocb; 6212 icmd->ulpContext = lcb_context->rx_id; 6213 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 6214 6215 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6216 *((uint32_t *)(pcmd)) = ELS_CMD_ACC; 6217 lcb_res->lcb_sub_command = lcb_context->sub_command; 6218 lcb_res->lcb_type = lcb_context->type; 6219 lcb_res->capability = lcb_context->capability; 6220 lcb_res->lcb_frequency = lcb_context->frequency; 6221 lcb_res->lcb_duration = lcb_context->duration; 6222 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6223 phba->fc_stat.elsXmitACC++; 6224 6225 elsiocb->context1 = lpfc_nlp_get(ndlp); 6226 if (!elsiocb->context1) { 6227 lpfc_els_free_iocb(phba, elsiocb); 6228 goto out; 6229 } 6230 6231 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6232 if (rc == IOCB_ERROR) { 6233 lpfc_els_free_iocb(phba, elsiocb); 6234 lpfc_nlp_put(ndlp); 6235 } 6236 out: 6237 kfree(lcb_context); 6238 return; 6239 6240 error: 6241 cmdsize = sizeof(struct fc_lcb_res_frame); 6242 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 6243 lpfc_max_els_tries, ndlp, 6244 ndlp->nlp_DID, ELS_CMD_LS_RJT); 6245 lpfc_nlp_put(ndlp); 6246 if (!elsiocb) 6247 goto free_lcb_context; 6248 6249 icmd = &elsiocb->iocb; 6250 icmd->ulpContext = lcb_context->rx_id; 6251 icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; 6252 pcmd = (uint8_t *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); 6253 6254 *((uint32_t *)(pcmd)) = ELS_CMD_LS_RJT; 6255 stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t)); 6256 stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 6257 6258 if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) 6259 stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS; 6260 6261 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 6262 phba->fc_stat.elsXmitLSRJT++; 6263 elsiocb->context1 = lpfc_nlp_get(ndlp); 6264 if (!elsiocb->context1) { 6265 lpfc_els_free_iocb(phba, elsiocb); 6266 goto free_lcb_context; 6267 } 6268 6269 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 6270 if (rc == IOCB_ERROR) { 6271 lpfc_els_free_iocb(phba, elsiocb); 6272 lpfc_nlp_put(ndlp); 6273 } 6274 free_lcb_context: 6275 kfree(lcb_context); 6276 } 6277 6278 static int 6279 lpfc_sli4_set_beacon(struct lpfc_vport *vport, 6280 struct lpfc_lcb_context *lcb_context, 6281 uint32_t beacon_state) 6282 { 6283 struct lpfc_hba *phba = vport->phba; 6284 union lpfc_sli4_cfg_shdr *cfg_shdr; 6285 LPFC_MBOXQ_t *mbox = NULL; 6286 uint32_t len; 6287 int rc; 6288 6289 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6290 if (!mbox) 6291 return 1; 6292 6293 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; 6294 len = sizeof(struct 
lpfc_mbx_set_beacon_config) - 6295 sizeof(struct lpfc_sli4_cfg_mhdr); 6296 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6297 LPFC_MBOX_OPCODE_SET_BEACON_CONFIG, len, 6298 LPFC_SLI4_MBX_EMBED); 6299 mbox->ctx_ndlp = (void *)lcb_context; 6300 mbox->vport = phba->pport; 6301 mbox->mbox_cmpl = lpfc_els_lcb_rsp; 6302 bf_set(lpfc_mbx_set_beacon_port_num, &mbox->u.mqe.un.beacon_config, 6303 phba->sli4_hba.physical_port); 6304 bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, 6305 beacon_state); 6306 mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ 6307 6308 /* 6309 * Check bv1s bit before issuing the mailbox 6310 * if bv1s == 1, LCB V1 supported 6311 * else, LCB V0 supported 6312 */ 6313 6314 if (phba->sli4_hba.pc_sli4_params.bv1s) { 6315 /* COMMON_SET_BEACON_CONFIG_V1 */ 6316 cfg_shdr->request.word9 = BEACON_VERSION_V1; 6317 lcb_context->capability |= LCB_CAPABILITY_DURATION; 6318 bf_set(lpfc_mbx_set_beacon_port_type, 6319 &mbox->u.mqe.un.beacon_config, 0); 6320 bf_set(lpfc_mbx_set_beacon_duration_v1, 6321 &mbox->u.mqe.un.beacon_config, 6322 be16_to_cpu(lcb_context->duration)); 6323 } else { 6324 /* COMMON_SET_BEACON_CONFIG_V0 */ 6325 if (be16_to_cpu(lcb_context->duration) != 0) { 6326 mempool_free(mbox, phba->mbox_mem_pool); 6327 return 1; 6328 } 6329 cfg_shdr->request.word9 = BEACON_VERSION_V0; 6330 lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); 6331 bf_set(lpfc_mbx_set_beacon_state, 6332 &mbox->u.mqe.un.beacon_config, beacon_state); 6333 bf_set(lpfc_mbx_set_beacon_port_type, 6334 &mbox->u.mqe.un.beacon_config, 1); 6335 bf_set(lpfc_mbx_set_beacon_duration, 6336 &mbox->u.mqe.un.beacon_config, 6337 be16_to_cpu(lcb_context->duration)); 6338 } 6339 6340 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6341 if (rc == MBX_NOT_FINISHED) { 6342 mempool_free(mbox, phba->mbox_mem_pool); 6343 return 1; 6344 } 6345 6346 return 0; 6347 } 6348 6349 6350 /** 6351 * lpfc_els_rcv_lcb - Process an unsolicited LCB 6352 * @vport: pointer to a host virtual N_Port data structure. 6353 * @cmdiocb: pointer to lpfc command iocb data structure. 6354 * @ndlp: pointer to a node-list data structure. 6355 * 6356 * This routine processes an unsolicited LCB(LINK CABLE BEACON) IOCB. 6357 * First, the payload of the unsolicited LCB is checked. 6358 * Then based on Subcommand beacon will either turn on or off. 6359 * 6360 * Return code 6361 * 0 - Sent the acc response 6362 * 1 - Sent the reject response. 
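 *
 * A note on the mailbox built by lpfc_sli4_set_beacon() above (summarized
 * from that routine, not additional behavior): the V1 form of
 * COMMON_SET_BEACON_CONFIG is used only when the SLI4 parameters report
 * bv1s, and only that form can carry a beacon duration:
 *
 *   if (phba->sli4_hba.pc_sli4_params.bv1s)
 *           request.word9 = BEACON_VERSION_V1;   (duration honored)
 *   else if (be16_to_cpu(lcb_context->duration) != 0)
 *           return 1;                            (V0 rejects a duration)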
6363 **/ 6364 static int 6365 lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6366 struct lpfc_nodelist *ndlp) 6367 { 6368 struct lpfc_hba *phba = vport->phba; 6369 struct lpfc_dmabuf *pcmd; 6370 uint8_t *lp; 6371 struct fc_lcb_request_frame *beacon; 6372 struct lpfc_lcb_context *lcb_context; 6373 u8 state, rjt_err = 0; 6374 struct ls_rjt stat; 6375 6376 pcmd = (struct lpfc_dmabuf *)cmdiocb->context2; 6377 lp = (uint8_t *)pcmd->virt; 6378 beacon = (struct fc_lcb_request_frame *)pcmd->virt; 6379 6380 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6381 "0192 ELS LCB Data x%x x%x x%x x%x sub x%x " 6382 "type x%x frequency %x duration x%x\n", 6383 lp[0], lp[1], lp[2], 6384 beacon->lcb_command, 6385 beacon->lcb_sub_command, 6386 beacon->lcb_type, 6387 beacon->lcb_frequency, 6388 be16_to_cpu(beacon->lcb_duration)); 6389 6390 if (beacon->lcb_sub_command != LPFC_LCB_ON && 6391 beacon->lcb_sub_command != LPFC_LCB_OFF) { 6392 rjt_err = LSRJT_CMD_UNSUPPORTED; 6393 goto rjt; 6394 } 6395 6396 if (phba->sli_rev < LPFC_SLI_REV4 || 6397 phba->hba_flag & HBA_FCOE_MODE || 6398 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < 6399 LPFC_SLI_INTF_IF_TYPE_2)) { 6400 rjt_err = LSRJT_CMD_UNSUPPORTED; 6401 goto rjt; 6402 } 6403 6404 lcb_context = kmalloc(sizeof(*lcb_context), GFP_KERNEL); 6405 if (!lcb_context) { 6406 rjt_err = LSRJT_UNABLE_TPC; 6407 goto rjt; 6408 } 6409 6410 state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; 6411 lcb_context->sub_command = beacon->lcb_sub_command; 6412 lcb_context->capability = 0; 6413 lcb_context->type = beacon->lcb_type; 6414 lcb_context->frequency = beacon->lcb_frequency; 6415 lcb_context->duration = beacon->lcb_duration; 6416 lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 6417 lcb_context->rx_id = cmdiocb->iocb.ulpContext; 6418 lcb_context->ndlp = lpfc_nlp_get(ndlp); 6419 if (!lcb_context->ndlp) { 6420 rjt_err = LSRJT_UNABLE_TPC; 6421 goto rjt_free; 6422 } 6423 6424 if (lpfc_sli4_set_beacon(vport, lcb_context, state)) { 6425 lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_TRACE_EVENT, 6426 "0193 failed to send mail box"); 6427 lpfc_nlp_put(ndlp); 6428 rjt_err = LSRJT_UNABLE_TPC; 6429 goto rjt_free; 6430 } 6431 return 0; 6432 6433 rjt_free: 6434 kfree(lcb_context); 6435 rjt: 6436 memset(&stat, 0, sizeof(stat)); 6437 stat.un.b.lsRjtRsnCode = rjt_err; 6438 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 6439 return 1; 6440 } 6441 6442 6443 /** 6444 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport 6445 * @vport: pointer to a host virtual N_Port data structure. 6446 * 6447 * This routine cleans up any Registration State Change Notification 6448 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the 6449 * @vport together with the host_lock is used to prevent multiple thread 6450 * trying to access the RSCN array on a same @vport at the same time. 
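 *
 * The fc_rscn_flush flag acts as a single-walker token; a minimal sketch
 * of the pattern used here and in lpfc_rscn_payload_check() (taken from
 * the code below, not additional behavior):
 *
 *   spin_lock_irq(shost->host_lock);
 *   if (vport->fc_rscn_flush) {
 *           spin_unlock_irq(shost->host_lock);   (another walker owns it)
 *           return;
 *   }
 *   vport->fc_rscn_flush = 1;
 *   spin_unlock_irq(shost->host_lock);
 *   ... walk or free vport->fc_rscn_id_list ...
 *   vport->fc_rscn_flush = 0;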
6451  **/
6452 void
6453 lpfc_els_flush_rscn(struct lpfc_vport *vport)
6454 {
6455 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6456 	struct lpfc_hba *phba = vport->phba;
6457 	int i;
6458 
6459 	spin_lock_irq(shost->host_lock);
6460 	if (vport->fc_rscn_flush) {
6461 		/* Another thread is walking fc_rscn_id_list on this vport */
6462 		spin_unlock_irq(shost->host_lock);
6463 		return;
6464 	}
6465 	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
6466 	vport->fc_rscn_flush = 1;
6467 	spin_unlock_irq(shost->host_lock);
6468 
6469 	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6470 		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
6471 		vport->fc_rscn_id_list[i] = NULL;
6472 	}
6473 	spin_lock_irq(shost->host_lock);
6474 	vport->fc_rscn_id_cnt = 0;
6475 	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
6476 	spin_unlock_irq(shost->host_lock);
6477 	lpfc_can_disctmo(vport);
6478 	/* Indicate we are done walking this fc_rscn_id_list */
6479 	vport->fc_rscn_flush = 0;
6480 }
6481 
6482 /**
6483  * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
6484  * @vport: pointer to a host virtual N_Port data structure.
6485  * @did: remote destination port identifier.
6486  *
6487  * This routine checks whether there is any pending Registration State
6488  * Change Notification (RSCN) to a @did on @vport.
6489  *
6490  * Return code
6491  *   Non-zero - The @did matched a pending RSCN
6492  *   0 - The @did did not match any pending RSCN
6493  **/
6494 int
6495 lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
6496 {
6497 	D_ID ns_did;
6498 	D_ID rscn_did;
6499 	uint32_t *lp;
6500 	uint32_t payload_len, i;
6501 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6502 
6503 	ns_did.un.word = did;
6504 
6505 	/* Never match fabric nodes for RSCNs */
6506 	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
6507 		return 0;
6508 
6509 	/* If we are doing a FULL RSCN rediscovery, match everything */
6510 	if (vport->fc_flag & FC_RSCN_DISCOVERY)
6511 		return did;
6512 
6513 	spin_lock_irq(shost->host_lock);
6514 	if (vport->fc_rscn_flush) {
6515 		/* Another thread is walking fc_rscn_id_list on this vport */
6516 		spin_unlock_irq(shost->host_lock);
6517 		return 0;
6518 	}
6519 	/* Indicate we are walking fc_rscn_id_list on this vport */
6520 	vport->fc_rscn_flush = 1;
6521 	spin_unlock_irq(shost->host_lock);
6522 	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
6523 		lp = vport->fc_rscn_id_list[i]->virt;
6524 		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
6525 		payload_len -= sizeof(uint32_t);	/* take off word 0 */
6526 		while (payload_len) {
6527 			rscn_did.un.word = be32_to_cpu(*lp++);
6528 			payload_len -= sizeof(uint32_t);
6529 			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
6530 			case RSCN_ADDRESS_FORMAT_PORT:
6531 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6532 				    && (ns_did.un.b.area == rscn_did.un.b.area)
6533 				    && (ns_did.un.b.id == rscn_did.un.b.id))
6534 					goto return_did_out;
6535 				break;
6536 			case RSCN_ADDRESS_FORMAT_AREA:
6537 				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
6538 				    && (ns_did.un.b.area == rscn_did.un.b.area))
6539 					goto return_did_out;
6540 				break;
6541 			case RSCN_ADDRESS_FORMAT_DOMAIN:
6542 				if (ns_did.un.b.domain == rscn_did.un.b.domain)
6543 					goto return_did_out;
6544 				break;
6545 			case RSCN_ADDRESS_FORMAT_FABRIC:
6546 				goto return_did_out;
6547 			}
6548 		}
6549 	}
6550 	/* Indicate we are done with walking fc_rscn_id_list on this vport */
6551 	vport->fc_rscn_flush = 0;
6552 	return 0;
6553 return_did_out:
6554 	/* Indicate we are done with walking fc_rscn_id_list on this
vport */ 6555 vport->fc_rscn_flush = 0; 6556 return did; 6557 } 6558 6559 /** 6560 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn 6561 * @vport: pointer to a host virtual N_Port data structure. 6562 * 6563 * This routine sends recovery (NLP_EVT_DEVICE_RECOVERY) event to the 6564 * state machine for a @vport's nodes that are with pending RSCN (Registration 6565 * State Change Notification). 6566 * 6567 * Return code 6568 * 0 - Successful (currently alway return 0) 6569 **/ 6570 static int 6571 lpfc_rscn_recovery_check(struct lpfc_vport *vport) 6572 { 6573 struct lpfc_nodelist *ndlp = NULL; 6574 6575 /* Move all affected nodes by pending RSCNs to NPR state. */ 6576 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6577 if ((ndlp->nlp_state == NLP_STE_UNUSED_NODE) || 6578 !lpfc_rscn_payload_check(vport, ndlp->nlp_DID)) 6579 continue; 6580 6581 /* NVME Target mode does not do RSCN Recovery. */ 6582 if (vport->phba->nvmet_support) 6583 continue; 6584 6585 /* If we are in the process of doing discovery on this 6586 * NPort, let it continue on its own. 6587 */ 6588 switch (ndlp->nlp_state) { 6589 case NLP_STE_PLOGI_ISSUE: 6590 case NLP_STE_ADISC_ISSUE: 6591 case NLP_STE_REG_LOGIN_ISSUE: 6592 case NLP_STE_PRLI_ISSUE: 6593 case NLP_STE_LOGO_ISSUE: 6594 continue; 6595 } 6596 6597 /* Check to see if we need to NVME rescan this target 6598 * remoteport. 6599 */ 6600 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 6601 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 6602 lpfc_nvme_rescan_port(vport, ndlp); 6603 6604 lpfc_disc_state_machine(vport, ndlp, NULL, 6605 NLP_EVT_DEVICE_RECOVERY); 6606 lpfc_cancel_retry_delay_tmo(vport, ndlp); 6607 } 6608 return 0; 6609 } 6610 6611 /** 6612 * lpfc_send_rscn_event - Send an RSCN event to management application 6613 * @vport: pointer to a host virtual N_Port data structure. 6614 * @cmdiocb: pointer to lpfc command iocb data structure. 6615 * 6616 * lpfc_send_rscn_event sends an RSCN netlink event to management 6617 * applications. 6618 */ 6619 static void 6620 lpfc_send_rscn_event(struct lpfc_vport *vport, 6621 struct lpfc_iocbq *cmdiocb) 6622 { 6623 struct lpfc_dmabuf *pcmd; 6624 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6625 uint32_t *payload_ptr; 6626 uint32_t payload_len; 6627 struct lpfc_rscn_event_header *rscn_event_data; 6628 6629 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6630 payload_ptr = (uint32_t *) pcmd->virt; 6631 payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK); 6632 6633 rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) + 6634 payload_len, GFP_KERNEL); 6635 if (!rscn_event_data) { 6636 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 6637 "0147 Failed to allocate memory for RSCN event\n"); 6638 return; 6639 } 6640 rscn_event_data->event_type = FC_REG_RSCN_EVENT; 6641 rscn_event_data->payload_length = payload_len; 6642 memcpy(rscn_event_data->rscn_payload, payload_ptr, 6643 payload_len); 6644 6645 fc_host_post_vendor_event(shost, 6646 fc_get_event_number(), 6647 sizeof(struct lpfc_rscn_event_header) + payload_len, 6648 (char *)rscn_event_data, 6649 LPFC_NL_VENDOR_ID); 6650 6651 kfree(rscn_event_data); 6652 } 6653 6654 /** 6655 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb 6656 * @vport: pointer to a host virtual N_Port data structure. 6657 * @cmdiocb: pointer to lpfc command iocb data structure. 6658 * @ndlp: pointer to a node-list data structure. 6659 * 6660 * This routine processes an unsolicited RSCN (Registration State Change 6661 * Notification) IOCB. 
First, the payload of the unsolicited RSCN is walked 6662 * to invoke fc_host_post_event() routine to the FC transport layer. If the 6663 * discover state machine is about to begin discovery, it just accepts the 6664 * RSCN and the discovery process will satisfy the RSCN. If this RSCN only 6665 * contains N_Port IDs for other vports on this HBA, it just accepts the 6666 * RSCN and ignore processing it. If the state machine is in the recovery 6667 * state, the fc_rscn_id_list of this @vport is walked and the 6668 * lpfc_rscn_recovery_check() routine is invoked to send recovery event for 6669 * all nodes that match RSCN payload. Otherwise, the lpfc_els_handle_rscn() 6670 * routine is invoked to handle the RSCN event. 6671 * 6672 * Return code 6673 * 0 - Just sent the acc response 6674 * 1 - Sent the acc response and waited for name server completion 6675 **/ 6676 static int 6677 lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6678 struct lpfc_nodelist *ndlp) 6679 { 6680 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6681 struct lpfc_hba *phba = vport->phba; 6682 struct lpfc_dmabuf *pcmd; 6683 uint32_t *lp, *datap; 6684 uint32_t payload_len, length, nportid, *cmd; 6685 int rscn_cnt; 6686 int rscn_id = 0, hba_id = 0; 6687 int i, tmo; 6688 6689 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6690 lp = (uint32_t *) pcmd->virt; 6691 6692 payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK); 6693 payload_len -= sizeof(uint32_t); /* take off word 0 */ 6694 /* RSCN received */ 6695 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6696 "0214 RSCN received Data: x%x x%x x%x x%x\n", 6697 vport->fc_flag, payload_len, *lp, 6698 vport->fc_rscn_id_cnt); 6699 6700 /* Send an RSCN event to the management application */ 6701 lpfc_send_rscn_event(vport, cmdiocb); 6702 6703 for (i = 0; i < payload_len/sizeof(uint32_t); i++) 6704 fc_host_post_event(shost, fc_get_event_number(), 6705 FCH_EVT_RSCN, lp[i]); 6706 6707 /* Check if RSCN is coming from a direct-connected remote NPort */ 6708 if (vport->fc_flag & FC_PT2PT) { 6709 /* If so, just ACC it, no other action needed for now */ 6710 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 6711 "2024 pt2pt RSCN %08x Data: x%x x%x\n", 6712 *lp, vport->fc_flag, payload_len); 6713 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6714 6715 /* Check to see if we need to NVME rescan this target 6716 * remoteport. 6717 */ 6718 if (ndlp->nlp_fc4_type & NLP_FC4_NVME && 6719 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) 6720 lpfc_nvme_rescan_port(vport, ndlp); 6721 return 0; 6722 } 6723 6724 /* If we are about to begin discovery, just ACC the RSCN. 6725 * Discovery processing will satisfy it. 6726 */ 6727 if (vport->port_state <= LPFC_NS_QRY) { 6728 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6729 "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", 6730 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6731 6732 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6733 return 0; 6734 } 6735 6736 /* If this RSCN just contains NPortIDs for other vports on this HBA, 6737 * just ACC and ignore it. 
6738 */ 6739 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 6740 !(vport->cfg_peer_port_login)) { 6741 i = payload_len; 6742 datap = lp; 6743 while (i > 0) { 6744 nportid = *datap++; 6745 nportid = ((be32_to_cpu(nportid)) & Mask_DID); 6746 i -= sizeof(uint32_t); 6747 rscn_id++; 6748 if (lpfc_find_vport_by_did(phba, nportid)) 6749 hba_id++; 6750 } 6751 if (rscn_id == hba_id) { 6752 /* ALL NPortIDs in RSCN are on HBA */ 6753 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6754 "0219 Ignore RSCN " 6755 "Data: x%x x%x x%x x%x\n", 6756 vport->fc_flag, payload_len, 6757 *lp, vport->fc_rscn_id_cnt); 6758 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6759 "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", 6760 ndlp->nlp_DID, vport->port_state, 6761 ndlp->nlp_flag); 6762 6763 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, 6764 ndlp, NULL); 6765 return 0; 6766 } 6767 } 6768 6769 spin_lock_irq(shost->host_lock); 6770 if (vport->fc_rscn_flush) { 6771 /* Another thread is walking fc_rscn_id_list on this vport */ 6772 vport->fc_flag |= FC_RSCN_DISCOVERY; 6773 spin_unlock_irq(shost->host_lock); 6774 /* Send back ACC */ 6775 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6776 return 0; 6777 } 6778 /* Indicate we are walking fc_rscn_id_list on this vport */ 6779 vport->fc_rscn_flush = 1; 6780 spin_unlock_irq(shost->host_lock); 6781 /* Get the array count after successfully have the token */ 6782 rscn_cnt = vport->fc_rscn_id_cnt; 6783 /* If we are already processing an RSCN, save the received 6784 * RSCN payload buffer, cmdiocb->context2 to process later. 6785 */ 6786 if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) { 6787 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6788 "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", 6789 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6790 6791 spin_lock_irq(shost->host_lock); 6792 vport->fc_flag |= FC_RSCN_DEFERRED; 6793 6794 /* Restart disctmo if its already running */ 6795 if (vport->fc_flag & FC_DISC_TMO) { 6796 tmo = ((phba->fc_ratov * 3) + 3); 6797 mod_timer(&vport->fc_disctmo, 6798 jiffies + msecs_to_jiffies(1000 * tmo)); 6799 } 6800 if ((rscn_cnt < FC_MAX_HOLD_RSCN) && 6801 !(vport->fc_flag & FC_RSCN_DISCOVERY)) { 6802 vport->fc_flag |= FC_RSCN_MODE; 6803 spin_unlock_irq(shost->host_lock); 6804 if (rscn_cnt) { 6805 cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt; 6806 length = be32_to_cpu(*cmd & ~ELS_CMD_MASK); 6807 } 6808 if ((rscn_cnt) && 6809 (payload_len + length <= LPFC_BPL_SIZE)) { 6810 *cmd &= ELS_CMD_MASK; 6811 *cmd |= cpu_to_be32(payload_len + length); 6812 memcpy(((uint8_t *)cmd) + length, lp, 6813 payload_len); 6814 } else { 6815 vport->fc_rscn_id_list[rscn_cnt] = pcmd; 6816 vport->fc_rscn_id_cnt++; 6817 /* If we zero, cmdiocb->context2, the calling 6818 * routine will not try to free it. 
6819 */ 6820 cmdiocb->context2 = NULL; 6821 } 6822 /* Deferred RSCN */ 6823 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6824 "0235 Deferred RSCN " 6825 "Data: x%x x%x x%x\n", 6826 vport->fc_rscn_id_cnt, vport->fc_flag, 6827 vport->port_state); 6828 } else { 6829 vport->fc_flag |= FC_RSCN_DISCOVERY; 6830 spin_unlock_irq(shost->host_lock); 6831 /* ReDiscovery RSCN */ 6832 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6833 "0234 ReDiscovery RSCN " 6834 "Data: x%x x%x x%x\n", 6835 vport->fc_rscn_id_cnt, vport->fc_flag, 6836 vport->port_state); 6837 } 6838 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6839 vport->fc_rscn_flush = 0; 6840 /* Send back ACC */ 6841 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6842 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6843 lpfc_rscn_recovery_check(vport); 6844 return 0; 6845 } 6846 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 6847 "RCV RSCN: did:x%x/ste:x%x flg:x%x", 6848 ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); 6849 6850 spin_lock_irq(shost->host_lock); 6851 vport->fc_flag |= FC_RSCN_MODE; 6852 spin_unlock_irq(shost->host_lock); 6853 vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd; 6854 /* Indicate we are done walking fc_rscn_id_list on this vport */ 6855 vport->fc_rscn_flush = 0; 6856 /* 6857 * If we zero cmdiocb->context2, the calling routine will 6858 * not try to free it. 6859 */ 6860 cmdiocb->context2 = NULL; 6861 lpfc_set_disctmo(vport); 6862 /* Send back ACC */ 6863 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 6864 /* send RECOVERY event for ALL nodes that match RSCN payload */ 6865 lpfc_rscn_recovery_check(vport); 6866 return lpfc_els_handle_rscn(vport); 6867 } 6868 6869 /** 6870 * lpfc_els_handle_rscn - Handle rscn for a vport 6871 * @vport: pointer to a host virtual N_Port data structure. 6872 * 6873 * This routine handles the Registration State Change Notification 6874 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall 6875 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise, 6876 * if the ndlp to NameServer exists, a Common Transport (CT) command to the 6877 * NameServer shall be issued. If CT command to the NameServer fails to be 6878 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any 6879 * RSCN activities with the @vport. 6880 * 6881 * Return code 6882 * 0 - Cleaned up rscn on the @vport 6883 * 1 - Wait for plogi to name server before proceeding 6884 **/ 6885 int 6886 lpfc_els_handle_rscn(struct lpfc_vport *vport) 6887 { 6888 struct lpfc_nodelist *ndlp; 6889 struct lpfc_hba *phba = vport->phba; 6890 6891 /* Ignore RSCN if the port is being torn down. */ 6892 if (vport->load_flag & FC_UNLOADING) { 6893 lpfc_els_flush_rscn(vport); 6894 return 0; 6895 } 6896 6897 /* Start timer for RSCN processing */ 6898 lpfc_set_disctmo(vport); 6899 6900 /* RSCN processed */ 6901 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 6902 "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", 6903 vport->fc_flag, 0, vport->fc_rscn_id_cnt, 6904 vport->port_state, vport->num_disc_nodes, 6905 vport->gidft_inp); 6906 6907 /* To process RSCN, first compare RSCN data with NameServer */ 6908 vport->fc_ns_retry = 0; 6909 vport->num_disc_nodes = 0; 6910 6911 ndlp = lpfc_findnode_did(vport, NameServer_DID); 6912 if (ndlp && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) { 6913 /* Good ndlp, issue CT Request to NameServer. Need to 6914 * know how many gidfts were issued.
If none, then just 6915 * flush the RSCN. Otherwise, the outstanding requests 6916 * need to complete. 6917 */ 6918 if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) { 6919 if (lpfc_issue_gidft(vport) > 0) 6920 return 1; 6921 } else if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_PT) { 6922 if (lpfc_issue_gidpt(vport) > 0) 6923 return 1; 6924 } else { 6925 return 1; 6926 } 6927 } else { 6928 /* Nameserver login in question. Revalidate. */ 6929 if (ndlp) { 6930 ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE; 6931 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6932 } else { 6933 ndlp = lpfc_nlp_init(vport, NameServer_DID); 6934 if (!ndlp) { 6935 lpfc_els_flush_rscn(vport); 6936 return 0; 6937 } 6938 ndlp->nlp_prev_state = ndlp->nlp_state; 6939 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); 6940 } 6941 ndlp->nlp_type |= NLP_FABRIC; 6942 lpfc_issue_els_plogi(vport, NameServer_DID, 0); 6943 /* Wait for NameServer login cmpl before we can 6944 * continue 6945 */ 6946 return 1; 6947 } 6948 6949 lpfc_els_flush_rscn(vport); 6950 return 0; 6951 } 6952 6953 /** 6954 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb 6955 * @vport: pointer to a host virtual N_Port data structure. 6956 * @cmdiocb: pointer to lpfc command iocb data structure. 6957 * @ndlp: pointer to a node-list data structure. 6958 * 6959 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS 6960 * unsolicited event. An unsolicited FLOGI can be received in a point-to- 6961 * point topology. As an unsolicited FLOGI should not be received in a loop 6962 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The 6963 * lpfc_check_sparm() routine is invoked to check the parameters in the 6964 * unsolicited FLOGI. If parameters validation failed, the routine 6965 * lpfc_els_rsp_reject() shall be called with reject reason code set to 6966 * LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in the 6967 * FLOGI shall be compared with the Port WWN of the @vport to determine who 6968 * will initiate PLOGI. The higher lexicographical value party shall has 6969 * higher priority (as the winning port) and will initiate PLOGI and 6970 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result 6971 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI 6972 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI. 
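 *
 * As implemented below, a memcmp() of the two Port Names decides the
 * outcome: a higher local WWPN sets FC_PT2PT_PLOGI and assigns
 * PT2PT_LocalID to fc_myDID, a lower one assigns PT2PT_RemoteID, and an
 * equal compare is treated as our own FLOGI looped back, in which case
 * the link is reinitialized (SLI-3) or the outstanding FLOGI is aborted.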
6973 * 6974 * Return code 6975 * 0 - Successfully processed the unsolicited flogi 6976 * 1 - Failed to process the unsolicited flogi 6977 **/ 6978 static int 6979 lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 6980 struct lpfc_nodelist *ndlp) 6981 { 6982 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6983 struct lpfc_hba *phba = vport->phba; 6984 struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 6985 uint32_t *lp = (uint32_t *) pcmd->virt; 6986 IOCB_t *icmd = &cmdiocb->iocb; 6987 struct serv_parm *sp; 6988 LPFC_MBOXQ_t *mbox; 6989 uint32_t cmd, did; 6990 int rc; 6991 uint32_t fc_flag = 0; 6992 uint32_t port_state = 0; 6993 6994 cmd = *lp++; 6995 sp = (struct serv_parm *) lp; 6996 6997 /* FLOGI received */ 6998 6999 lpfc_set_disctmo(vport); 7000 7001 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 7002 /* We should never receive a FLOGI in loop mode, ignore it */ 7003 did = icmd->un.elsreq64.remoteID; 7004 7005 /* An FLOGI ELS command <elsCmd> was received from DID <did> in 7006 Loop Mode */ 7007 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 7008 "0113 An FLOGI ELS command x%x was " 7009 "received from DID x%x in Loop Mode\n", 7010 cmd, did); 7011 return 1; 7012 } 7013 7014 (void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1); 7015 7016 /* 7017 * If our portname is greater than the remote portname, 7018 * then we initiate Nport login. 7019 */ 7020 7021 rc = memcmp(&vport->fc_portname, &sp->portName, 7022 sizeof(struct lpfc_name)); 7023 7024 if (!rc) { 7025 if (phba->sli_rev < LPFC_SLI_REV4) { 7026 mbox = mempool_alloc(phba->mbox_mem_pool, 7027 GFP_KERNEL); 7028 if (!mbox) 7029 return 1; 7030 lpfc_linkdown(phba); 7031 lpfc_init_link(phba, mbox, 7032 phba->cfg_topology, 7033 phba->cfg_link_speed); 7034 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 7035 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 7036 mbox->vport = vport; 7037 rc = lpfc_sli_issue_mbox(phba, mbox, 7038 MBX_NOWAIT); 7039 lpfc_set_loopback_flag(phba); 7040 if (rc == MBX_NOT_FINISHED) 7041 mempool_free(mbox, phba->mbox_mem_pool); 7042 return 1; 7043 } 7044 7045 /* abort the flogi coming back to ourselves 7046 * due to external loopback on the port. 7047 */ 7048 lpfc_els_abort_flogi(phba); 7049 return 0; 7050 7051 } else if (rc > 0) { /* greater than */ 7052 spin_lock_irq(shost->host_lock); 7053 vport->fc_flag |= FC_PT2PT_PLOGI; 7054 spin_unlock_irq(shost->host_lock); 7055 7056 /* If we have the high WWPN we can assign our own 7057 * myDID; otherwise, we have to WAIT for a PLOGI 7058 * from the remote NPort to find out what it 7059 * will be. 7060 */ 7061 vport->fc_myDID = PT2PT_LocalID; 7062 } else { 7063 vport->fc_myDID = PT2PT_RemoteID; 7064 } 7065 7066 /* 7067 * The vport state should go to LPFC_FLOGI only 7068 * AFTER we issue a FLOGI, not receive one. 7069 */ 7070 spin_lock_irq(shost->host_lock); 7071 fc_flag = vport->fc_flag; 7072 port_state = vport->port_state; 7073 vport->fc_flag |= FC_PT2PT; 7074 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 7075 7076 /* Acking an unsol FLOGI. Count 1 for link bounce 7077 * work-around. 7078 */ 7079 vport->rcv_flogi_cnt++; 7080 spin_unlock_irq(shost->host_lock); 7081 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7082 "3311 Rcv Flogi PS x%x new PS x%x " 7083 "fc_flag x%x new fc_flag x%x\n", 7084 port_state, vport->port_state, 7085 fc_flag, vport->fc_flag); 7086 7087 /* 7088 * We temporarily set fc_myDID to make it look like we are 7089 * a Fabric. This is done just so we end up with the right 7090 * did / sid on the FLOGI ACC rsp. 
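 * The original DID is saved locally and restored either after the ACC is
 * sent or once the response has been deferred behind our own FLOGI.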
7091 */ 7092 did = vport->fc_myDID; 7093 vport->fc_myDID = Fabric_DID; 7094 7095 memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); 7096 7097 /* Defer ACC response until AFTER we issue a FLOGI */ 7098 if (!(phba->hba_flag & HBA_FLOGI_ISSUED)) { 7099 phba->defer_flogi_acc_rx_id = cmdiocb->iocb.ulpContext; 7100 phba->defer_flogi_acc_ox_id = 7101 cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7102 7103 vport->fc_myDID = did; 7104 7105 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7106 "3344 Deferring FLOGI ACC: rx_id: x%x," 7107 " ox_id: x%x, hba_flag x%x\n", 7108 phba->defer_flogi_acc_rx_id, 7109 phba->defer_flogi_acc_ox_id, phba->hba_flag); 7110 7111 phba->defer_flogi_acc_flag = true; 7112 7113 return 0; 7114 } 7115 7116 /* Send back ACC */ 7117 lpfc_els_rsp_acc(vport, ELS_CMD_FLOGI, cmdiocb, ndlp, NULL); 7118 7119 /* Now lets put fc_myDID back to what its supposed to be */ 7120 vport->fc_myDID = did; 7121 7122 return 0; 7123 } 7124 7125 /** 7126 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb 7127 * @vport: pointer to a host virtual N_Port data structure. 7128 * @cmdiocb: pointer to lpfc command iocb data structure. 7129 * @ndlp: pointer to a node-list data structure. 7130 * 7131 * This routine processes Request Node Identification Data (RNID) IOCB 7132 * received as an ELS unsolicited event. Only when the RNID specified format 7133 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) 7134 * present, this routine will invoke the lpfc_els_rsp_rnid_acc() routine to 7135 * Accept (ACC) the RNID ELS command. All the other RNID formats are 7136 * rejected by invoking the lpfc_els_rsp_reject() routine. 7137 * 7138 * Return code 7139 * 0 - Successfully processed rnid iocb (currently always return 0) 7140 **/ 7141 static int 7142 lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7143 struct lpfc_nodelist *ndlp) 7144 { 7145 struct lpfc_dmabuf *pcmd; 7146 uint32_t *lp; 7147 RNID *rn; 7148 struct ls_rjt stat; 7149 7150 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7151 lp = (uint32_t *) pcmd->virt; 7152 7153 lp++; 7154 rn = (RNID *) lp; 7155 7156 /* RNID received */ 7157 7158 switch (rn->Format) { 7159 case 0: 7160 case RNID_TOPOLOGY_DISC: 7161 /* Send back ACC */ 7162 lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp); 7163 break; 7164 default: 7165 /* Reject this request because format not supported */ 7166 stat.un.b.lsRjtRsvd0 = 0; 7167 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7168 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7169 stat.un.b.vendorUnique = 0; 7170 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7171 NULL); 7172 } 7173 return 0; 7174 } 7175 7176 /** 7177 * lpfc_els_rcv_echo - Process an unsolicited echo iocb 7178 * @vport: pointer to a host virtual N_Port data structure. 7179 * @cmdiocb: pointer to lpfc command iocb data structure. 7180 * @ndlp: pointer to a node-list data structure. 
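 *
 * This routine processes an ECHO IOCB received as an ELS unsolicited
 * event. It skips the first word of the payload (the ECHO command
 * itself) and hands the remaining echo data to lpfc_els_rsp_echo_acc()
 * so it can be returned in the Accept (ACC) response.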
7181 * 7182 * Return code 7183 * 0 - Successfully processed echo iocb (currently always return 0) 7184 **/ 7185 static int 7186 lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7187 struct lpfc_nodelist *ndlp) 7188 { 7189 uint8_t *pcmd; 7190 7191 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); 7192 7193 /* skip over first word of echo command to find echo data */ 7194 pcmd += sizeof(uint32_t); 7195 7196 lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); 7197 return 0; 7198 } 7199 7200 /** 7201 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb 7202 * @vport: pointer to a host virtual N_Port data structure. 7203 * @cmdiocb: pointer to lpfc command iocb data structure. 7204 * @ndlp: pointer to a node-list data structure. 7205 * 7206 * This routine processes a Link Incident Report Registration(LIRR) IOCB 7207 * received as an ELS unsolicited event. Currently, this function just invokes 7208 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally. 7209 * 7210 * Return code 7211 * 0 - Successfully processed lirr iocb (currently always return 0) 7212 **/ 7213 static int 7214 lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7215 struct lpfc_nodelist *ndlp) 7216 { 7217 struct ls_rjt stat; 7218 7219 /* For now, unconditionally reject this command */ 7220 stat.un.b.lsRjtRsvd0 = 0; 7221 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7222 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7223 stat.un.b.vendorUnique = 0; 7224 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7225 return 0; 7226 } 7227 7228 /** 7229 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb 7230 * @vport: pointer to a host virtual N_Port data structure. 7231 * @cmdiocb: pointer to lpfc command iocb data structure. 7232 * @ndlp: pointer to a node-list data structure. 7233 * 7234 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB 7235 * received as an ELS unsolicited event. A request to RRQ shall only 7236 * be accepted if the Originator Nx_Port N_Port_ID or the Responder 7237 * Nx_Port N_Port_ID of the target Exchange is the same as the 7238 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is 7239 * not accepted, an LS_RJT with reason code "Unable to perform 7240 * command request" and reason code explanation "Invalid Originator 7241 * S_ID" shall be returned. For now, we just unconditionally accept 7242 * RRQ from the target. 7243 **/ 7244 static void 7245 lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7246 struct lpfc_nodelist *ndlp) 7247 { 7248 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7249 if (vport->phba->sli_rev == LPFC_SLI_REV4) 7250 lpfc_els_clear_rrq(vport, cmdiocb, ndlp); 7251 } 7252 7253 /** 7254 * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd 7255 * @phba: pointer to lpfc hba data structure. 7256 * @pmb: pointer to the driver internal queue element for mailbox command. 7257 * 7258 * This routine is the completion callback function for the MBX_READ_LNK_STAT 7259 * mailbox command. This callback function is to actually send the Accept 7260 * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It 7261 * collects the link statistics from the completion of the MBX_READ_LNK_STAT 7262 * mailbox command, constructs the RLS response with the link statistics 7263 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC 7264 * response to the RLS. 
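 *
 * The RLS_RSP payload carries the counters returned by the mailbox (link
 * failure, loss-of-sync, loss-of-signal, primitive sequence error,
 * invalid transmission word and CRC counts), each converted to big-endian
 * with cpu_to_be32() before the ACC is transmitted.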
7265 * 7266 * Note that the ndlp reference count will be incremented by 1 for holding the 7267 * ndlp and the reference to ndlp will be stored into the context1 field of 7268 * the IOCB for the completion callback function to the RLS Accept Response 7269 * ELS IOCB command. 7270 * 7271 **/ 7272 static void 7273 lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 7274 { 7275 int rc = 0; 7276 MAILBOX_t *mb; 7277 IOCB_t *icmd; 7278 struct RLS_RSP *rls_rsp; 7279 uint8_t *pcmd; 7280 struct lpfc_iocbq *elsiocb; 7281 struct lpfc_nodelist *ndlp; 7282 uint16_t oxid; 7283 uint16_t rxid; 7284 uint32_t cmdsize; 7285 7286 mb = &pmb->u.mb; 7287 7288 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 7289 rxid = (uint16_t)((unsigned long)(pmb->ctx_buf) & 0xffff); 7290 oxid = (uint16_t)(((unsigned long)(pmb->ctx_buf) >> 16) & 0xffff); 7291 pmb->ctx_buf = NULL; 7292 pmb->ctx_ndlp = NULL; 7293 7294 if (mb->mbxStatus) { 7295 mempool_free(pmb, phba->mbox_mem_pool); 7296 return; 7297 } 7298 7299 cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); 7300 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7301 lpfc_max_els_tries, ndlp, 7302 ndlp->nlp_DID, ELS_CMD_ACC); 7303 7304 /* Decrement the ndlp reference count from previous mbox command */ 7305 lpfc_nlp_put(ndlp); 7306 7307 if (!elsiocb) { 7308 mempool_free(pmb, phba->mbox_mem_pool); 7309 return; 7310 } 7311 7312 icmd = &elsiocb->iocb; 7313 icmd->ulpContext = rxid; 7314 icmd->unsli3.rcvsli3.ox_id = oxid; 7315 7316 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7317 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7318 pcmd += sizeof(uint32_t); /* Skip past command */ 7319 rls_rsp = (struct RLS_RSP *)pcmd; 7320 7321 rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); 7322 rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); 7323 rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); 7324 rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); 7325 rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); 7326 rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); 7327 mempool_free(pmb, phba->mbox_mem_pool); 7328 /* Xmit ELS RLS ACC response tag <ulpIoTag> */ 7329 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7330 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " 7331 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", 7332 elsiocb->iotag, elsiocb->iocb.ulpContext, 7333 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7334 ndlp->nlp_rpi); 7335 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7336 phba->fc_stat.elsXmitACC++; 7337 elsiocb->context1 = lpfc_nlp_get(ndlp); 7338 if (!elsiocb->context1) { 7339 lpfc_els_free_iocb(phba, elsiocb); 7340 return; 7341 } 7342 7343 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7344 if (rc == IOCB_ERROR) { 7345 lpfc_els_free_iocb(phba, elsiocb); 7346 lpfc_nlp_put(ndlp); 7347 } 7348 return; 7349 } 7350 7351 /** 7352 * lpfc_els_rcv_rls - Process an unsolicited rls iocb 7353 * @vport: pointer to a host virtual N_Port data structure. 7354 * @cmdiocb: pointer to lpfc command iocb data structure. 7355 * @ndlp: pointer to a node-list data structure. 7356 * 7357 * This routine processes Read Link Status (RLS) IOCB received as an 7358 * ELS unsolicited event. It first checks the remote port state. If the 7359 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7360 * state, it invokes the lpfc_els_rsl_reject() routine to send the reject 7361 * response. 
Otherwise, it issues the MBX_READ_LNK_STAT mailbox command 7362 * for reading the HBA link statistics. The callback function, 7363 * lpfc_els_rsp_rls_acc(), set for the MBX_READ_LNK_STAT mailbox command, 7364 * then actually sends out the RLS Accept (ACC) response. 7365 * 7366 * Return codes 7367 * 0 - Successfully processed rls iocb (currently always returns 0) 7368 **/ 7369 static int 7370 lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7371 struct lpfc_nodelist *ndlp) 7372 { 7373 struct lpfc_hba *phba = vport->phba; 7374 LPFC_MBOXQ_t *mbox; 7375 struct ls_rjt stat; 7376 7377 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7378 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 7379 /* reject the unsolicited RLS request and done with it */ 7380 goto reject_out; 7381 7382 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); 7383 if (mbox) { 7384 lpfc_read_lnk_stat(phba, mbox); 7385 mbox->ctx_buf = (void *)((unsigned long) 7386 ((cmdiocb->iocb.unsli3.rcvsli3.ox_id << 16) | 7387 cmdiocb->iocb.ulpContext)); /* rx_id */ 7388 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 7389 if (!mbox->ctx_ndlp) 7390 goto node_err; 7391 mbox->vport = vport; 7392 mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; 7393 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) 7394 != MBX_NOT_FINISHED) 7395 /* Mbox completion will send ELS Response */ 7396 return 0; 7397 /* Decrement reference count used for the failed mbox 7398 * command. 7399 */ 7400 lpfc_nlp_put(ndlp); 7401 node_err: 7402 mempool_free(mbox, phba->mbox_mem_pool); 7403 } 7404 reject_out: 7405 /* issue rejection response */ 7406 stat.un.b.lsRjtRsvd0 = 0; 7407 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7408 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7409 stat.un.b.vendorUnique = 0; 7410 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7411 return 0; 7412 } 7413 7414 /** 7415 * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb 7416 * @vport: pointer to a host virtual N_Port data structure. 7417 * @cmdiocb: pointer to lpfc command iocb data structure. 7418 * @ndlp: pointer to a node-list data structure. 7419 * 7420 * This routine processes Read Timeout Value (RTV) IOCB received as an 7421 * ELS unsolicited event. It first checks the remote port state. If the 7422 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE 7423 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject 7424 * response. Otherwise, it sends the Accept (ACC) response to a Read Timeout 7425 * Value (RTV) unsolicited IOCB event. 7426 * 7427 * Note that the ndlp reference count will be incremented by 1 for holding the 7428 * ndlp and the reference to ndlp will be stored into the context1 field of 7429 * the IOCB for the completion callback function to the RTV Accept Response 7430 * ELS IOCB command.
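 *
 * The RTV_RSP payload reports R_A_TOV in milliseconds (fc_ratov * 1000),
 * E_D_TOV from fc_edtov, and a qtov word whose E_D_TOV resolution bit
 * mirrors fc_edtovResol; the XRI and OX_ID of the received command are
 * reused so the ACC goes out on the originating exchange.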
7431 * 7432 * Return codes 7433 * 0 - Successfully processed rtv iocb (currently always returns 0) 7434 **/ 7435 static int 7436 lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7437 struct lpfc_nodelist *ndlp) 7438 { 7439 int rc = 0; 7440 struct lpfc_hba *phba = vport->phba; 7441 struct ls_rjt stat; 7442 struct RTV_RSP *rtv_rsp; 7443 uint8_t *pcmd; 7444 struct lpfc_iocbq *elsiocb; 7445 uint32_t cmdsize; 7446 7447 7448 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7449 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) 7450 /* reject the unsolicited RTV request and done with it */ 7451 goto reject_out; 7452 7453 cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); 7454 elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, 7455 lpfc_max_els_tries, ndlp, 7456 ndlp->nlp_DID, ELS_CMD_ACC); 7457 7458 if (!elsiocb) 7459 return 1; 7460 7461 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7462 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7463 pcmd += sizeof(uint32_t); /* Skip past command */ 7464 7465 /* use the command's xri in the response */ 7466 elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; /* Xri / rx_id */ 7467 elsiocb->iocb.unsli3.rcvsli3.ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; 7468 7469 rtv_rsp = (struct RTV_RSP *)pcmd; 7470 7471 /* populate RTV payload */ 7472 rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ 7473 rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); 7474 bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); 7475 bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ 7476 rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); 7477 7478 /* Xmit ELS RTV ACC response tag <ulpIoTag> */ 7479 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, 7480 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " 7481 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " 7482 "Data: x%x x%x x%x\n", 7483 elsiocb->iotag, elsiocb->iocb.ulpContext, 7484 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7485 ndlp->nlp_rpi, 7486 rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); 7487 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7488 phba->fc_stat.elsXmitACC++; 7489 elsiocb->context1 = lpfc_nlp_get(ndlp); 7490 if (!elsiocb->context1) { 7491 lpfc_els_free_iocb(phba, elsiocb); 7492 return 0; 7493 } 7494 7495 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7496 if (rc == IOCB_ERROR) { 7497 lpfc_els_free_iocb(phba, elsiocb); 7498 lpfc_nlp_put(ndlp); 7499 } 7500 return 0; 7501 7502 reject_out: 7503 /* issue rejection response */ 7504 stat.un.b.lsRjtRsvd0 = 0; 7505 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7506 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7507 stat.un.b.vendorUnique = 0; 7508 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); 7509 return 0; 7510 } 7511 7512 /* lpfc_issue_els_rrq - Issue an els rrq command 7513 * @vport: pointer to a host virtual N_Port data structure. 7514 * @ndlp: pointer to a node-list data structure. 7515 * @did: DID of the target. 7516 * @rrq: Pointer to the rrq struct. 7517 * 7518 * Build an ELS RRQ command and send it to the target. If the issue_iocb is 7519 * successful, the completion handler will clear the RRQ. 7520 * 7521 * Return codes 7522 * 0 - Successfully sent rrq els iocb. 7523 * 1 - Failed to send rrq els iocb.
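 *
 * The RRQ payload is filled from the rrq struct: the OX_ID comes from the
 * SLI-4 XRI lookup (xri_ids[rrq->xritag]), the RX_ID from rrq->rxid and
 * the DID from vport->fc_myDID, with lpfc_cmpl_els_rrq set as the
 * completion handler.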
7524 **/ 7525 static int 7526 lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 7527 uint32_t did, struct lpfc_node_rrq *rrq) 7528 { 7529 struct lpfc_hba *phba = vport->phba; 7530 struct RRQ *els_rrq; 7531 struct lpfc_iocbq *elsiocb; 7532 uint8_t *pcmd; 7533 uint16_t cmdsize; 7534 int ret; 7535 7536 if (!ndlp) 7537 return 1; 7538 7539 /* If ndlp is not NULL, we will bump the reference count on it */ 7540 cmdsize = (sizeof(uint32_t) + sizeof(struct RRQ)); 7541 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, did, 7542 ELS_CMD_RRQ); 7543 if (!elsiocb) 7544 return 1; 7545 7546 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7547 7548 /* For RRQ request, remainder of payload is Exchange IDs */ 7549 *((uint32_t *) (pcmd)) = ELS_CMD_RRQ; 7550 pcmd += sizeof(uint32_t); 7551 els_rrq = (struct RRQ *) pcmd; 7552 7553 bf_set(rrq_oxid, els_rrq, phba->sli4_hba.xri_ids[rrq->xritag]); 7554 bf_set(rrq_rxid, els_rrq, rrq->rxid); 7555 bf_set(rrq_did, els_rrq, vport->fc_myDID); 7556 els_rrq->rrq = cpu_to_be32(els_rrq->rrq); 7557 els_rrq->rrq_exchg = cpu_to_be32(els_rrq->rrq_exchg); 7558 7559 7560 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 7561 "Issue RRQ: did:x%x", 7562 did, rrq->xritag, rrq->rxid); 7563 elsiocb->context_un.rrq = rrq; 7564 elsiocb->iocb_cmpl = lpfc_cmpl_els_rrq; 7565 7566 lpfc_nlp_get(ndlp); 7567 elsiocb->context1 = ndlp; 7568 7569 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7570 if (ret == IOCB_ERROR) 7571 goto io_err; 7572 return 0; 7573 7574 io_err: 7575 lpfc_els_free_iocb(phba, elsiocb); 7576 lpfc_nlp_put(ndlp); 7577 return 1; 7578 } 7579 7580 /** 7581 * lpfc_send_rrq - Sends ELS RRQ if needed. 7582 * @phba: pointer to lpfc hba data structure. 7583 * @rrq: pointer to the active rrq. 7584 * 7585 * This routine will call the lpfc_issue_els_rrq if the rrq is 7586 * still active for the xri. If this function returns a failure then 7587 * the caller needs to clean up the RRQ by calling lpfc_clr_active_rrq. 7588 * 7589 * Returns 0 Success. 7590 * 1 Failure. 7591 **/ 7592 int 7593 lpfc_send_rrq(struct lpfc_hba *phba, struct lpfc_node_rrq *rrq) 7594 { 7595 struct lpfc_nodelist *ndlp = lpfc_findnode_did(rrq->vport, 7596 rrq->nlp_DID); 7597 if (!ndlp) 7598 return 1; 7599 7600 if (lpfc_test_rrq_active(phba, ndlp, rrq->xritag)) 7601 return lpfc_issue_els_rrq(rrq->vport, ndlp, 7602 rrq->nlp_DID, rrq); 7603 else 7604 return 1; 7605 } 7606 7607 /** 7608 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command 7609 * @vport: pointer to a host virtual N_Port data structure. 7610 * @cmdsize: size of the ELS command. 7611 * @oldiocb: pointer to the original lpfc command iocb data structure. 7612 * @ndlp: pointer to a node-list data structure. 7613 * 7614 * This routine issuees an Accept (ACC) Read Port List (RPL) ELS command. 7615 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL. 7616 * 7617 * Note that the ndlp reference count will be incremented by 1 for holding the 7618 * ndlp and the reference to ndlp will be stored into the context1 field of 7619 * the IOCB for the completion callback function to the RPL Accept Response 7620 * ELS command. 
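 *
 * Only a single-entry port list is reported: the RPL_RSP carries a list
 * length of one, index zero, and one port block holding the @vport's own
 * N_Port_ID (fc_myDID) and Port Name (fc_portname).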
7621 * 7622 * Return code 7623 * 0 - Successfully issued ACC RPL ELS command 7624 * 1 - Failed to issue ACC RPL ELS command 7625 **/ 7626 static int 7627 lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, 7628 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) 7629 { 7630 int rc = 0; 7631 struct lpfc_hba *phba = vport->phba; 7632 IOCB_t *icmd, *oldcmd; 7633 RPL_RSP rpl_rsp; 7634 struct lpfc_iocbq *elsiocb; 7635 uint8_t *pcmd; 7636 7637 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 7638 ndlp->nlp_DID, ELS_CMD_ACC); 7639 7640 if (!elsiocb) 7641 return 1; 7642 7643 icmd = &elsiocb->iocb; 7644 oldcmd = &oldiocb->iocb; 7645 icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ 7646 icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; 7647 7648 pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 7649 *((uint32_t *) (pcmd)) = ELS_CMD_ACC; 7650 pcmd += sizeof(uint16_t); 7651 *((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize); 7652 pcmd += sizeof(uint16_t); 7653 7654 /* Setup the RPL ACC payload */ 7655 rpl_rsp.listLen = be32_to_cpu(1); 7656 rpl_rsp.index = 0; 7657 rpl_rsp.port_num_blk.portNum = 0; 7658 rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID); 7659 memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname, 7660 sizeof(struct lpfc_name)); 7661 memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t)); 7662 /* Xmit ELS RPL ACC response tag <ulpIoTag> */ 7663 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7664 "0120 Xmit ELS RPL ACC response tag x%x " 7665 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " 7666 "rpi x%x\n", 7667 elsiocb->iotag, elsiocb->iocb.ulpContext, 7668 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, 7669 ndlp->nlp_rpi); 7670 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 7671 phba->fc_stat.elsXmitACC++; 7672 elsiocb->context1 = lpfc_nlp_get(ndlp); 7673 if (!elsiocb->context1) { 7674 lpfc_els_free_iocb(phba, elsiocb); 7675 return 1; 7676 } 7677 7678 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 7679 if (rc == IOCB_ERROR) { 7680 lpfc_els_free_iocb(phba, elsiocb); 7681 lpfc_nlp_put(ndlp); 7682 return 1; 7683 } 7684 7685 return 0; 7686 } 7687 7688 /** 7689 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb 7690 * @vport: pointer to a host virtual N_Port data structure. 7691 * @cmdiocb: pointer to lpfc command iocb data structure. 7692 * @ndlp: pointer to a node-list data structure. 7693 * 7694 * This routine processes Read Port List (RPL) IOCB received as an ELS 7695 * unsolicited event. It first checks the remote port state. If the remote 7696 * port is not in NLP_STE_UNMAPPED_NODE and NLP_STE_MAPPED_NODE states, it 7697 * invokes the lpfc_els_rsp_reject() routine to send reject response. 7698 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine 7699 * to accept the RPL. 
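 *
 * The size of the accept is bounded by the request: a full RPL_RSP is
 * returned when the requested index is zero and maxsize is either zero or
 * large enough to hold it, otherwise the response is limited to maxsize
 * words.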
7700 * 7701 * Return code 7702 * 0 - Successfully processed rpl iocb (currently always return 0) 7703 **/ 7704 static int 7705 lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7706 struct lpfc_nodelist *ndlp) 7707 { 7708 struct lpfc_dmabuf *pcmd; 7709 uint32_t *lp; 7710 uint32_t maxsize; 7711 uint16_t cmdsize; 7712 RPL *rpl; 7713 struct ls_rjt stat; 7714 7715 if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 7716 (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { 7717 /* issue rejection response */ 7718 stat.un.b.lsRjtRsvd0 = 0; 7719 stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 7720 stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; 7721 stat.un.b.vendorUnique = 0; 7722 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, 7723 NULL); 7724 /* rejected the unsolicited RPL request and done with it */ 7725 return 0; 7726 } 7727 7728 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7729 lp = (uint32_t *) pcmd->virt; 7730 rpl = (RPL *) (lp + 1); 7731 maxsize = be32_to_cpu(rpl->maxsize); 7732 7733 /* We support only one port */ 7734 if ((rpl->index == 0) && 7735 ((maxsize == 0) || 7736 ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) { 7737 cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP); 7738 } else { 7739 cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t); 7740 } 7741 lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp); 7742 7743 return 0; 7744 } 7745 7746 /** 7747 * lpfc_els_rcv_farp - Process an unsolicited farp request els command 7748 * @vport: pointer to a virtual N_Port data structure. 7749 * @cmdiocb: pointer to lpfc command iocb data structure. 7750 * @ndlp: pointer to a node-list data structure. 7751 * 7752 * This routine processes Fibre Channel Address Resolution Protocol 7753 * (FARP) Request IOCB received as an ELS unsolicited event. Currently, 7754 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such, 7755 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the 7756 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the 7757 * remote PortName is compared against the FC PortName stored in the @vport 7758 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is 7759 * compared against the FC NodeName stored in the @vport data structure. 7760 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the 7761 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is 7762 * invoked to send out FARP Response to the remote node. Before sending the 7763 * FARP Response, however, the FARP_REQUEST_PLOGI flag is check in the FARP 7764 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi() 7765 * routine is invoked to log into the remote port first. 
7766 * 7767 * Return code 7768 * 0 - Either the FARP Match Mode not supported or successfully processed 7769 **/ 7770 static int 7771 lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7772 struct lpfc_nodelist *ndlp) 7773 { 7774 struct lpfc_dmabuf *pcmd; 7775 uint32_t *lp; 7776 IOCB_t *icmd; 7777 FARP *fp; 7778 uint32_t cnt, did; 7779 7780 icmd = &cmdiocb->iocb; 7781 did = icmd->un.elsreq64.remoteID; 7782 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7783 lp = (uint32_t *) pcmd->virt; 7784 7785 lp++; 7786 fp = (FARP *) lp; 7787 /* FARP-REQ received from DID <did> */ 7788 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7789 "0601 FARP-REQ received from DID x%x\n", did); 7790 /* We will only support match on WWPN or WWNN */ 7791 if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) { 7792 return 0; 7793 } 7794 7795 cnt = 0; 7796 /* If this FARP command is searching for my portname */ 7797 if (fp->Mflags & FARP_MATCH_PORT) { 7798 if (memcmp(&fp->RportName, &vport->fc_portname, 7799 sizeof(struct lpfc_name)) == 0) 7800 cnt = 1; 7801 } 7802 7803 /* If this FARP command is searching for my nodename */ 7804 if (fp->Mflags & FARP_MATCH_NODE) { 7805 if (memcmp(&fp->RnodeName, &vport->fc_nodename, 7806 sizeof(struct lpfc_name)) == 0) 7807 cnt = 1; 7808 } 7809 7810 if (cnt) { 7811 if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) || 7812 (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) { 7813 /* Log back into the node before sending the FARP. */ 7814 if (fp->Rflags & FARP_REQUEST_PLOGI) { 7815 ndlp->nlp_prev_state = ndlp->nlp_state; 7816 lpfc_nlp_set_state(vport, ndlp, 7817 NLP_STE_PLOGI_ISSUE); 7818 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 7819 } 7820 7821 /* Send a FARP response to that node */ 7822 if (fp->Rflags & FARP_REQUEST_FARPR) 7823 lpfc_issue_els_farpr(vport, did, 0); 7824 } 7825 } 7826 return 0; 7827 } 7828 7829 /** 7830 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb 7831 * @vport: pointer to a host virtual N_Port data structure. 7832 * @cmdiocb: pointer to lpfc command iocb data structure. 7833 * @ndlp: pointer to a node-list data structure. 7834 * 7835 * This routine processes Fibre Channel Address Resolution Protocol 7836 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply 7837 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept 7838 * the FARP response request. 7839 * 7840 * Return code 7841 * 0 - Successfully processed FARPR IOCB (currently always return 0) 7842 **/ 7843 static int 7844 lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7845 struct lpfc_nodelist *ndlp) 7846 { 7847 struct lpfc_dmabuf *pcmd; 7848 uint32_t *lp; 7849 IOCB_t *icmd; 7850 uint32_t did; 7851 7852 icmd = &cmdiocb->iocb; 7853 did = icmd->un.elsreq64.remoteID; 7854 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 7855 lp = (uint32_t *) pcmd->virt; 7856 7857 lp++; 7858 /* FARP-RSP received from DID <did> */ 7859 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7860 "0600 FARP-RSP received from DID x%x\n", did); 7861 /* ACCEPT the Farp resp request */ 7862 lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); 7863 7864 return 0; 7865 } 7866 7867 /** 7868 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command 7869 * @vport: pointer to a host virtual N_Port data structure. 7870 * @cmdiocb: pointer to lpfc command iocb data structure. 7871 * @fan_ndlp: pointer to a node-list data structure. 
7872 * 7873 * This routine processes a Fabric Address Notification (FAN) IOCB 7874 * command received as an ELS unsolicited event. The FAN ELS command will 7875 * only be processed on a physical port (i.e., the @vport represents the 7876 * physical port). The fabric NodeName and PortName from the FAN IOCB are 7877 * compared against those in the phba data structure. If either of those is 7878 * different, the lpfc_issue_init_vfi() routine is invoked to restart 7879 * Fabric Login (FLOGI) to the fabric and start the discovery over. Otherwise, 7880 * if both of those are identical, the lpfc_issue_fabric_reglogin() (SLI-3) or 7881 * lpfc_issue_reg_vfi() (SLI-4) routine is invoked to register login to the fabric. 7882 * 7883 * Return code 7884 * 0 - Successfully processed fan iocb (currently always returns 0). 7885 **/ 7886 static int 7887 lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, 7888 struct lpfc_nodelist *fan_ndlp) 7889 { 7890 struct lpfc_hba *phba = vport->phba; 7891 uint32_t *lp; 7892 FAN *fp; 7893 7894 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n"); 7895 lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt; 7896 fp = (FAN *) ++lp; 7897 /* FAN received; Fan does not have a reply sequence */ 7898 if ((vport == phba->pport) && 7899 (vport->port_state == LPFC_LOCAL_CFG_LINK)) { 7900 if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName, 7901 sizeof(struct lpfc_name))) || 7902 (memcmp(&phba->fc_fabparam.portName, &fp->FportName, 7903 sizeof(struct lpfc_name)))) { 7904 /* This port has switched fabrics. FLOGI is required */ 7905 lpfc_issue_init_vfi(vport); 7906 } else { 7907 /* FAN verified - skip FLOGI */ 7908 vport->fc_myDID = vport->fc_prevDID; 7909 if (phba->sli_rev < LPFC_SLI_REV4) 7910 lpfc_issue_fabric_reglogin(vport); 7911 else { 7912 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 7913 "3138 Need register VFI: (x%x/%x)\n", 7914 vport->fc_prevDID, vport->fc_myDID); 7915 lpfc_issue_reg_vfi(vport); 7916 } 7917 } 7918 } 7919 return 0; 7920 } 7921 7922 /** 7923 * lpfc_els_timeout - Handler function for the els timer 7924 * @t: timer context used to obtain the vport. 7925 * 7926 * This routine is invoked by the ELS timer after timeout. It posts the ELS 7927 * timer timeout event by setting the WORKER_ELS_TMO bit to the work port 7928 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake 7929 * up the worker thread. It is for the worker thread to invoke the routine 7930 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 7931 **/ 7932 void 7933 lpfc_els_timeout(struct timer_list *t) 7934 { 7935 struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); 7936 struct lpfc_hba *phba = vport->phba; 7937 uint32_t tmo_posted; 7938 unsigned long iflag; 7939 7940 spin_lock_irqsave(&vport->work_port_lock, iflag); 7941 tmo_posted = vport->work_port_events & WORKER_ELS_TMO; 7942 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 7943 vport->work_port_events |= WORKER_ELS_TMO; 7944 spin_unlock_irqrestore(&vport->work_port_lock, iflag); 7945 7946 if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING))) 7947 lpfc_worker_wake_up(phba); 7948 return; 7949 } 7950 7951 7952 /** 7953 * lpfc_els_timeout_handler - Process an els timeout event 7954 * @vport: pointer to a virtual N_Port data structure. 7955 * 7956 * This routine is the actual handler function that processes an ELS timeout * event.
It walks the ELS ring to get and abort all the IOCBs (except the 7958 * ABORT/CLOSE/FARP/FARPR/FDISC), which are associated with the @vport by 7959 * invoking the lpfc_sli_issue_abort_iotag() routine. 7960 **/ 7961 void 7962 lpfc_els_timeout_handler(struct lpfc_vport *vport) 7963 { 7964 struct lpfc_hba *phba = vport->phba; 7965 struct lpfc_sli_ring *pring; 7966 struct lpfc_iocbq *tmp_iocb, *piocb; 7967 IOCB_t *cmd = NULL; 7968 struct lpfc_dmabuf *pcmd; 7969 uint32_t els_command = 0; 7970 uint32_t timeout; 7971 uint32_t remote_ID = 0xffffffff; 7972 LIST_HEAD(abort_list); 7973 7974 7975 timeout = (uint32_t)(phba->fc_ratov << 1); 7976 7977 pring = lpfc_phba_elsring(phba); 7978 if (unlikely(!pring)) 7979 return; 7980 7981 if (phba->pport->load_flag & FC_UNLOADING) 7982 return; 7983 7984 spin_lock_irq(&phba->hbalock); 7985 if (phba->sli_rev == LPFC_SLI_REV4) 7986 spin_lock(&pring->ring_lock); 7987 7988 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 7989 cmd = &piocb->iocb; 7990 7991 if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || 7992 piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 7993 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 7994 continue; 7995 7996 if (piocb->vport != vport) 7997 continue; 7998 7999 pcmd = (struct lpfc_dmabuf *) piocb->context2; 8000 if (pcmd) 8001 els_command = *(uint32_t *) (pcmd->virt); 8002 8003 if (els_command == ELS_CMD_FARP || 8004 els_command == ELS_CMD_FARPR || 8005 els_command == ELS_CMD_FDISC) 8006 continue; 8007 8008 if (piocb->drvrTimeout > 0) { 8009 if (piocb->drvrTimeout >= timeout) 8010 piocb->drvrTimeout -= timeout; 8011 else 8012 piocb->drvrTimeout = 0; 8013 continue; 8014 } 8015 8016 remote_ID = 0xffffffff; 8017 if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR) 8018 remote_ID = cmd->un.elsreq64.remoteID; 8019 else { 8020 struct lpfc_nodelist *ndlp; 8021 ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext); 8022 if (ndlp) 8023 remote_ID = ndlp->nlp_DID; 8024 } 8025 list_add_tail(&piocb->dlist, &abort_list); 8026 } 8027 if (phba->sli_rev == LPFC_SLI_REV4) 8028 spin_unlock(&pring->ring_lock); 8029 spin_unlock_irq(&phba->hbalock); 8030 8031 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8032 cmd = &piocb->iocb; 8033 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8034 "0127 ELS timeout Data: x%x x%x x%x " 8035 "x%x\n", els_command, 8036 remote_ID, cmd->ulpCommand, cmd->ulpIoTag); 8037 spin_lock_irq(&phba->hbalock); 8038 list_del_init(&piocb->dlist); 8039 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 8040 spin_unlock_irq(&phba->hbalock); 8041 } 8042 8043 /* Make sure HBA is alive */ 8044 lpfc_issue_hb_tmo(phba); 8045 8046 if (!list_empty(&pring->txcmplq)) 8047 if (!(phba->pport->load_flag & FC_UNLOADING)) 8048 mod_timer(&vport->els_tmofunc, 8049 jiffies + msecs_to_jiffies(1000 * timeout)); 8050 } 8051 8052 /** 8053 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport 8054 * @vport: pointer to a host virtual N_Port data structure. 8055 * 8056 * This routine is used to clean up all the outstanding ELS commands on a 8057 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport() 8058 * routine. After that, it walks the ELS transmit queue to remove all the 8059 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For 8060 * the IOCBs with a non-NULL completion callback function, the callback 8061 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 8062 * un.ulpWord[4] set to IOERR_SLI_ABORTED. 
For IOCBs with a NULL completion 8063 * callback function, the IOCB will simply be released. Finally, it walks 8064 * the ELS transmit completion queue to issue an abort IOCB to any transmit 8065 * completion queue IOCB that is associated with the @vport and is not 8066 * an IOCB from libdfc (i.e., the management plane IOCBs that are not 8067 * part of the discovery state machine) out to HBA by invoking the 8068 * lpfc_sli_issue_abort_iotag() routine. Note that this function issues the 8069 * abort IOCB to any transmit completion queueed IOCB, it does not guarantee 8070 * the IOCBs are aborted when this function returns. 8071 **/ 8072 void 8073 lpfc_els_flush_cmd(struct lpfc_vport *vport) 8074 { 8075 LIST_HEAD(abort_list); 8076 struct lpfc_hba *phba = vport->phba; 8077 struct lpfc_sli_ring *pring; 8078 struct lpfc_iocbq *tmp_iocb, *piocb; 8079 IOCB_t *cmd = NULL; 8080 unsigned long iflags = 0; 8081 8082 lpfc_fabric_abort_vport(vport); 8083 8084 /* 8085 * For SLI3, only the hbalock is required. But SLI4 needs to coordinate 8086 * with the ring insert operation. Because lpfc_sli_issue_abort_iotag 8087 * ultimately grabs the ring_lock, the driver must splice the list into 8088 * a working list and release the locks before calling the abort. 8089 */ 8090 spin_lock_irqsave(&phba->hbalock, iflags); 8091 pring = lpfc_phba_elsring(phba); 8092 8093 /* Bail out if we've no ELS wq, like in PCI error recovery case. */ 8094 if (unlikely(!pring)) { 8095 spin_unlock_irqrestore(&phba->hbalock, iflags); 8096 return; 8097 } 8098 8099 if (phba->sli_rev == LPFC_SLI_REV4) 8100 spin_lock(&pring->ring_lock); 8101 8102 /* First we need to issue aborts to outstanding cmds on txcmpl */ 8103 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { 8104 if (piocb->iocb_flag & LPFC_IO_LIBDFC) 8105 continue; 8106 8107 if (piocb->vport != vport) 8108 continue; 8109 8110 if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) 8111 continue; 8112 8113 /* On the ELS ring we can have ELS_REQUESTs or 8114 * GEN_REQUESTs waiting for a response. 8115 */ 8116 cmd = &piocb->iocb; 8117 if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) { 8118 list_add_tail(&piocb->dlist, &abort_list); 8119 8120 /* If the link is down when flushing ELS commands 8121 * the firmware will not complete them till after 8122 * the link comes back up. This may confuse 8123 * discovery for the new link up, so we need to 8124 * change the compl routine to just clean up the iocb 8125 * and avoid any retry logic. 8126 */ 8127 if (phba->link_state == LPFC_LINK_DOWN) 8128 piocb->iocb_cmpl = lpfc_cmpl_els_link_down; 8129 } 8130 if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) 8131 list_add_tail(&piocb->dlist, &abort_list); 8132 } 8133 8134 if (phba->sli_rev == LPFC_SLI_REV4) 8135 spin_unlock(&pring->ring_lock); 8136 spin_unlock_irqrestore(&phba->hbalock, iflags); 8137 8138 /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
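 * The entries were spliced onto abort_list above so that the ring_lock,
 * which lpfc_sli_issue_abort_iotag() ultimately takes, is not held here;
 * only the hbalock is re-acquired around each individual abort.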
*/ 8139 list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { 8140 spin_lock_irqsave(&phba->hbalock, iflags); 8141 list_del_init(&piocb->dlist); 8142 lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL); 8143 spin_unlock_irqrestore(&phba->hbalock, iflags); 8144 } 8145 /* Make sure HBA is alive */ 8146 lpfc_issue_hb_tmo(phba); 8147 8148 if (!list_empty(&abort_list)) 8149 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8150 "3387 abort list for txq not empty\n"); 8151 INIT_LIST_HEAD(&abort_list); 8152 8153 spin_lock_irqsave(&phba->hbalock, iflags); 8154 if (phba->sli_rev == LPFC_SLI_REV4) 8155 spin_lock(&pring->ring_lock); 8156 8157 /* No need to abort the txq list, 8158 * just queue them up for lpfc_sli_cancel_iocbs 8159 */ 8160 list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { 8161 cmd = &piocb->iocb; 8162 8163 if (piocb->iocb_flag & LPFC_IO_LIBDFC) { 8164 continue; 8165 } 8166 8167 /* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ 8168 if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || 8169 cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || 8170 cmd->ulpCommand == CMD_CLOSE_XRI_CN || 8171 cmd->ulpCommand == CMD_ABORT_XRI_CN) 8172 continue; 8173 8174 if (piocb->vport != vport) 8175 continue; 8176 8177 list_del_init(&piocb->list); 8178 list_add_tail(&piocb->list, &abort_list); 8179 } 8180 8181 /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ 8182 if (vport == phba->pport) { 8183 list_for_each_entry_safe(piocb, tmp_iocb, 8184 &phba->fabric_iocb_list, list) { 8185 cmd = &piocb->iocb; 8186 list_del_init(&piocb->list); 8187 list_add_tail(&piocb->list, &abort_list); 8188 } 8189 } 8190 8191 if (phba->sli_rev == LPFC_SLI_REV4) 8192 spin_unlock(&pring->ring_lock); 8193 spin_unlock_irqrestore(&phba->hbalock, iflags); 8194 8195 /* Cancel all the IOCBs from the completions list */ 8196 lpfc_sli_cancel_iocbs(phba, &abort_list, 8197 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 8198 8199 return; 8200 } 8201 8202 /** 8203 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA 8204 * @phba: pointer to lpfc hba data structure. 8205 * 8206 * This routine is used to clean up all the outstanding ELS commands on a 8207 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba() 8208 * routine. After that, it walks the ELS transmit queue to remove all the 8209 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For 8210 * the IOCBs with the completion callback function associated, the callback 8211 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and 8212 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion 8213 * callback function associated, the IOCB will simply be released. Finally, 8214 * it walks the ELS transmit completion queue to issue an abort IOCB to any 8215 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the 8216 * management plane IOCBs that are not part of the discovery state machine) 8217 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine. 8218 **/ 8219 void 8220 lpfc_els_flush_all_cmd(struct lpfc_hba *phba) 8221 { 8222 struct lpfc_vport *vport; 8223 8224 spin_lock_irq(&phba->port_list_lock); 8225 list_for_each_entry(vport, &phba->port_list, listentry) 8226 lpfc_els_flush_cmd(vport); 8227 spin_unlock_irq(&phba->port_list_lock); 8228 8229 return; 8230 } 8231 8232 /** 8233 * lpfc_send_els_failure_event - Posts an ELS command failure event 8234 * @phba: Pointer to hba context object. 
8235 * @cmdiocbp: Pointer to command iocb which reported error. 8236 * @rspiocbp: Pointer to response iocb which reported error. 8237 * 8238 * This function sends an event when there is an ELS command 8239 * failure. 8240 **/ 8241 void 8242 lpfc_send_els_failure_event(struct lpfc_hba *phba, 8243 struct lpfc_iocbq *cmdiocbp, 8244 struct lpfc_iocbq *rspiocbp) 8245 { 8246 struct lpfc_vport *vport = cmdiocbp->vport; 8247 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8248 struct lpfc_lsrjt_event lsrjt_event; 8249 struct lpfc_fabric_event_header fabric_event; 8250 struct ls_rjt stat; 8251 struct lpfc_nodelist *ndlp; 8252 uint32_t *pcmd; 8253 8254 ndlp = cmdiocbp->context1; 8255 if (!ndlp) 8256 return; 8257 8258 if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) { 8259 lsrjt_event.header.event_type = FC_REG_ELS_EVENT; 8260 lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV; 8261 memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname, 8262 sizeof(struct lpfc_name)); 8263 memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename, 8264 sizeof(struct lpfc_name)); 8265 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8266 cmdiocbp->context2)->virt); 8267 lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; 8268 stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); 8269 lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; 8270 lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; 8271 fc_host_post_vendor_event(shost, 8272 fc_get_event_number(), 8273 sizeof(lsrjt_event), 8274 (char *)&lsrjt_event, 8275 LPFC_NL_VENDOR_ID); 8276 return; 8277 } 8278 if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) || 8279 (rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) { 8280 fabric_event.event_type = FC_REG_FABRIC_EVENT; 8281 if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) 8282 fabric_event.subcategory = LPFC_EVENT_PORT_BUSY; 8283 else 8284 fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY; 8285 memcpy(fabric_event.wwpn, &ndlp->nlp_portname, 8286 sizeof(struct lpfc_name)); 8287 memcpy(fabric_event.wwnn, &ndlp->nlp_nodename, 8288 sizeof(struct lpfc_name)); 8289 fc_host_post_vendor_event(shost, 8290 fc_get_event_number(), 8291 sizeof(fabric_event), 8292 (char *)&fabric_event, 8293 LPFC_NL_VENDOR_ID); 8294 return; 8295 } 8296 8297 } 8298 8299 /** 8300 * lpfc_send_els_event - Posts unsolicited els event 8301 * @vport: Pointer to vport object. 8302 * @ndlp: Pointer FC node object. 8303 * @payload: ELS command code type. 8304 * 8305 * This function posts an event when there is an incoming 8306 * unsolicited ELS command. 
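 * A LOGO is reported with the larger lpfc_logo_event so the WWPN carried
 * in the LOGO payload can be included; PLOGI, PRLO and ADISC use the
 * basic lpfc_els_event_header, and any other command is dropped without
 * posting an event.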
8307 **/ 8308 static void 8309 lpfc_send_els_event(struct lpfc_vport *vport, 8310 struct lpfc_nodelist *ndlp, 8311 uint32_t *payload) 8312 { 8313 struct lpfc_els_event_header *els_data = NULL; 8314 struct lpfc_logo_event *logo_data = NULL; 8315 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 8316 8317 if (*payload == ELS_CMD_LOGO) { 8318 logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL); 8319 if (!logo_data) { 8320 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8321 "0148 Failed to allocate memory " 8322 "for LOGO event\n"); 8323 return; 8324 } 8325 els_data = &logo_data->header; 8326 } else { 8327 els_data = kmalloc(sizeof(struct lpfc_els_event_header), 8328 GFP_KERNEL); 8329 if (!els_data) { 8330 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8331 "0149 Failed to allocate memory " 8332 "for ELS event\n"); 8333 return; 8334 } 8335 } 8336 els_data->event_type = FC_REG_ELS_EVENT; 8337 switch (*payload) { 8338 case ELS_CMD_PLOGI: 8339 els_data->subcategory = LPFC_EVENT_PLOGI_RCV; 8340 break; 8341 case ELS_CMD_PRLO: 8342 els_data->subcategory = LPFC_EVENT_PRLO_RCV; 8343 break; 8344 case ELS_CMD_ADISC: 8345 els_data->subcategory = LPFC_EVENT_ADISC_RCV; 8346 break; 8347 case ELS_CMD_LOGO: 8348 els_data->subcategory = LPFC_EVENT_LOGO_RCV; 8349 /* Copy the WWPN in the LOGO payload */ 8350 memcpy(logo_data->logo_wwpn, &payload[2], 8351 sizeof(struct lpfc_name)); 8352 break; 8353 default: 8354 kfree(els_data); 8355 return; 8356 } 8357 memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name)); 8358 memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name)); 8359 if (*payload == ELS_CMD_LOGO) { 8360 fc_host_post_vendor_event(shost, 8361 fc_get_event_number(), 8362 sizeof(struct lpfc_logo_event), 8363 (char *)logo_data, 8364 LPFC_NL_VENDOR_ID); 8365 kfree(logo_data); 8366 } else { 8367 fc_host_post_vendor_event(shost, 8368 fc_get_event_number(), 8369 sizeof(struct lpfc_els_event_header), 8370 (char *)els_data, 8371 LPFC_NL_VENDOR_ID); 8372 kfree(els_data); 8373 } 8374 8375 return; 8376 } 8377 8378 8379 DECLARE_ENUM2STR_LOOKUP(lpfc_get_tlv_dtag_nm, fc_ls_tlv_dtag, 8380 FC_LS_TLV_DTAG_INIT); 8381 8382 DECLARE_ENUM2STR_LOOKUP(lpfc_get_fpin_li_event_nm, fc_fpin_li_event_types, 8383 FC_FPIN_LI_EVT_TYPES_INIT); 8384 8385 /** 8386 * lpfc_els_rcv_fpin_li - Process an FPIN Link Integrity Event. 8387 * @vport: Pointer to vport object. 8388 * @tlv: Pointer to the Link Integrity Notification Descriptor. 
8389 * 8390 * This function processes a link integrity FPIN event by 8391 * logging a message 8392 **/ 8393 static void 8394 lpfc_els_rcv_fpin_li(struct lpfc_vport *vport, struct fc_tlv_desc *tlv) 8395 { 8396 struct fc_fn_li_desc *li = (struct fc_fn_li_desc *)tlv; 8397 const char *li_evt_str; 8398 u32 li_evt; 8399 8400 li_evt = be16_to_cpu(li->event_type); 8401 li_evt_str = lpfc_get_fpin_li_event_nm(li_evt); 8402 8403 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8404 "4680 FPIN Link Integrity %s (x%x) " 8405 "Detecting PN x%016llx Attached PN x%016llx " 8406 "Duration %d mSecs Count %d Port Cnt %d\n", 8407 li_evt_str, li_evt, 8408 be64_to_cpu(li->detecting_wwpn), 8409 be64_to_cpu(li->attached_wwpn), 8410 be32_to_cpu(li->event_threshold), 8411 be32_to_cpu(li->event_count), 8412 be32_to_cpu(li->pname_count)); 8413 } 8414 8415 static void 8416 lpfc_els_rcv_fpin(struct lpfc_vport *vport, struct fc_els_fpin *fpin, 8417 u32 fpin_length) 8418 { 8419 struct fc_tlv_desc *tlv; 8420 const char *dtag_nm; 8421 uint32_t desc_cnt = 0, bytes_remain; 8422 u32 dtag; 8423 8424 /* FPINs handled only if we are in the right discovery state */ 8425 if (vport->port_state < LPFC_DISC_AUTH) 8426 return; 8427 8428 /* make sure there is the full fpin header */ 8429 if (fpin_length < sizeof(struct fc_els_fpin)) 8430 return; 8431 8432 tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0]; 8433 bytes_remain = fpin_length - offsetof(struct fc_els_fpin, fpin_desc); 8434 bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len)); 8435 8436 /* process each descriptor */ 8437 while (bytes_remain >= FC_TLV_DESC_HDR_SZ && 8438 bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) { 8439 8440 dtag = be32_to_cpu(tlv->desc_tag); 8441 switch (dtag) { 8442 case ELS_DTAG_LNK_INTEGRITY: 8443 lpfc_els_rcv_fpin_li(vport, tlv); 8444 break; 8445 default: 8446 dtag_nm = lpfc_get_tlv_dtag_nm(dtag); 8447 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8448 "4678 skipped FPIN descriptor[%d]: " 8449 "tag x%x (%s)\n", 8450 desc_cnt, dtag, dtag_nm); 8451 break; 8452 } 8453 8454 desc_cnt++; 8455 bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv); 8456 tlv = fc_tlv_next_desc(tlv); 8457 } 8458 8459 fc_host_fpin_rcv(lpfc_shost_from_vport(vport), fpin_length, 8460 (char *)fpin); 8461 } 8462 8463 /** 8464 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer 8465 * @phba: pointer to lpfc hba data structure. 8466 * @pring: pointer to a SLI ring. 8467 * @vport: pointer to a host virtual N_Port data structure. 8468 * @elsiocb: pointer to lpfc els command iocb data structure. 8469 * 8470 * This routine is used for processing the IOCB associated with a unsolicited 8471 * event. It first determines whether there is an existing ndlp that matches 8472 * the DID from the unsolicited IOCB. If not, it will create a new one with 8473 * the DID from the unsolicited IOCB. The ELS command from the unsolicited 8474 * IOCB is then used to invoke the proper routine and to set up proper state 8475 * of the discovery state machine. 
8476 **/ 8477 static void 8478 lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8479 struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb) 8480 { 8481 struct lpfc_nodelist *ndlp; 8482 struct ls_rjt stat; 8483 uint32_t *payload, payload_len; 8484 uint32_t cmd, did, newnode; 8485 uint8_t rjt_exp, rjt_err = 0, init_link = 0; 8486 IOCB_t *icmd = &elsiocb->iocb; 8487 LPFC_MBOXQ_t *mbox; 8488 8489 if (!vport || !(elsiocb->context2)) 8490 goto dropit; 8491 8492 newnode = 0; 8493 payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt; 8494 payload_len = elsiocb->iocb.unsli3.rcvsli3.acc_len; 8495 cmd = *payload; 8496 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0) 8497 lpfc_post_buffer(phba, pring, 1); 8498 8499 did = icmd->un.rcvels.remoteID; 8500 if (icmd->ulpStatus) { 8501 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8502 "RCV Unsol ELS: status:x%x/x%x did:x%x", 8503 icmd->ulpStatus, icmd->un.ulpWord[4], did); 8504 goto dropit; 8505 } 8506 8507 /* Check to see if link went down during discovery */ 8508 if (lpfc_els_chk_latt(vport)) 8509 goto dropit; 8510 8511 /* Ignore traffic received during vport shutdown. */ 8512 if (vport->load_flag & FC_UNLOADING) 8513 goto dropit; 8514 8515 /* If NPort discovery is delayed drop incoming ELS */ 8516 if ((vport->fc_flag & FC_DISC_DELAYED) && 8517 (cmd != ELS_CMD_PLOGI)) 8518 goto dropit; 8519 8520 ndlp = lpfc_findnode_did(vport, did); 8521 if (!ndlp) { 8522 /* Cannot find existing Fabric ndlp, so allocate a new one */ 8523 ndlp = lpfc_nlp_init(vport, did); 8524 if (!ndlp) 8525 goto dropit; 8526 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8527 newnode = 1; 8528 if ((did & Fabric_DID_MASK) == Fabric_DID_MASK) 8529 ndlp->nlp_type |= NLP_FABRIC; 8530 } else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { 8531 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 8532 newnode = 1; 8533 } 8534 8535 phba->fc_stat.elsRcvFrame++; 8536 8537 /* 8538 * Do not process any unsolicited ELS commands 8539 * if the ndlp is in DEV_LOSS 8540 */ 8541 spin_lock_irq(&ndlp->lock); 8542 if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { 8543 spin_unlock_irq(&ndlp->lock); 8544 if (newnode) 8545 lpfc_nlp_put(ndlp); 8546 goto dropit; 8547 } 8548 spin_unlock_irq(&ndlp->lock); 8549 8550 elsiocb->context1 = lpfc_nlp_get(ndlp); 8551 if (!elsiocb->context1) 8552 goto dropit; 8553 elsiocb->vport = vport; 8554 8555 if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) { 8556 cmd &= ELS_CMD_MASK; 8557 } 8558 /* ELS command <elsCmd> received from NPORT <did> */ 8559 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 8560 "0112 ELS command x%x received from NPORT x%x " 8561 "refcnt %d Data: x%x x%x x%x x%x\n", 8562 cmd, did, kref_read(&ndlp->kref), vport->port_state, 8563 vport->fc_flag, vport->fc_myDID, vport->fc_prevDID); 8564 8565 /* reject till our FLOGI completes or PLOGI assigned DID via PT2PT */ 8566 if ((vport->port_state < LPFC_FABRIC_CFG_LINK) && 8567 (cmd != ELS_CMD_FLOGI) && 8568 !((cmd == ELS_CMD_PLOGI) && (vport->fc_flag & FC_PT2PT))) { 8569 rjt_err = LSRJT_LOGICAL_BSY; 8570 rjt_exp = LSEXP_NOTHING_MORE; 8571 goto lsrjt; 8572 } 8573 8574 switch (cmd) { 8575 case ELS_CMD_PLOGI: 8576 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8577 "RCV PLOGI: did:x%x/ste:x%x flg:x%x", 8578 did, vport->port_state, ndlp->nlp_flag); 8579 8580 phba->fc_stat.elsRcvPLOGI++; 8581 ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); 8582 if (phba->sli_rev == LPFC_SLI_REV4 && 8583 (phba->pport->fc_flag & FC_PT2PT)) { 8584 vport->fc_prevDID = vport->fc_myDID; 8585 /* Our DID needs 
to be updated before registering
8586 * the vfi. This is done in lpfc_rcv_plogi but
8587 * that is called after the reg_vfi.
8588 */
8589 vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo;
8590 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
8591 "3312 Remote port assigned DID x%x "
8592 "%x\n", vport->fc_myDID,
8593 vport->fc_prevDID);
8594 }
8595
8596 lpfc_send_els_event(vport, ndlp, payload);
8597
8598 /* If NPort discovery is delayed, reject PLOGIs */
8599 if (vport->fc_flag & FC_DISC_DELAYED) {
8600 rjt_err = LSRJT_UNABLE_TPC;
8601 rjt_exp = LSEXP_NOTHING_MORE;
8602 break;
8603 }
8604
8605 if (vport->port_state < LPFC_DISC_AUTH) {
8606 if (!(phba->pport->fc_flag & FC_PT2PT) ||
8607 (phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
8608 rjt_err = LSRJT_UNABLE_TPC;
8609 rjt_exp = LSEXP_NOTHING_MORE;
8610 break;
8611 }
8612 }
8613
8614 spin_lock_irq(&ndlp->lock);
8615 ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
8616 spin_unlock_irq(&ndlp->lock);
8617
8618 lpfc_disc_state_machine(vport, ndlp, elsiocb,
8619 NLP_EVT_RCV_PLOGI);
8620
8621 break;
8622 case ELS_CMD_FLOGI:
8623 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8624 "RCV FLOGI: did:x%x/ste:x%x flg:x%x",
8625 did, vport->port_state, ndlp->nlp_flag);
8626
8627 phba->fc_stat.elsRcvFLOGI++;
8628
8629 /* If the driver believes fabric discovery is done and is ready,
8630 * bounce the link. There is some discrepancy.
8631 */
8632 if (vport->port_state >= LPFC_LOCAL_CFG_LINK &&
8633 vport->fc_flag & FC_PT2PT &&
8634 vport->rcv_flogi_cnt >= 1) {
8635 rjt_err = LSRJT_LOGICAL_BSY;
8636 rjt_exp = LSEXP_NOTHING_MORE;
8637 init_link++;
8638 goto lsrjt;
8639 }
8640
8641 lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
8642 if (newnode)
8643 lpfc_disc_state_machine(vport, ndlp, NULL,
8644 NLP_EVT_DEVICE_RM);
8645 break;
8646 case ELS_CMD_LOGO:
8647 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8648 "RCV LOGO: did:x%x/ste:x%x flg:x%x",
8649 did, vport->port_state, ndlp->nlp_flag);
8650
8651 phba->fc_stat.elsRcvLOGO++;
8652 lpfc_send_els_event(vport, ndlp, payload);
8653 if (vport->port_state < LPFC_DISC_AUTH) {
8654 rjt_err = LSRJT_UNABLE_TPC;
8655 rjt_exp = LSEXP_NOTHING_MORE;
8656 break;
8657 }
8658 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
8659 break;
8660 case ELS_CMD_PRLO:
8661 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8662 "RCV PRLO: did:x%x/ste:x%x flg:x%x",
8663 did, vport->port_state, ndlp->nlp_flag);
8664
8665 phba->fc_stat.elsRcvPRLO++;
8666 lpfc_send_els_event(vport, ndlp, payload);
8667 if (vport->port_state < LPFC_DISC_AUTH) {
8668 rjt_err = LSRJT_UNABLE_TPC;
8669 rjt_exp = LSEXP_NOTHING_MORE;
8670 break;
8671 }
8672 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
8673 break;
8674 case ELS_CMD_LCB:
8675 phba->fc_stat.elsRcvLCB++;
8676 lpfc_els_rcv_lcb(vport, elsiocb, ndlp);
8677 break;
8678 case ELS_CMD_RDP:
8679 phba->fc_stat.elsRcvRDP++;
8680 lpfc_els_rcv_rdp(vport, elsiocb, ndlp);
8681 break;
8682 case ELS_CMD_RSCN:
8683 phba->fc_stat.elsRcvRSCN++;
8684 lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
8685 if (newnode)
8686 lpfc_disc_state_machine(vport, ndlp, NULL,
8687 NLP_EVT_DEVICE_RM);
8688 break;
8689 case ELS_CMD_ADISC:
8690 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
8691 "RCV ADISC: did:x%x/ste:x%x flg:x%x",
8692 did, vport->port_state, ndlp->nlp_flag);
8693
8694 lpfc_send_els_event(vport, ndlp, payload);
8695 phba->fc_stat.elsRcvADISC++;
8696 if (vport->port_state < LPFC_DISC_AUTH) {
8697 rjt_err = LSRJT_UNABLE_TPC;
8698 rjt_exp = LSEXP_NOTHING_MORE;
8699 break;
8700 }
8701
lpfc_disc_state_machine(vport, ndlp, elsiocb, 8702 NLP_EVT_RCV_ADISC); 8703 break; 8704 case ELS_CMD_PDISC: 8705 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8706 "RCV PDISC: did:x%x/ste:x%x flg:x%x", 8707 did, vport->port_state, ndlp->nlp_flag); 8708 8709 phba->fc_stat.elsRcvPDISC++; 8710 if (vport->port_state < LPFC_DISC_AUTH) { 8711 rjt_err = LSRJT_UNABLE_TPC; 8712 rjt_exp = LSEXP_NOTHING_MORE; 8713 break; 8714 } 8715 lpfc_disc_state_machine(vport, ndlp, elsiocb, 8716 NLP_EVT_RCV_PDISC); 8717 break; 8718 case ELS_CMD_FARPR: 8719 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8720 "RCV FARPR: did:x%x/ste:x%x flg:x%x", 8721 did, vport->port_state, ndlp->nlp_flag); 8722 8723 phba->fc_stat.elsRcvFARPR++; 8724 lpfc_els_rcv_farpr(vport, elsiocb, ndlp); 8725 break; 8726 case ELS_CMD_FARP: 8727 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8728 "RCV FARP: did:x%x/ste:x%x flg:x%x", 8729 did, vport->port_state, ndlp->nlp_flag); 8730 8731 phba->fc_stat.elsRcvFARP++; 8732 lpfc_els_rcv_farp(vport, elsiocb, ndlp); 8733 break; 8734 case ELS_CMD_FAN: 8735 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8736 "RCV FAN: did:x%x/ste:x%x flg:x%x", 8737 did, vport->port_state, ndlp->nlp_flag); 8738 8739 phba->fc_stat.elsRcvFAN++; 8740 lpfc_els_rcv_fan(vport, elsiocb, ndlp); 8741 break; 8742 case ELS_CMD_PRLI: 8743 case ELS_CMD_NVMEPRLI: 8744 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8745 "RCV PRLI: did:x%x/ste:x%x flg:x%x", 8746 did, vport->port_state, ndlp->nlp_flag); 8747 8748 phba->fc_stat.elsRcvPRLI++; 8749 if ((vport->port_state < LPFC_DISC_AUTH) && 8750 (vport->fc_flag & FC_FABRIC)) { 8751 rjt_err = LSRJT_UNABLE_TPC; 8752 rjt_exp = LSEXP_NOTHING_MORE; 8753 break; 8754 } 8755 lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI); 8756 break; 8757 case ELS_CMD_LIRR: 8758 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8759 "RCV LIRR: did:x%x/ste:x%x flg:x%x", 8760 did, vport->port_state, ndlp->nlp_flag); 8761 8762 phba->fc_stat.elsRcvLIRR++; 8763 lpfc_els_rcv_lirr(vport, elsiocb, ndlp); 8764 if (newnode) 8765 lpfc_disc_state_machine(vport, ndlp, NULL, 8766 NLP_EVT_DEVICE_RM); 8767 break; 8768 case ELS_CMD_RLS: 8769 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8770 "RCV RLS: did:x%x/ste:x%x flg:x%x", 8771 did, vport->port_state, ndlp->nlp_flag); 8772 8773 phba->fc_stat.elsRcvRLS++; 8774 lpfc_els_rcv_rls(vport, elsiocb, ndlp); 8775 if (newnode) 8776 lpfc_disc_state_machine(vport, ndlp, NULL, 8777 NLP_EVT_DEVICE_RM); 8778 break; 8779 case ELS_CMD_RPL: 8780 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8781 "RCV RPL: did:x%x/ste:x%x flg:x%x", 8782 did, vport->port_state, ndlp->nlp_flag); 8783 8784 phba->fc_stat.elsRcvRPL++; 8785 lpfc_els_rcv_rpl(vport, elsiocb, ndlp); 8786 if (newnode) 8787 lpfc_disc_state_machine(vport, ndlp, NULL, 8788 NLP_EVT_DEVICE_RM); 8789 break; 8790 case ELS_CMD_RNID: 8791 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8792 "RCV RNID: did:x%x/ste:x%x flg:x%x", 8793 did, vport->port_state, ndlp->nlp_flag); 8794 8795 phba->fc_stat.elsRcvRNID++; 8796 lpfc_els_rcv_rnid(vport, elsiocb, ndlp); 8797 if (newnode) 8798 lpfc_disc_state_machine(vport, ndlp, NULL, 8799 NLP_EVT_DEVICE_RM); 8800 break; 8801 case ELS_CMD_RTV: 8802 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8803 "RCV RTV: did:x%x/ste:x%x flg:x%x", 8804 did, vport->port_state, ndlp->nlp_flag); 8805 phba->fc_stat.elsRcvRTV++; 8806 lpfc_els_rcv_rtv(vport, elsiocb, ndlp); 8807 if (newnode) 8808 lpfc_disc_state_machine(vport, ndlp, NULL, 8809 
NLP_EVT_DEVICE_RM); 8810 break; 8811 case ELS_CMD_RRQ: 8812 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8813 "RCV RRQ: did:x%x/ste:x%x flg:x%x", 8814 did, vport->port_state, ndlp->nlp_flag); 8815 8816 phba->fc_stat.elsRcvRRQ++; 8817 lpfc_els_rcv_rrq(vport, elsiocb, ndlp); 8818 if (newnode) 8819 lpfc_disc_state_machine(vport, ndlp, NULL, 8820 NLP_EVT_DEVICE_RM); 8821 break; 8822 case ELS_CMD_ECHO: 8823 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8824 "RCV ECHO: did:x%x/ste:x%x flg:x%x", 8825 did, vport->port_state, ndlp->nlp_flag); 8826 8827 phba->fc_stat.elsRcvECHO++; 8828 lpfc_els_rcv_echo(vport, elsiocb, ndlp); 8829 if (newnode) 8830 lpfc_disc_state_machine(vport, ndlp, NULL, 8831 NLP_EVT_DEVICE_RM); 8832 break; 8833 case ELS_CMD_REC: 8834 /* receive this due to exchange closed */ 8835 rjt_err = LSRJT_UNABLE_TPC; 8836 rjt_exp = LSEXP_INVALID_OX_RX; 8837 break; 8838 case ELS_CMD_FPIN: 8839 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8840 "RCV FPIN: did:x%x/ste:x%x flg:x%x", 8841 did, vport->port_state, ndlp->nlp_flag); 8842 8843 lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, 8844 payload_len); 8845 8846 /* There are no replies, so no rjt codes */ 8847 break; 8848 default: 8849 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, 8850 "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", 8851 cmd, did, vport->port_state); 8852 8853 /* Unsupported ELS command, reject */ 8854 rjt_err = LSRJT_CMD_UNSUPPORTED; 8855 rjt_exp = LSEXP_NOTHING_MORE; 8856 8857 /* Unknown ELS command <elsCmd> received from NPORT <did> */ 8858 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8859 "0115 Unknown ELS command x%x " 8860 "received from NPORT x%x\n", cmd, did); 8861 if (newnode) 8862 lpfc_disc_state_machine(vport, ndlp, NULL, 8863 NLP_EVT_DEVICE_RM); 8864 break; 8865 } 8866 8867 lsrjt: 8868 /* check if need to LS_RJT received ELS cmd */ 8869 if (rjt_err) { 8870 memset(&stat, 0, sizeof(stat)); 8871 stat.un.b.lsRjtRsnCode = rjt_err; 8872 stat.un.b.lsRjtRsnCodeExp = rjt_exp; 8873 lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp, 8874 NULL); 8875 /* Remove the reference from above for new nodes. */ 8876 if (newnode) 8877 lpfc_disc_state_machine(vport, ndlp, NULL, 8878 NLP_EVT_DEVICE_RM); 8879 } 8880 8881 /* Release the reference on this elsiocb, not the ndlp. */ 8882 lpfc_nlp_put(elsiocb->context1); 8883 elsiocb->context1 = NULL; 8884 8885 /* Special case. Driver received an unsolicited command that 8886 * unsupportable given the driver's current state. Reset the 8887 * link and start over. 8888 */ 8889 if (init_link) { 8890 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 8891 if (!mbox) 8892 return; 8893 lpfc_linkdown(phba); 8894 lpfc_init_link(phba, mbox, 8895 phba->cfg_topology, 8896 phba->cfg_link_speed); 8897 mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 8898 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8899 mbox->vport = vport; 8900 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == 8901 MBX_NOT_FINISHED) 8902 mempool_free(mbox, phba->mbox_mem_pool); 8903 } 8904 8905 return; 8906 8907 dropit: 8908 if (vport && !(vport->load_flag & FC_UNLOADING)) 8909 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 8910 "0111 Dropping received ELS cmd " 8911 "Data: x%x x%x x%x\n", 8912 icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); 8913 phba->fc_stat.elsRcvDrop++; 8914 } 8915 8916 /** 8917 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring 8918 * @phba: pointer to lpfc hba data structure. 8919 * @pring: pointer to a SLI ring. 
8920 * @elsiocb: pointer to lpfc els iocb data structure. 8921 * 8922 * This routine is used to process an unsolicited event received from a SLI 8923 * (Service Level Interface) ring. The actual processing of the data buffer 8924 * associated with the unsolicited event is done by invoking the routine 8925 * lpfc_els_unsol_buffer() after properly set up the iocb buffer from the 8926 * SLI ring on which the unsolicited event was received. 8927 **/ 8928 void 8929 lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8930 struct lpfc_iocbq *elsiocb) 8931 { 8932 struct lpfc_vport *vport = phba->pport; 8933 IOCB_t *icmd = &elsiocb->iocb; 8934 dma_addr_t paddr; 8935 struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2; 8936 struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3; 8937 8938 elsiocb->context1 = NULL; 8939 elsiocb->context2 = NULL; 8940 elsiocb->context3 = NULL; 8941 8942 if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) { 8943 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 8944 } else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT && 8945 (icmd->un.ulpWord[4] & IOERR_PARAM_MASK) == 8946 IOERR_RCV_BUFFER_WAITING) { 8947 phba->fc_stat.NoRcvBuf++; 8948 /* Not enough posted buffers; Try posting more buffers */ 8949 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) 8950 lpfc_post_buffer(phba, pring, 0); 8951 return; 8952 } 8953 8954 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 8955 (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX || 8956 icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 8957 if (icmd->unsli3.rcvsli3.vpi == 0xffff) 8958 vport = phba->pport; 8959 else 8960 vport = lpfc_find_vport_by_vpid(phba, 8961 icmd->unsli3.rcvsli3.vpi); 8962 } 8963 8964 /* If there are no BDEs associated 8965 * with this IOCB, there is nothing to do. 8966 */ 8967 if (icmd->ulpBdeCount == 0) 8968 return; 8969 8970 /* type of ELS cmd is first 32bit word 8971 * in packet 8972 */ 8973 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 8974 elsiocb->context2 = bdeBuf1; 8975 } else { 8976 paddr = getPaddr(icmd->un.cont64[0].addrHigh, 8977 icmd->un.cont64[0].addrLow); 8978 elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring, 8979 paddr); 8980 } 8981 8982 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 8983 /* 8984 * The different unsolicited event handlers would tell us 8985 * if they are done with "mp" by setting context2 to NULL. 8986 */ 8987 if (elsiocb->context2) { 8988 lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2); 8989 elsiocb->context2 = NULL; 8990 } 8991 8992 /* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */ 8993 if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) && 8994 icmd->ulpBdeCount == 2) { 8995 elsiocb->context2 = bdeBuf2; 8996 lpfc_els_unsol_buffer(phba, pring, vport, elsiocb); 8997 /* free mp if we are done with it */ 8998 if (elsiocb->context2) { 8999 lpfc_in_buf_free(phba, elsiocb->context2); 9000 elsiocb->context2 = NULL; 9001 } 9002 } 9003 } 9004 9005 static void 9006 lpfc_start_fdmi(struct lpfc_vport *vport) 9007 { 9008 struct lpfc_nodelist *ndlp; 9009 9010 /* If this is the first time, allocate an ndlp and initialize 9011 * it. Otherwise, make sure the node is enabled and then do the 9012 * login. 
9013 */
9014 ndlp = lpfc_findnode_did(vport, FDMI_DID);
9015 if (!ndlp) {
9016 ndlp = lpfc_nlp_init(vport, FDMI_DID);
9017 if (ndlp) {
9018 ndlp->nlp_type |= NLP_FABRIC;
9019 } else {
9020 return;
9021 }
9022 }
9023
9024 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
9025 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
9026 }
9027
9028 /**
9029 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
9030 * @phba: pointer to lpfc hba data structure.
9031 * @vport: pointer to a virtual N_Port data structure.
9032 *
9033 * This routine issues a Port Login (PLOGI) to the Name Server with
9034 * State Change Request (SCR) for a @vport. This routine will create an
9035 * ndlp for the Name Server associated with the @vport if such a node does
9036 * not already exist. The PLOGI to Name Server is issued by invoking the
9037 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
9038 * (FDMI) is configured for the @vport, an FDMI node will be created and
9039 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
9040 **/
9041 void
9042 lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
9043 {
9044 struct lpfc_nodelist *ndlp;
9045 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9046
9047 /*
9048 * If lpfc_delay_discovery parameter is set and the clean address
9049 * bit is cleared and the FC fabric parameters changed, delay FC NPort
9050 * discovery.
9051 */
9052 spin_lock_irq(shost->host_lock);
9053 if (vport->fc_flag & FC_DISC_DELAYED) {
9054 spin_unlock_irq(shost->host_lock);
9055 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9056 "3334 Delay fc port discovery for %d secs\n",
9057 phba->fc_ratov);
9058 mod_timer(&vport->delayed_disc_tmo,
9059 jiffies + msecs_to_jiffies(1000 * phba->fc_ratov));
9060 return;
9061 }
9062 spin_unlock_irq(shost->host_lock);
9063
9064 ndlp = lpfc_findnode_did(vport, NameServer_DID);
9065 if (!ndlp) {
9066 ndlp = lpfc_nlp_init(vport, NameServer_DID);
9067 if (!ndlp) {
9068 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9069 lpfc_disc_start(vport);
9070 return;
9071 }
9072 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9073 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9074 "0251 NameServer login: no memory\n");
9075 return;
9076 }
9077 }
9078
9079 ndlp->nlp_type |= NLP_FABRIC;
9080
9081 lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
9082
9083 if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
9084 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9085 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9086 "0252 Cannot issue NameServer login\n");
9087 return;
9088 }
9089
9090 if ((phba->cfg_enable_SmartSAN ||
9091 (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
9092 (vport->load_flag & FC_ALLOW_FDMI))
9093 lpfc_start_fdmi(vport);
9094 }
9095
9096 /**
9097 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
9098 * @phba: pointer to lpfc hba data structure.
9099 * @pmb: pointer to the driver internal queue element for mailbox command.
9100 *
9101 * This routine is the completion callback function to register new vport
9102 * mailbox command. If the new vport mailbox command completes successfully,
9103 * the fabric registration login shall be performed on physical port (the
9104 * new vport created is actually a physical port, with VPI 0) or the port
9105 * login to Name Server for State Change Request (SCR) will be performed
9106 * on virtual port (real virtual port, with VPI greater than 0).
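 *
 * On a REG_VPI mailbox error the handler recovers as the status suggests;
 * this is only a summary of the code below, not a separate interface:
 * unrecoverable statuses (0x11 unsupported feature, 0x9602 link event
 * since CLEAR_LA, 0x9603 max_vpi exceeded) fail the vport; status 0x20
 * (invalid VPI) re-issues INIT_VPI; any other status unregisters the
 * RPIs and the VPI and restarts discovery via FLOGI/INIT_VFI or FDISC.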
9107 **/ 9108 static void 9109 lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 9110 { 9111 struct lpfc_vport *vport = pmb->vport; 9112 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 9113 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 9114 MAILBOX_t *mb = &pmb->u.mb; 9115 int rc; 9116 9117 spin_lock_irq(shost->host_lock); 9118 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 9119 spin_unlock_irq(shost->host_lock); 9120 9121 if (mb->mbxStatus) { 9122 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9123 "0915 Register VPI failed : Status: x%x" 9124 " upd bit: x%x \n", mb->mbxStatus, 9125 mb->un.varRegVpi.upd); 9126 if (phba->sli_rev == LPFC_SLI_REV4 && 9127 mb->un.varRegVpi.upd) 9128 goto mbox_err_exit ; 9129 9130 switch (mb->mbxStatus) { 9131 case 0x11: /* unsupported feature */ 9132 case 0x9603: /* max_vpi exceeded */ 9133 case 0x9602: /* Link event since CLEAR_LA */ 9134 /* giving up on vport registration */ 9135 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9136 spin_lock_irq(shost->host_lock); 9137 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 9138 spin_unlock_irq(shost->host_lock); 9139 lpfc_can_disctmo(vport); 9140 break; 9141 /* If reg_vpi fail with invalid VPI status, re-init VPI */ 9142 case 0x20: 9143 spin_lock_irq(shost->host_lock); 9144 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9145 spin_unlock_irq(shost->host_lock); 9146 lpfc_init_vpi(phba, pmb, vport->vpi); 9147 pmb->vport = vport; 9148 pmb->mbox_cmpl = lpfc_init_vpi_cmpl; 9149 rc = lpfc_sli_issue_mbox(phba, pmb, 9150 MBX_NOWAIT); 9151 if (rc == MBX_NOT_FINISHED) { 9152 lpfc_printf_vlog(vport, KERN_ERR, 9153 LOG_TRACE_EVENT, 9154 "2732 Failed to issue INIT_VPI" 9155 " mailbox command\n"); 9156 } else { 9157 lpfc_nlp_put(ndlp); 9158 return; 9159 } 9160 fallthrough; 9161 default: 9162 /* Try to recover from this error */ 9163 if (phba->sli_rev == LPFC_SLI_REV4) 9164 lpfc_sli4_unreg_all_rpis(vport); 9165 lpfc_mbx_unreg_vpi(vport); 9166 spin_lock_irq(shost->host_lock); 9167 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9168 spin_unlock_irq(shost->host_lock); 9169 if (mb->mbxStatus == MBX_NOT_FINISHED) 9170 break; 9171 if ((vport->port_type == LPFC_PHYSICAL_PORT) && 9172 !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) { 9173 if (phba->sli_rev == LPFC_SLI_REV4) 9174 lpfc_issue_init_vfi(vport); 9175 else 9176 lpfc_initial_flogi(vport); 9177 } else { 9178 lpfc_initial_fdisc(vport); 9179 } 9180 break; 9181 } 9182 } else { 9183 spin_lock_irq(shost->host_lock); 9184 vport->vpi_state |= LPFC_VPI_REGISTERED; 9185 spin_unlock_irq(shost->host_lock); 9186 if (vport == phba->pport) { 9187 if (phba->sli_rev < LPFC_SLI_REV4) 9188 lpfc_issue_fabric_reglogin(vport); 9189 else { 9190 /* 9191 * If the physical port is instantiated using 9192 * FDISC, do not start vport discovery. 9193 */ 9194 if (vport->port_state != LPFC_FDISC) 9195 lpfc_start_fdiscs(phba); 9196 lpfc_do_scr_ns_plogi(phba, vport); 9197 } 9198 } else { 9199 lpfc_do_scr_ns_plogi(phba, vport); 9200 } 9201 } 9202 mbox_err_exit: 9203 /* Now, we decrement the ndlp reference count held for this 9204 * callback function 9205 */ 9206 lpfc_nlp_put(ndlp); 9207 9208 mempool_free(pmb, phba->mbox_mem_pool); 9209 return; 9210 } 9211 9212 /** 9213 * lpfc_register_new_vport - Register a new vport with a HBA 9214 * @phba: pointer to lpfc hba data structure. 9215 * @vport: pointer to a host virtual N_Port data structure. 9216 * @ndlp: pointer to a node-list data structure. 9217 * 9218 * This routine registers the @vport as a new virtual port with a HBA. 
9219 * It is done through a registering vpi mailbox command.
9220 **/
9221 void
9222 lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
9223 struct lpfc_nodelist *ndlp)
9224 {
9225 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9226 LPFC_MBOXQ_t *mbox;
9227
9228 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9229 if (mbox) {
9230 lpfc_reg_vpi(vport, mbox);
9231 mbox->vport = vport;
9232 mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
9233 if (!mbox->ctx_ndlp) {
9234 mempool_free(mbox, phba->mbox_mem_pool);
9235 goto mbox_err_exit;
9236 }
9237
9238 mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
9239 if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
9240 == MBX_NOT_FINISHED) {
9241 /* mailbox command not successful, decrement ndlp
9242 * reference count for this command
9243 */
9244 lpfc_nlp_put(ndlp);
9245 mempool_free(mbox, phba->mbox_mem_pool);
9246
9247 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9248 "0253 Register VPI: Can't send mbox\n");
9249 goto mbox_err_exit;
9250 }
9251 } else {
9252 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9253 "0254 Register VPI: no memory\n");
9254 goto mbox_err_exit;
9255 }
9256 return;
9257
9258 mbox_err_exit:
9259 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
9260 spin_lock_irq(shost->host_lock);
9261 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
9262 spin_unlock_irq(shost->host_lock);
9263 return;
9264 }
9265
9266 /**
9267 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
9268 * @phba: pointer to lpfc hba data structure.
9269 *
9270 * This routine cancels the retry delay timers for all the vports.
9271 **/
9272 void
9273 lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
9274 {
9275 struct lpfc_vport **vports;
9276 struct lpfc_nodelist *ndlp;
9277 uint32_t link_state;
9278 int i;
9279
9280 /* Treat this failure as linkdown for all vports */
9281 link_state = phba->link_state;
9282 lpfc_linkdown(phba);
9283 phba->link_state = link_state;
9284
9285 vports = lpfc_create_vport_work_array(phba);
9286
9287 if (vports) {
9288 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9289 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
9290 if (ndlp)
9291 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
9292 lpfc_els_flush_cmd(vports[i]);
9293 }
9294 lpfc_destroy_vport_work_array(phba, vports);
9295 }
9296 }
9297
9298 /**
9299 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
9300 * @phba: pointer to lpfc hba data structure.
9301 *
9302 * This routine aborts all pending discovery commands and
9303 * starts a timer to retry FLOGI for the physical port
9304 * discovery.
9305 **/
9306 void
9307 lpfc_retry_pport_discovery(struct lpfc_hba *phba)
9308 {
9309 struct lpfc_nodelist *ndlp;
9310
9311 /* Cancel all the vports' retry delay timers */
9312 lpfc_cancel_all_vport_retry_delay_timer(phba);
9313
9314 /* If the fabric requires FLOGI, then re-instantiate physical login */
9315 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
9316 if (!ndlp)
9317 return;
9318
9319 mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000));
9320 spin_lock_irq(&ndlp->lock);
9321 ndlp->nlp_flag |= NLP_DELAY_TMO;
9322 spin_unlock_irq(&ndlp->lock);
9323 ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
9324 phba->pport->port_state = LPFC_FLOGI;
9325 return;
9326 }
9327
9328 /**
9329 * lpfc_fabric_login_reqd - Check if FLOGI required.
9330 * @phba: pointer to lpfc hba data structure.
9331 * @cmdiocb: pointer to FDISC command iocb.
9332 * @rspiocb: pointer to FDISC response iocb.
9333 *
9334 * This routine checks if a FLOGI is required for FDISC
9335 * to succeed.
9336 **/
9337 static int
9338 lpfc_fabric_login_reqd(struct lpfc_hba *phba,
9339 struct lpfc_iocbq *cmdiocb,
9340 struct lpfc_iocbq *rspiocb)
9341 {
9342
9343 if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
9344 (rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
9345 return 0;
9346 else
9347 return 1;
9348 }
9349
9350 /**
9351 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
9352 * @phba: pointer to lpfc hba data structure.
9353 * @cmdiocb: pointer to lpfc command iocb data structure.
9354 * @rspiocb: pointer to lpfc response iocb data structure.
9355 *
9356 * This routine is the completion callback function to a Fabric Discover
9357 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
9358 * single threaded, each FDISC completion callback function will reset
9359 * the discovery timer for all vports such that the timers will not
9360 * time out unnecessarily. The function checks the FDISC IOCB status. If an
9361 * error is detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,
9362 * the vport will be set to FC_VPORT_ACTIVE state. It then checks whether the DID
9363 * assigned to the vport has been changed with the completion of the FDISC
9364 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
9365 * are unregistered from the HBA, and then the lpfc_register_new_vport()
9366 * routine is invoked to register new vport with the HBA. Otherwise, the
9367 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
9368 * Server for State Change Request (SCR).
9369 **/
9370 static void
9371 lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9372 struct lpfc_iocbq *rspiocb)
9373 {
9374 struct lpfc_vport *vport = cmdiocb->vport;
9375 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9376 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
9377 struct lpfc_nodelist *np;
9378 struct lpfc_nodelist *next_np;
9379 IOCB_t *irsp = &rspiocb->iocb;
9380 struct lpfc_iocbq *piocb;
9381 struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
9382 struct serv_parm *sp;
9383 uint8_t fabric_param_changed;
9384
9385 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9386 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
9387 irsp->ulpStatus, irsp->un.ulpWord[4],
9388 vport->fc_prevDID);
9389 /* Since all FDISCs are being single threaded, we
9390 * must reset the discovery timer for ALL vports
9391 * waiting to send FDISC when one completes.
9392 */
9393 list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
9394 lpfc_set_disctmo(piocb->vport);
9395 }
9396
9397 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9398 "FDISC cmpl: status:x%x/x%x prevdid:x%x",
9399 irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
9400
9401 if (irsp->ulpStatus) {
9402
9403 if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
9404 lpfc_retry_pport_discovery(phba);
9405 goto out;
9406 }
9407
9408 /* Check for retry */
9409 if (lpfc_els_retry(phba, cmdiocb, rspiocb))
9410 goto out;
9411 /* FDISC failed */
9412 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
9413 "0126 FDISC failed.
(x%x/x%x)\n", 9414 irsp->ulpStatus, irsp->un.ulpWord[4]); 9415 goto fdisc_failed; 9416 } 9417 spin_lock_irq(shost->host_lock); 9418 vport->fc_flag &= ~FC_VPORT_CVL_RCVD; 9419 vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; 9420 vport->fc_flag |= FC_FABRIC; 9421 if (vport->phba->fc_topology == LPFC_TOPOLOGY_LOOP) 9422 vport->fc_flag |= FC_PUBLIC_LOOP; 9423 spin_unlock_irq(shost->host_lock); 9424 9425 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 9426 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 9427 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 9428 if (!prsp) 9429 goto out; 9430 sp = prsp->virt + sizeof(uint32_t); 9431 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 9432 memcpy(&vport->fabric_portname, &sp->portName, 9433 sizeof(struct lpfc_name)); 9434 memcpy(&vport->fabric_nodename, &sp->nodeName, 9435 sizeof(struct lpfc_name)); 9436 if (fabric_param_changed && 9437 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9438 /* If our NportID changed, we need to ensure all 9439 * remaining NPORTs get unreg_login'ed so we can 9440 * issue unreg_vpi. 9441 */ 9442 list_for_each_entry_safe(np, next_np, 9443 &vport->fc_nodes, nlp_listp) { 9444 if ((np->nlp_state != NLP_STE_NPR_NODE) || 9445 !(np->nlp_flag & NLP_NPR_ADISC)) 9446 continue; 9447 spin_lock_irq(&ndlp->lock); 9448 np->nlp_flag &= ~NLP_NPR_ADISC; 9449 spin_unlock_irq(&ndlp->lock); 9450 lpfc_unreg_rpi(vport, np); 9451 } 9452 lpfc_cleanup_pending_mbox(vport); 9453 9454 if (phba->sli_rev == LPFC_SLI_REV4) 9455 lpfc_sli4_unreg_all_rpis(vport); 9456 9457 lpfc_mbx_unreg_vpi(vport); 9458 spin_lock_irq(shost->host_lock); 9459 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 9460 if (phba->sli_rev == LPFC_SLI_REV4) 9461 vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 9462 else 9463 vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; 9464 spin_unlock_irq(shost->host_lock); 9465 } else if ((phba->sli_rev == LPFC_SLI_REV4) && 9466 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { 9467 /* 9468 * Driver needs to re-reg VPI in order for f/w 9469 * to update the MAC address. 9470 */ 9471 lpfc_register_new_vport(phba, vport, ndlp); 9472 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 9473 goto out; 9474 } 9475 9476 if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) 9477 lpfc_issue_init_vpi(vport); 9478 else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) 9479 lpfc_register_new_vport(phba, vport, ndlp); 9480 else 9481 lpfc_do_scr_ns_plogi(phba, vport); 9482 9483 /* The FDISC completed successfully. Move the fabric ndlp to 9484 * UNMAPPED state and register with the transport. 9485 */ 9486 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 9487 goto out; 9488 9489 fdisc_failed: 9490 if (vport->fc_vport && 9491 (vport->fc_vport->vport_state != FC_VPORT_NO_FABRIC_RSCS)) 9492 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9493 /* Cancel discovery timer */ 9494 lpfc_can_disctmo(vport); 9495 out: 9496 lpfc_els_free_iocb(phba, cmdiocb); 9497 lpfc_nlp_put(ndlp); 9498 } 9499 9500 /** 9501 * lpfc_issue_els_fdisc - Issue a fdisc iocb command 9502 * @vport: pointer to a virtual N_Port data structure. 9503 * @ndlp: pointer to a node-list data structure. 9504 * @retry: number of retries to the command IOCB. 9505 * 9506 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to 9507 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb() 9508 * routine to issue the IOCB, which makes sure only one outstanding fabric 9509 * IOCB will be sent off HBA at any given time. 
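 *
 * For reference, and only as a summary of the code below (not a
 * restatement of the FC-LS spec): the request is the FDISC command word
 * followed by a copy of the physical port's service parameters with
 * E_D_TOV, R_A_TOV and the virtual fabric support bit cleared, class 1
 * marked invalid, sequential delivery set for classes 2 and 3, and the
 * vport's WWPN/WWNN substituted; fc_myDID is reset to 0 before sending.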
9510 * 9511 * Note that the ndlp reference count will be incremented by 1 for holding the 9512 * ndlp and the reference to ndlp will be stored into the context1 field of 9513 * the IOCB for the completion callback function to the FDISC ELS command. 9514 * 9515 * Return code 9516 * 0 - Successfully issued fdisc iocb command 9517 * 1 - Failed to issue fdisc iocb command 9518 **/ 9519 static int 9520 lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 9521 uint8_t retry) 9522 { 9523 struct lpfc_hba *phba = vport->phba; 9524 IOCB_t *icmd; 9525 struct lpfc_iocbq *elsiocb; 9526 struct serv_parm *sp; 9527 uint8_t *pcmd; 9528 uint16_t cmdsize; 9529 int did = ndlp->nlp_DID; 9530 int rc; 9531 9532 vport->port_state = LPFC_FDISC; 9533 vport->fc_myDID = 0; 9534 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 9535 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 9536 ELS_CMD_FDISC); 9537 if (!elsiocb) { 9538 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9539 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9540 "0255 Issue FDISC: no IOCB\n"); 9541 return 1; 9542 } 9543 9544 icmd = &elsiocb->iocb; 9545 icmd->un.elsreq64.myID = 0; 9546 icmd->un.elsreq64.fl = 1; 9547 9548 /* 9549 * SLI3 ports require a different context type value than SLI4. 9550 * Catch SLI3 ports here and override the prep. 9551 */ 9552 if (phba->sli_rev == LPFC_SLI_REV3) { 9553 icmd->ulpCt_h = 1; 9554 icmd->ulpCt_l = 0; 9555 } 9556 9557 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9558 *((uint32_t *) (pcmd)) = ELS_CMD_FDISC; 9559 pcmd += sizeof(uint32_t); /* CSP Word 1 */ 9560 memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm)); 9561 sp = (struct serv_parm *) pcmd; 9562 /* Setup CSPs accordingly for Fabric */ 9563 sp->cmn.e_d_tov = 0; 9564 sp->cmn.w2.r_a_tov = 0; 9565 sp->cmn.virtual_fabric_support = 0; 9566 sp->cls1.classValid = 0; 9567 sp->cls2.seqDelivery = 1; 9568 sp->cls3.seqDelivery = 1; 9569 9570 pcmd += sizeof(uint32_t); /* CSP Word 2 */ 9571 pcmd += sizeof(uint32_t); /* CSP Word 3 */ 9572 pcmd += sizeof(uint32_t); /* CSP Word 4 */ 9573 pcmd += sizeof(uint32_t); /* Port Name */ 9574 memcpy(pcmd, &vport->fc_portname, 8); 9575 pcmd += sizeof(uint32_t); /* Node Name */ 9576 pcmd += sizeof(uint32_t); /* Node Name */ 9577 memcpy(pcmd, &vport->fc_nodename, 8); 9578 sp->cmn.valid_vendor_ver_level = 0; 9579 memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); 9580 lpfc_set_disctmo(vport); 9581 9582 phba->fc_stat.elsXmitFDISC++; 9583 elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc; 9584 9585 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9586 "Issue FDISC: did:x%x", 9587 did, 0, 0); 9588 9589 elsiocb->context1 = lpfc_nlp_get(ndlp); 9590 if (!elsiocb->context1) { 9591 lpfc_els_free_iocb(phba, elsiocb); 9592 goto err_out; 9593 } 9594 9595 rc = lpfc_issue_fabric_iocb(phba, elsiocb); 9596 if (rc == IOCB_ERROR) { 9597 lpfc_els_free_iocb(phba, elsiocb); 9598 lpfc_nlp_put(ndlp); 9599 goto err_out; 9600 } 9601 9602 lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING); 9603 return 0; 9604 9605 err_out: 9606 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 9607 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 9608 "0256 Issue FDISC: Cannot send IOCB\n"); 9609 return 1; 9610 } 9611 9612 /** 9613 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo 9614 * @phba: pointer to lpfc hba data structure. 9615 * @cmdiocb: pointer to lpfc command iocb data structure. 9616 * @rspiocb: pointer to lpfc response iocb data structure. 
9617 *
9618 * This routine is the completion callback function to the issuing of a LOGO
9619 * ELS command off a vport. It frees the command IOCB and then decrements the
9620 * reference count held on the ndlp for this completion function, indicating
9621 * that the reference to the ndlp is no longer needed. Note that the
9622 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
9623 * callback function and an additional explicit ndlp reference decrement
9624 * will trigger the actual release of the ndlp.
9625 **/
9626 static void
9627 lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9628 struct lpfc_iocbq *rspiocb)
9629 {
9630 struct lpfc_vport *vport = cmdiocb->vport;
9631 IOCB_t *irsp;
9632 struct lpfc_nodelist *ndlp;
9633 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
9634
9635 ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
9636 irsp = &rspiocb->iocb;
9637 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
9638 "LOGO npiv cmpl: status:x%x/x%x did:x%x",
9639 irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
9640
9641 /* NPIV LOGO completes to NPort <nlp_DID> */
9642 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
9643 "2928 NPIV LOGO completes to NPort x%x "
9644 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
9645 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
9646 irsp->ulpTimeout, vport->num_disc_nodes,
9647 kref_read(&ndlp->kref), ndlp->nlp_flag,
9648 ndlp->fc4_xpt_flags);
9649
9650 if (irsp->ulpStatus == IOSTAT_SUCCESS) {
9651 spin_lock_irq(shost->host_lock);
9652 vport->fc_flag &= ~FC_NDISC_ACTIVE;
9653 vport->fc_flag &= ~FC_FABRIC;
9654 spin_unlock_irq(shost->host_lock);
9655 lpfc_can_disctmo(vport);
9656 }
9657
9658 /* Safe to release resources now. */
9659 lpfc_els_free_iocb(phba, cmdiocb);
9660 lpfc_nlp_put(ndlp);
9661 vport->unreg_vpi_cmpl = VPORT_ERROR;
9662 }
9663
9664 /**
9665 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
9666 * @vport: pointer to a virtual N_Port data structure.
9667 * @ndlp: pointer to a node-list data structure.
9668 *
9669 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
9670 *
9671 * Note that the ndlp reference count will be incremented by 1 for holding the
9672 * ndlp and the reference to ndlp will be stored into the context1 field of
9673 * the IOCB for the completion callback function to the LOGO ELS command.
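 *
 * For reference, and only as a summary of the code below: the LOGO
 * payload built here is the ELS LOGO command word, the vport's own
 * N_Port ID (fc_myDID) and its WWPN, which matches the cmdsize of two
 * words plus one lpfc_name used when the IOCB is prepared.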
9674 * 9675 * Return codes 9676 * 0 - Successfully issued logo off the @vport 9677 * 1 - Failed to issue logo off the @vport 9678 **/ 9679 int 9680 lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 9681 { 9682 int rc = 0; 9683 struct lpfc_hba *phba = vport->phba; 9684 struct lpfc_iocbq *elsiocb; 9685 uint8_t *pcmd; 9686 uint16_t cmdsize; 9687 9688 cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name); 9689 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID, 9690 ELS_CMD_LOGO); 9691 if (!elsiocb) 9692 return 1; 9693 9694 pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); 9695 *((uint32_t *) (pcmd)) = ELS_CMD_LOGO; 9696 pcmd += sizeof(uint32_t); 9697 9698 /* Fill in LOGO payload */ 9699 *((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID); 9700 pcmd += sizeof(uint32_t); 9701 memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); 9702 9703 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 9704 "Issue LOGO npiv did:x%x flg:x%x", 9705 ndlp->nlp_DID, ndlp->nlp_flag, 0); 9706 9707 elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo; 9708 spin_lock_irq(&ndlp->lock); 9709 ndlp->nlp_flag |= NLP_LOGO_SND; 9710 spin_unlock_irq(&ndlp->lock); 9711 elsiocb->context1 = lpfc_nlp_get(ndlp); 9712 if (!elsiocb->context1) { 9713 lpfc_els_free_iocb(phba, elsiocb); 9714 goto err; 9715 } 9716 9717 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 9718 if (rc == IOCB_ERROR) { 9719 lpfc_els_free_iocb(phba, elsiocb); 9720 lpfc_nlp_put(ndlp); 9721 goto err; 9722 } 9723 return 0; 9724 9725 err: 9726 spin_lock_irq(&ndlp->lock); 9727 ndlp->nlp_flag &= ~NLP_LOGO_SND; 9728 spin_unlock_irq(&ndlp->lock); 9729 return 1; 9730 } 9731 9732 /** 9733 * lpfc_fabric_block_timeout - Handler function to the fabric block timer 9734 * @t: timer context used to obtain the lpfc hba. 9735 * 9736 * This routine is invoked by the fabric iocb block timer after 9737 * timeout. It posts the fabric iocb block timeout event by setting the 9738 * WORKER_FABRIC_BLOCK_TMO bit to work port event bitmap and then invokes 9739 * lpfc_worker_wake_up() routine to wake up the worker thread. It is for 9740 * the worker thread to invoke the lpfc_unblock_fabric_iocbs() on the 9741 * posted event WORKER_FABRIC_BLOCK_TMO. 9742 **/ 9743 void 9744 lpfc_fabric_block_timeout(struct timer_list *t) 9745 { 9746 struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); 9747 unsigned long iflags; 9748 uint32_t tmo_posted; 9749 9750 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 9751 tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO; 9752 if (!tmo_posted) 9753 phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO; 9754 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 9755 9756 if (!tmo_posted) 9757 lpfc_worker_wake_up(phba); 9758 return; 9759 } 9760 9761 /** 9762 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list 9763 * @phba: pointer to lpfc hba data structure. 9764 * 9765 * This routine issues one fabric iocb from the driver internal list to 9766 * the HBA. It first checks whether it's ready to issue one fabric iocb to 9767 * the HBA (whether there is no outstanding fabric iocb). If so, it shall 9768 * remove one pending fabric iocb from the driver internal list and invokes 9769 * lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA. 
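 *
 * In outline, the loop below is (a simplified sketch of the code that
 * follows, not a separate helper):
 *
 *	while (fabric_iocb_count == 0 && an iocb is queued) {
 *		dequeue it and bump fabric_iocb_count to hold the slot;
 *		swap in lpfc_cmpl_fabric_iocb as the completion handler;
 *		if (lpfc_sli_issue_iocb() == IOCB_ERROR)
 *			complete it locally as IOSTAT_LOCAL_REJECT/
 *			IOERR_SLI_ABORTED and retry with the next one;
 *	}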
9770 **/ 9771 static void 9772 lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) 9773 { 9774 struct lpfc_iocbq *iocb; 9775 unsigned long iflags; 9776 int ret; 9777 IOCB_t *cmd; 9778 9779 repeat: 9780 iocb = NULL; 9781 spin_lock_irqsave(&phba->hbalock, iflags); 9782 /* Post any pending iocb to the SLI layer */ 9783 if (atomic_read(&phba->fabric_iocb_count) == 0) { 9784 list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb), 9785 list); 9786 if (iocb) 9787 /* Increment fabric iocb count to hold the position */ 9788 atomic_inc(&phba->fabric_iocb_count); 9789 } 9790 spin_unlock_irqrestore(&phba->hbalock, iflags); 9791 if (iocb) { 9792 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 9793 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 9794 iocb->iocb_flag |= LPFC_IO_FABRIC; 9795 9796 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 9797 "Fabric sched1: ste:x%x", 9798 iocb->vport->port_state, 0, 0); 9799 9800 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 9801 9802 if (ret == IOCB_ERROR) { 9803 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 9804 iocb->fabric_iocb_cmpl = NULL; 9805 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 9806 cmd = &iocb->iocb; 9807 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 9808 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 9809 iocb->iocb_cmpl(phba, iocb, iocb); 9810 9811 atomic_dec(&phba->fabric_iocb_count); 9812 goto repeat; 9813 } 9814 } 9815 } 9816 9817 /** 9818 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command 9819 * @phba: pointer to lpfc hba data structure. 9820 * 9821 * This routine unblocks the issuing fabric iocb command. The function 9822 * will clear the fabric iocb block bit and then invoke the routine 9823 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocb 9824 * from the driver internal fabric iocb list. 9825 **/ 9826 void 9827 lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba) 9828 { 9829 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9830 9831 lpfc_resume_fabric_iocbs(phba); 9832 return; 9833 } 9834 9835 /** 9836 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command 9837 * @phba: pointer to lpfc hba data structure. 9838 * 9839 * This routine blocks the issuing fabric iocb for a specified amount of 9840 * time (currently 100 ms). This is done by set the fabric iocb block bit 9841 * and set up a timeout timer for 100ms. When the block bit is set, no more 9842 * fabric iocb will be issued out of the HBA. 9843 **/ 9844 static void 9845 lpfc_block_fabric_iocbs(struct lpfc_hba *phba) 9846 { 9847 int blocked; 9848 9849 blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9850 /* Start a timer to unblock fabric iocbs after 100ms */ 9851 if (!blocked) 9852 mod_timer(&phba->fabric_block_timer, 9853 jiffies + msecs_to_jiffies(100)); 9854 9855 return; 9856 } 9857 9858 /** 9859 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb 9860 * @phba: pointer to lpfc hba data structure. 9861 * @cmdiocb: pointer to lpfc command iocb data structure. 9862 * @rspiocb: pointer to lpfc response iocb data structure. 9863 * 9864 * This routine is the callback function that is put to the fabric iocb's 9865 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback 9866 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback 9867 * function first restores and invokes the original iocb's callback function 9868 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next 9869 * fabric bound iocb from the driver internal fabric iocb list onto the wire. 
9870 **/
9871 static void
9872 lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9873 struct lpfc_iocbq *rspiocb)
9874 {
9875 struct ls_rjt stat;
9876
9877 BUG_ON((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC);
9878
9879 switch (rspiocb->iocb.ulpStatus) {
9880 case IOSTAT_NPORT_RJT:
9881 case IOSTAT_FABRIC_RJT:
9882 if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
9883 lpfc_block_fabric_iocbs(phba);
9884 }
9885 break;
9886
9887 case IOSTAT_NPORT_BSY:
9888 case IOSTAT_FABRIC_BSY:
9889 lpfc_block_fabric_iocbs(phba);
9890 break;
9891
9892 case IOSTAT_LS_RJT:
9893 stat.un.lsRjtError =
9894 be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
9895 if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
9896 (stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
9897 lpfc_block_fabric_iocbs(phba);
9898 break;
9899 }
9900
9901 BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
9902
9903 cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
9904 cmdiocb->fabric_iocb_cmpl = NULL;
9905 cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
9906 cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
9907
9908 atomic_dec(&phba->fabric_iocb_count);
9909 if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
9910 /* Post any pending iocbs to HBA */
9911 lpfc_resume_fabric_iocbs(phba);
9912 }
9913 }
9914
9915 /**
9916 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
9917 * @phba: pointer to lpfc hba data structure.
9918 * @iocb: pointer to lpfc command iocb data structure.
9919 *
9920 * This routine is used as the top-level API for issuing a fabric iocb command
9921 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
9922 * function makes sure that only one fabric bound iocb will be outstanding at
9923 * any given time. As such, this function will first check to see whether there
9924 * is already an outstanding fabric iocb on the wire. If so, it will put the
9925 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
9926 * issued later. Otherwise, it will issue the iocb on the wire and update the
9927 * fabric iocb count to indicate that there is one fabric iocb on the wire.
9928 *
9929 * Note, this implementation can potentially send fabric IOCBs out of
9930 * order. The problem is that the construction of the "ready" boolean does
9931 * not include the condition that the internal fabric IOCB list is empty. As
9932 * such, it is possible that a fabric IOCB issued by this routine might "jump"
9933 * ahead of the fabric IOCBs in the internal list.
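 *
 * Roughly, the decision below reduces to (a sketch of the code that
 * follows, not a separate helper):
 *
 *	ready = (fabric_iocb_count == 0) &&
 *		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
 *	if (ready)
 *		issue now, holding the slot by bumping fabric_iocb_count;
 *	else
 *		queue on phba->fabric_iocb_list to be sent later by
 *		lpfc_resume_fabric_iocbs()/lpfc_unblock_fabric_iocbs().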
9934 * 9935 * Return code 9936 * IOCB_SUCCESS - either fabric iocb put on the list or issued successfully 9937 * IOCB_ERROR - failed to issue fabric iocb 9938 **/ 9939 static int 9940 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) 9941 { 9942 unsigned long iflags; 9943 int ready; 9944 int ret; 9945 9946 BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1); 9947 9948 spin_lock_irqsave(&phba->hbalock, iflags); 9949 ready = atomic_read(&phba->fabric_iocb_count) == 0 && 9950 !test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 9951 9952 if (ready) 9953 /* Increment fabric iocb count to hold the position */ 9954 atomic_inc(&phba->fabric_iocb_count); 9955 spin_unlock_irqrestore(&phba->hbalock, iflags); 9956 if (ready) { 9957 iocb->fabric_iocb_cmpl = iocb->iocb_cmpl; 9958 iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb; 9959 iocb->iocb_flag |= LPFC_IO_FABRIC; 9960 9961 lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD, 9962 "Fabric sched2: ste:x%x", 9963 iocb->vport->port_state, 0, 0); 9964 9965 ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); 9966 9967 if (ret == IOCB_ERROR) { 9968 iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; 9969 iocb->fabric_iocb_cmpl = NULL; 9970 iocb->iocb_flag &= ~LPFC_IO_FABRIC; 9971 atomic_dec(&phba->fabric_iocb_count); 9972 } 9973 } else { 9974 spin_lock_irqsave(&phba->hbalock, iflags); 9975 list_add_tail(&iocb->list, &phba->fabric_iocb_list); 9976 spin_unlock_irqrestore(&phba->hbalock, iflags); 9977 ret = IOCB_SUCCESS; 9978 } 9979 return ret; 9980 } 9981 9982 /** 9983 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list 9984 * @vport: pointer to a virtual N_Port data structure. 9985 * 9986 * This routine aborts all the IOCBs associated with a @vport from the 9987 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 9988 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 9989 * list, removes each IOCB associated with the @vport off the list, set the 9990 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 9991 * associated with the IOCB. 9992 **/ 9993 static void lpfc_fabric_abort_vport(struct lpfc_vport *vport) 9994 { 9995 LIST_HEAD(completions); 9996 struct lpfc_hba *phba = vport->phba; 9997 struct lpfc_iocbq *tmp_iocb, *piocb; 9998 9999 spin_lock_irq(&phba->hbalock); 10000 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 10001 list) { 10002 10003 if (piocb->vport != vport) 10004 continue; 10005 10006 list_move_tail(&piocb->list, &completions); 10007 } 10008 spin_unlock_irq(&phba->hbalock); 10009 10010 /* Cancel all the IOCBs from the completions list */ 10011 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10012 IOERR_SLI_ABORTED); 10013 } 10014 10015 /** 10016 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list 10017 * @ndlp: pointer to a node-list data structure. 10018 * 10019 * This routine aborts all the IOCBs associated with an @ndlp from the 10020 * driver internal fabric IOCB list. The list contains fabric IOCBs to be 10021 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB 10022 * list, removes each IOCB associated with the @ndlp off the list, set the 10023 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function 10024 * associated with the IOCB. 
10025 **/ 10026 void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp) 10027 { 10028 LIST_HEAD(completions); 10029 struct lpfc_hba *phba = ndlp->phba; 10030 struct lpfc_iocbq *tmp_iocb, *piocb; 10031 struct lpfc_sli_ring *pring; 10032 10033 pring = lpfc_phba_elsring(phba); 10034 10035 if (unlikely(!pring)) 10036 return; 10037 10038 spin_lock_irq(&phba->hbalock); 10039 list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list, 10040 list) { 10041 if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) { 10042 10043 list_move_tail(&piocb->list, &completions); 10044 } 10045 } 10046 spin_unlock_irq(&phba->hbalock); 10047 10048 /* Cancel all the IOCBs from the completions list */ 10049 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10050 IOERR_SLI_ABORTED); 10051 } 10052 10053 /** 10054 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list 10055 * @phba: pointer to lpfc hba data structure. 10056 * 10057 * This routine aborts all the IOCBs currently on the driver internal 10058 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS 10059 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB 10060 * list, removes IOCBs off the list, set the status field to 10061 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with 10062 * the IOCB. 10063 **/ 10064 void lpfc_fabric_abort_hba(struct lpfc_hba *phba) 10065 { 10066 LIST_HEAD(completions); 10067 10068 spin_lock_irq(&phba->hbalock); 10069 list_splice_init(&phba->fabric_iocb_list, &completions); 10070 spin_unlock_irq(&phba->hbalock); 10071 10072 /* Cancel all the IOCBs from the completions list */ 10073 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10074 IOERR_SLI_ABORTED); 10075 } 10076 10077 /** 10078 * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport 10079 * @vport: pointer to lpfc vport data structure. 10080 * 10081 * This routine is invoked by the vport cleanup for deletions and the cleanup 10082 * for an ndlp on removal. 10083 **/ 10084 void 10085 lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) 10086 { 10087 struct lpfc_hba *phba = vport->phba; 10088 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 10089 unsigned long iflag = 0; 10090 10091 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 10092 list_for_each_entry_safe(sglq_entry, sglq_next, 10093 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 10094 if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) { 10095 lpfc_nlp_put(sglq_entry->ndlp); 10096 sglq_entry->ndlp = NULL; 10097 } 10098 } 10099 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 10100 return; 10101 } 10102 10103 /** 10104 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort 10105 * @phba: pointer to lpfc hba data structure. 10106 * @axri: pointer to the els xri abort wcqe structure. 10107 * 10108 * This routine is invoked by the worker thread to process a SLI4 slow-path 10109 * ELS aborted xri. 
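 *
 * In short, the handler below looks the aborted XRI up on the
 * lpfc_abts_els_sgl_list, returns that sglq to lpfc_els_sgl_list as
 * SGL_FREED, marks an RRQ active for the node so the exchange is not
 * immediately reused against it, drops the ndlp reference, and wakes
 * the worker thread if the ELS txq has work pending. An XRI that is
 * still an active sglq is instead just marked SGL_XRI_ABORTED.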
10110 **/ 10111 void 10112 lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, 10113 struct sli4_wcqe_xri_aborted *axri) 10114 { 10115 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); 10116 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); 10117 uint16_t lxri = 0; 10118 10119 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 10120 unsigned long iflag = 0; 10121 struct lpfc_nodelist *ndlp; 10122 struct lpfc_sli_ring *pring; 10123 10124 pring = lpfc_phba_elsring(phba); 10125 10126 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, iflag); 10127 list_for_each_entry_safe(sglq_entry, sglq_next, 10128 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { 10129 if (sglq_entry->sli4_xritag == xri) { 10130 list_del(&sglq_entry->list); 10131 ndlp = sglq_entry->ndlp; 10132 sglq_entry->ndlp = NULL; 10133 list_add_tail(&sglq_entry->list, 10134 &phba->sli4_hba.lpfc_els_sgl_list); 10135 sglq_entry->state = SGL_FREED; 10136 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, 10137 iflag); 10138 10139 if (ndlp) { 10140 lpfc_set_rrq_active(phba, ndlp, 10141 sglq_entry->sli4_lxritag, 10142 rxid, 1); 10143 lpfc_nlp_put(ndlp); 10144 } 10145 10146 /* Check if TXQ queue needs to be serviced */ 10147 if (pring && !list_empty(&pring->txq)) 10148 lpfc_worker_wake_up(phba); 10149 return; 10150 } 10151 } 10152 spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, iflag); 10153 lxri = lpfc_sli4_xri_inrange(phba, xri); 10154 if (lxri == NO_XRI) 10155 return; 10156 10157 spin_lock_irqsave(&phba->hbalock, iflag); 10158 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 10159 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 10160 spin_unlock_irqrestore(&phba->hbalock, iflag); 10161 return; 10162 } 10163 sglq_entry->state = SGL_XRI_ABORTED; 10164 spin_unlock_irqrestore(&phba->hbalock, iflag); 10165 return; 10166 } 10167 10168 /* lpfc_sli_abts_recover_port - Recover a port that failed a BLS_ABORT req. 10169 * @vport: pointer to virtual port object. 10170 * @ndlp: nodelist pointer for the impacted node. 10171 * 10172 * The driver calls this routine in response to an SLI4 XRI ABORT CQE 10173 * or an SLI3 ASYNC_STATUS_CN event from the port. For either event, 10174 * the driver is required to send a LOGO to the remote node before it 10175 * attempts to recover its login to the remote node. 10176 */ 10177 void 10178 lpfc_sli_abts_recover_port(struct lpfc_vport *vport, 10179 struct lpfc_nodelist *ndlp) 10180 { 10181 struct Scsi_Host *shost; 10182 struct lpfc_hba *phba; 10183 unsigned long flags = 0; 10184 10185 shost = lpfc_shost_from_vport(vport); 10186 phba = vport->phba; 10187 if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) { 10188 lpfc_printf_log(phba, KERN_INFO, 10189 LOG_SLI, "3093 No rport recovery needed. " 10190 "rport in state 0x%x\n", ndlp->nlp_state); 10191 return; 10192 } 10193 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 10194 "3094 Start rport recovery on shost id 0x%x " 10195 "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " 10196 "flags 0x%x\n", 10197 shost->host_no, ndlp->nlp_DID, 10198 vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, 10199 ndlp->nlp_flag); 10200 /* 10201 * The rport is not responding. Remove the FCP-2 flag to prevent 10202 * an ADISC in the follow-up recovery code. 10203 */ 10204 spin_lock_irqsave(&ndlp->lock, flags); 10205 ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; 10206 ndlp->nlp_flag |= NLP_ISSUE_LOGO; 10207 spin_unlock_irqrestore(&ndlp->lock, flags); 10208 lpfc_unreg_rpi(vport, ndlp); 10209 } 10210 10211